From 8d422e72e75657909be1beed048ee3dc034f4c23 Mon Sep 17 00:00:00 2001
From: Alex Cunningham
Date: Mon, 25 Jun 2012 16:47:39 +0000
Subject: [PATCH] Adding in Eigen 3.1

---
 gtsam/3rdparty/Eigen/CMakeLists.txt | 67 +-
 gtsam/3rdparty/Eigen/COPYING.BSD | 26 +
 gtsam/3rdparty/Eigen/CTestConfig.cmake | 2 +-
 gtsam/3rdparty/Eigen/Eigen/Cholesky | 7 +-
 gtsam/3rdparty/Eigen/Eigen/CholmodSupport | 45 +
 gtsam/3rdparty/Eigen/Eigen/Core | 39 +-
 gtsam/3rdparty/Eigen/Eigen/Eigen2Support | 25 +-
 gtsam/3rdparty/Eigen/Eigen/Eigenvalues | 10 +-
 gtsam/3rdparty/Eigen/Eigen/Geometry | 4 -
 gtsam/3rdparty/Eigen/Eigen/Householder | 4 -
 .../Eigen/Eigen/IterativeLinearSolvers | 40 +
 gtsam/3rdparty/Eigen/Eigen/Jacobi | 4 -
 gtsam/3rdparty/Eigen/Eigen/LU | 7 +-
 gtsam/3rdparty/Eigen/Eigen/LeastSquares | 4 -
 gtsam/3rdparty/Eigen/Eigen/OrderingMethods | 23 +
 gtsam/3rdparty/Eigen/Eigen/PaStiXSupport | 46 +
 gtsam/3rdparty/Eigen/Eigen/PardisoSupport | 30 +
 gtsam/3rdparty/Eigen/Eigen/QR | 8 +-
 gtsam/3rdparty/Eigen/Eigen/SVD | 7 +-
 gtsam/3rdparty/Eigen/Eigen/Sparse | 66 +-
 gtsam/3rdparty/Eigen/Eigen/SparseCholesky | 30 +
 gtsam/3rdparty/Eigen/Eigen/SparseCore | 66 +
 gtsam/3rdparty/Eigen/Eigen/SuperLUSupport | 59 +
 gtsam/3rdparty/Eigen/Eigen/UmfPackSupport | 36 +
 .../3rdparty/Eigen/Eigen/src/Cholesky/LDLT.h | 159 +-
 gtsam/3rdparty/Eigen/Eigen/src/Cholesky/LLT.h | 159 +-
 .../Eigen/Eigen/src/Cholesky/LLT_MKL.h | 102 +
 .../Eigen/src/CholmodSupport/CMakeLists.txt | 6 +
 .../src/CholmodSupport}/CholmodSupport.h | 315 ++-
 gtsam/3rdparty/Eigen/Eigen/src/Core/Array.h | 3 +
 .../3rdparty/Eigen/Eigen/src/Core/ArrayBase.h | 10 +-
 .../Eigen/Eigen/src/Core/ArrayWrapper.h | 32 +-
 gtsam/3rdparty/Eigen/Eigen/src/Core/Assign.h | 111 +-
 .../Eigen/Eigen/src/Core/Assign_MKL.h | 224 +++
 .../Eigen/Eigen/src/Core/BandMatrix.h | 5 +-
 gtsam/3rdparty/Eigen/Eigen/src/Core/Block.h | 27 +-
 .../Eigen/Eigen/src/Core/BooleanRedux.h | 16 +-
 .../Eigen/Eigen/src/Core/CommaInitializer.h | 4 +
 .../Eigen/Eigen/src/Core/CwiseBinaryOp.h | 8 +-
 .../Eigen/Eigen/src/Core/CwiseNullaryOp.h | 28 +
 .../Eigen/Eigen/src/Core/CwiseUnaryOp.h | 6 +-
 .../Eigen/Eigen/src/Core/CwiseUnaryView.h | 6 +-
 .../3rdparty/Eigen/Eigen/src/Core/DenseBase.h | 11 +-
 .../Eigen/Eigen/src/Core/DenseCoeffsBase.h | 12 +-
 .../Eigen/Eigen/src/Core/DenseStorage.h | 26 +-
 .../3rdparty/Eigen/Eigen/src/Core/Diagonal.h | 27 +-
 .../Eigen/Eigen/src/Core/DiagonalMatrix.h | 10 +-
 .../Eigen/Eigen/src/Core/DiagonalProduct.h | 7 +-
 gtsam/3rdparty/Eigen/Eigen/src/Core/Dot.h | 12 +-
 .../3rdparty/Eigen/Eigen/src/Core/EigenBase.h | 3 +
 gtsam/3rdparty/Eigen/Eigen/src/Core/Flagged.h | 4 +
 .../Eigen/Eigen/src/Core/ForceAlignedAccess.h | 4 +
 .../3rdparty/Eigen/Eigen/src/Core/Functors.h | 94 +-
 gtsam/3rdparty/Eigen/Eigen/src/Core/Fuzzy.h | 8 +-
 .../Eigen/Eigen/src/Core/GeneralProduct.h | 628 ++++++
 .../Eigen/Eigen/src/Core/GenericPacketMath.h | 6 +-
 .../Eigen/Eigen/src/Core/GlobalFunctions.h | 27 +-
 gtsam/3rdparty/Eigen/Eigen/src/Core/IO.h | 6 +-
 gtsam/3rdparty/Eigen/Eigen/src/Core/Map.h | 4 +
 gtsam/3rdparty/Eigen/Eigen/src/Core/MapBase.h | 2 +
 .../Eigen/Eigen/src/Core/MathFunctions.h | 20 +-
 gtsam/3rdparty/Eigen/Eigen/src/Core/Matrix.h | 23 +-
 .../Eigen/Eigen/src/Core/MatrixBase.h | 15 +-
 .../Eigen/Eigen/src/Core/NestByValue.h | 4 +
 gtsam/3rdparty/Eigen/Eigen/src/Core/NoAlias.h | 4 +
 .../3rdparty/Eigen/Eigen/src/Core/NumTraits.h | 20 +-
 .../Eigen/Eigen/src/Core/PermutationMatrix.h | 10 +-
 .../Eigen/Eigen/src/Core/PlainObjectBase.h | 82 +-
 gtsam/3rdparty/Eigen/Eigen/src/Core/Product.h | 626 +-----
.../Eigen/Eigen/src/Core/ProductBase.h | 17 +- gtsam/3rdparty/Eigen/Eigen/src/Core/Random.h | 4 + gtsam/3rdparty/Eigen/Eigen/src/Core/Redux.h | 43 +- .../3rdparty/Eigen/Eigen/src/Core/Replicate.h | 19 +- .../Eigen/Eigen/src/Core/ReturnByValue.h | 4 + gtsam/3rdparty/Eigen/Eigen/src/Core/Reverse.h | 11 +- gtsam/3rdparty/Eigen/Eigen/src/Core/Select.h | 25 +- .../Eigen/Eigen/src/Core/SelfAdjointView.h | 20 +- .../Eigen/Eigen/src/Core/SelfCwiseBinaryOp.h | 14 + .../Eigen/Eigen/src/Core/SolveTriangular.h | 26 +- .../Eigen/Eigen/src/Core/StableNorm.h | 10 +- gtsam/3rdparty/Eigen/Eigen/src/Core/Stride.h | 4 + gtsam/3rdparty/Eigen/Eigen/src/Core/Swap.h | 15 + .../3rdparty/Eigen/Eigen/src/Core/Transpose.h | 10 +- .../Eigen/Eigen/src/Core/Transpositions.h | 6 +- .../Eigen/Eigen/src/Core/TriangularMatrix.h | 36 +- .../Eigen/Eigen/src/Core/VectorBlock.h | 3 + .../Eigen/Eigen/src/Core/VectorwiseOp.h | 88 +- gtsam/3rdparty/Eigen/Eigen/src/Core/Visitor.h | 10 +- .../Eigen/src/Core/arch/AltiVec/Complex.h | 6 +- .../Eigen/src/Core/arch/AltiVec/PacketMath.h | 8 +- .../Eigen/Eigen/src/Core/arch/NEON/Complex.h | 4 + .../Eigen/src/Core/arch/NEON/PacketMath.h | 7 +- .../Eigen/Eigen/src/Core/arch/SSE/Complex.h | 10 +- .../Eigen/src/Core/arch/SSE/MathFunctions.h | 8 +- .../Eigen/src/Core/arch/SSE/PacketMath.h | 31 +- .../src/Core/products/CoeffBasedProduct.h | 42 +- .../Core/products/GeneralBlockPanelKernel.h | 254 ++- .../src/Core/products/GeneralMatrixMatrix.h | 22 +- .../products/GeneralMatrixMatrixTriangular.h | 20 +- .../GeneralMatrixMatrixTriangular_MKL.h | 146 ++ .../Core/products/GeneralMatrixMatrix_MKL.h | 118 ++ .../src/Core/products/GeneralMatrixVector.h | 20 +- .../Core/products/GeneralMatrixVector_MKL.h | 131 ++ .../Eigen/src/Core/products/Parallelizer.h | 26 +- .../Core/products/SelfadjointMatrixMatrix.h | 8 +- .../products/SelfadjointMatrixMatrix_MKL.h | 295 +++ .../Core/products/SelfadjointMatrixVector.h | 29 +- .../products/SelfadjointMatrixVector_MKL.h | 114 ++ .../src/Core/products/SelfadjointProduct.h | 10 +- .../Core/products/SelfadjointRank2Update.h | 8 +- .../Core/products/TriangularMatrixMatrix.h | 23 +- .../products/TriangularMatrixMatrix_MKL.h | 309 +++ .../Core/products/TriangularMatrixVector.h | 80 +- .../products/TriangularMatrixVector_MKL.h | 247 +++ .../Core/products/TriangularSolverMatrix.h | 93 +- .../products/TriangularSolverMatrix_MKL.h | 155 ++ .../Core/products/TriangularSolverVector.h | 4 + .../Eigen/Eigen/src/Core/util/BlasUtil.h | 26 +- .../Eigen/Eigen/src/Core/util/Constants.h | 57 +- .../src/Core/util/DisableStupidWarnings.h | 4 +- .../Eigen/src/Core/util/ForwardDeclarations.h | 6 + .../Eigen/Eigen/src/Core/util/MKL_support.h | 109 ++ .../Eigen/Eigen/src/Core/util/Macros.h | 23 +- .../Eigen/Eigen/src/Core/util/Memory.h | 84 +- .../3rdparty/Eigen/Eigen/src/Core/util/Meta.h | 21 +- .../Eigen/Eigen/src/Core/util/StaticAssert.h | 24 +- .../Eigen/Eigen/src/Core/util/XprHelper.h | 39 +- .../Eigen/Eigen/src/Eigen2Support/Block.h | 4 + .../Eigen/Eigen/src/Eigen2Support/Cwise.h | 4 + .../Eigen/src/Eigen2Support/CwiseOperators.h | 4 + .../src/Eigen2Support/Geometry/AlignedBox.h | 12 +- .../Eigen/src/Eigen2Support/Geometry/All.h | 2 +- .../src/Eigen2Support/Geometry/AngleAxis.h | 3 + .../src/Eigen2Support/Geometry/Hyperplane.h | 4 + .../Eigen2Support/Geometry/ParametrizedLine.h | 3 + .../src/Eigen2Support/Geometry/Quaternion.h | 22 +- .../src/Eigen2Support/Geometry/Rotation2D.h | 3 + .../src/Eigen2Support/Geometry/RotationBase.h | 10 +- .../src/Eigen2Support/Geometry/Scaling.h | 3 + 
.../src/Eigen2Support/Geometry/Transform.h | 3 + .../src/Eigen2Support/Geometry/Translation.h | 3 + .../Eigen/Eigen/src/Eigen2Support/LU.h | 4 +- .../Eigen/Eigen/src/Eigen2Support/Lazy.h | 4 + .../Eigen/src/Eigen2Support/LeastSquares.h | 3 + .../Eigen/src/Eigen2Support/MathFunctions.h | 4 + .../Eigen/Eigen/src/Eigen2Support/Memory.h | 4 +- .../Eigen/Eigen/src/Eigen2Support/Meta.h | 4 + .../Eigen/Eigen/src/Eigen2Support/Minor.h | 4 + .../Eigen/Eigen/src/Eigen2Support/QR.h | 3 + .../Eigen/Eigen/src/Eigen2Support/SVD.h | 6 +- .../src/Eigen2Support/TriangularSolver.h | 4 + .../Eigen/src/Eigen2Support/VectorBlock.h | 4 + .../src/Eigenvalues/ComplexEigenSolver.h | 4 +- .../Eigen/src/Eigenvalues/ComplexSchur.h | 47 +- .../Eigen/src/Eigenvalues/ComplexSchur_MKL.h | 94 + .../Eigen/Eigen/src/Eigenvalues/EigenSolver.h | 11 +- .../GeneralizedSelfAdjointEigenSolver.h | 5 +- .../src/Eigenvalues/HessenbergDecomposition.h | 6 +- .../src/Eigenvalues/MatrixBaseEigenvalues.h | 4 + .../Eigen/Eigen/src/Eigenvalues/RealSchur.h | 69 +- .../Eigen/src/Eigenvalues/RealSchur_MKL.h | 83 + .../src/Eigenvalues/SelfAdjointEigenSolver.h | 290 ++- .../Eigenvalues/SelfAdjointEigenSolver_MKL.h | 92 + .../src/Eigenvalues/Tridiagonalization.h | 12 +- .../Eigen/Eigen/src/Geometry/AlignedBox.h | 46 +- .../Eigen/Eigen/src/Geometry/AngleAxis.h | 6 +- .../Eigen/Eigen/src/Geometry/EulerAngles.h | 3 + .../Eigen/Eigen/src/Geometry/Homogeneous.h | 18 +- .../Eigen/Eigen/src/Geometry/Hyperplane.h | 4 + .../Eigen/Eigen/src/Geometry/OrthoMethods.h | 18 +- .../Eigen/src/Geometry/ParametrizedLine.h | 46 +- .../Eigen/Eigen/src/Geometry/Quaternion.h | 56 +- .../Eigen/Eigen/src/Geometry/Rotation2D.h | 6 +- .../Eigen/Eigen/src/Geometry/RotationBase.h | 16 +- .../Eigen/Eigen/src/Geometry/Scaling.h | 4 + .../Eigen/Eigen/src/Geometry/Transform.h | 108 +- .../Eigen/Eigen/src/Geometry/Translation.h | 10 +- .../Eigen/Eigen/src/Geometry/Umeyama.h | 4 + .../Eigen/src/Geometry/arch/Geometry_SSE.h | 14 +- .../Eigen/src/Householder/BlockHouseholder.h | 6 +- .../Eigen/Eigen/src/Householder/Householder.h | 52 +- .../src/Householder/HouseholderSequence.h | 53 +- .../BasicPreconditioners.h | 163 ++ .../src/IterativeLinearSolvers/BiCGSTAB.h | 269 +++ .../src/IterativeLinearSolvers/CMakeLists.txt | 6 + .../ConjugateGradient.h | 266 +++ .../IterativeLinearSolvers/IncompleteLUT.h | 476 +++++ .../IterativeSolverBase.h | 269 +++ .../3rdparty/Eigen/Eigen/src/Jacobi/Jacobi.h | 11 +- .../3rdparty/Eigen/Eigen/src/LU/Determinant.h | 4 + gtsam/3rdparty/Eigen/Eigen/src/LU/FullPivLU.h | 5 + gtsam/3rdparty/Eigen/Eigen/src/LU/Inverse.h | 6 +- .../Eigen/Eigen/src/LU/PartialPivLU.h | 4 + .../Eigen/Eigen/src/LU/PartialPivLU_MKL.h | 85 + .../Eigen/Eigen/src/LU/arch/Inverse_SSE.h | 6 +- .../src/OrderingMethods}/Amd.h | 46 +- .../Eigen/src/OrderingMethods/CMakeLists.txt | 6 + .../Eigen/src/PaStiXSupport/CMakeLists.txt | 6 + .../Eigen/src/PaStiXSupport/PaStiXSupport.h | 757 ++++++++ .../Eigen/src/PardisoSupport/CMakeLists.txt | 6 + .../Eigen/src/PardisoSupport/PardisoSupport.h | 614 ++++++ .../Eigen/Eigen/src/QR/ColPivHouseholderQR.h | 3 + .../Eigen/src/QR/ColPivHouseholderQR_MKL.h | 98 + .../Eigen/Eigen/src/QR/FullPivHouseholderQR.h | 101 +- .../Eigen/Eigen/src/QR/HouseholderQR.h | 3 + .../Eigen/Eigen/src/QR/HouseholderQR_MKL.h | 69 + .../3rdparty/Eigen/Eigen/src/SVD/JacobiSVD.h | 273 ++- .../Eigen/Eigen/src/SVD/JacobiSVD_MKL.h | 92 + .../Eigen/src/SVD/UpperBidiagonalization.h | 4 + .../Eigen/Eigen/src/Sparse/CMakeLists.txt | 6 - .../Eigen/Eigen/src/Sparse/SparseMatrix.h | 651 
------- .../Eigen/src/Sparse/SparseSparseProduct.h | 401 ---- .../Eigen/src/Sparse/SparseTriangularView.h | 100 - .../Eigen/src/SparseCholesky/CMakeLists.txt | 6 + .../src/SparseCholesky/SimplicialCholesky.h | 886 +++++++++ .../src/{Sparse => SparseCore}/AmbiVector.h | 11 +- .../Eigen/Eigen/src/SparseCore/CMakeLists.txt | 6 + .../CompressedStorage.h | 15 +- .../ConservativeSparseSparseProduct.h | 260 +++ .../{Sparse => SparseCore}/CoreIterators.h | 7 +- .../MappedSparseMatrix.h | 67 +- .../src/{Sparse => SparseCore}/SparseAssign.h | 0 .../src/{Sparse => SparseCore}/SparseBlock.h | 191 +- .../SparseCwiseBinaryOp.h | 66 +- .../SparseCwiseUnaryOp.h | 118 +- .../SparseDenseProduct.h | 126 +- .../SparseDiagonalProduct.h | 4 + .../src/{Sparse => SparseCore}/SparseDot.h | 20 +- .../src/{Sparse => SparseCore}/SparseFuzzy.h | 0 .../Eigen/Eigen/src/SparseCore/SparseMatrix.h | 1127 +++++++++++ .../{Sparse => SparseCore}/SparseMatrixBase.h | 419 +--- .../Eigen/src/SparseCore/SparsePermutation.h | 163 ++ .../{Sparse => SparseCore}/SparseProduct.h | 82 +- .../src/{Sparse => SparseCore}/SparseRedux.h | 6 +- .../SparseSelfAdjointView.h | 155 +- .../SparseSparseProductWithPruning.h | 164 ++ .../{Sparse => SparseCore}/SparseTranspose.h | 20 +- .../src/SparseCore/SparseTriangularView.h | 179 ++ .../src/{Sparse => SparseCore}/SparseUtil.h | 88 +- .../src/{Sparse => SparseCore}/SparseVector.h | 184 +- .../src/{Sparse => SparseCore}/SparseView.h | 16 +- .../{Sparse => SparseCore}/TriangularSolver.h | 48 +- .../Eigen/src/SuperLUSupport/CMakeLists.txt | 6 + .../Eigen/src/SuperLUSupport/SuperLUSupport.h | 1040 ++++++++++ .../Eigen/src/UmfPackSupport/CMakeLists.txt | 6 + .../Eigen/src/UmfPackSupport/UmfPackSupport.h | 446 +++++ gtsam/3rdparty/Eigen/Eigen/src/misc/Image.h | 4 + gtsam/3rdparty/Eigen/Eigen/src/misc/Kernel.h | 4 + gtsam/3rdparty/Eigen/Eigen/src/misc/Solve.h | 6 +- .../Solve.h => Eigen/src/misc/SparseSolve.h} | 48 +- gtsam/3rdparty/Eigen/Eigen/src/misc/blas.h | 658 +++++++ .../Eigen/src/plugins/ArrayCwiseBinaryOps.h | 56 + .../Eigen/src/plugins/MatrixCwiseBinaryOps.h | 21 + gtsam/3rdparty/Eigen/bench/BenchSparseUtil.h | 20 +- gtsam/3rdparty/Eigen/bench/BenchTimer.h | 31 +- gtsam/3rdparty/Eigen/bench/BenchUtil.h | 20 + gtsam/3rdparty/Eigen/bench/bench_gemm.cpp | 13 +- gtsam/3rdparty/Eigen/bench/btl/CMakeLists.txt | 11 +- .../bench/btl/actions/action_aat_product.hh | 2 +- .../bench/btl/actions/action_hessenberg.hh | 21 +- .../Eigen/bench/btl/actions/basic_actions.hh | 2 +- .../Eigen/bench/btl/cmake/FindEigen3.cmake | 81 - .../Eigen/bench/btl/data/action_settings.txt | 1 + gtsam/3rdparty/Eigen/bench/btl/data/go_mean | 7 +- .../bench/btl/data/perlib_plot_settings.txt | 8 +- .../btl/libs/{C_BLAS => BLAS}/CMakeLists.txt | 41 +- .../bench/btl/libs/{C_BLAS => BLAS}/blas.h | 0 .../bench/btl/libs/BLAS/blas_interface.hh | 83 + .../btl/libs/BLAS/blas_interface_impl.hh | 151 ++ .../c_interface_base.h} | 40 +- .../Eigen/bench/btl/libs/BLAS/main.cpp | 73 + .../Eigen/bench/btl/libs/C/CMakeLists.txt | 3 - .../Eigen/bench/btl/libs/C/C_interface.hh | 117 -- .../bench/btl/libs/C_BLAS/C_BLAS_interface.hh | 358 ---- .../Eigen/bench/btl/libs/C_BLAS/cblas.h | 596 ------ .../Eigen/bench/btl/libs/C_BLAS/main.cpp | 73 - .../Eigen/bench/btl/libs/STL/STL_interface.hh | 33 +- .../bench/btl/libs/STL_algo/CMakeLists.txt | 2 - .../btl/libs/STL_algo/STL_algo_interface.hh | 138 -- .../bench/btl/libs/eigen2/CMakeLists.txt | 19 + .../main.cpp => eigen2/btl_tiny_eigen2.cpp} | 25 +- .../bench/btl/libs/eigen2/eigen2_interface.hh | 168 ++ 
.../Eigen/bench/btl/libs/eigen2/main_adv.cpp | 44 + .../main.cpp => eigen2/main_linear.cpp} | 17 +- .../main_matmat.cpp} | 35 +- .../{C/main.cpp => eigen2/main_vecmat.cpp} | 28 +- .../bench/btl/libs/eigen3/CMakeLists.txt | 18 +- .../bench/btl/libs/eigen3/eigen3_interface.hh | 15 +- .../bench/btl/libs/eigen3/main_matmat.cpp | 2 +- .../Eigen/bench/btl/libs/f77/CMakeLists.txt | 6 - .../3rdparty/Eigen/bench/btl/libs/f77/daat.f | 14 - .../3rdparty/Eigen/bench/btl/libs/f77/data.f | 14 - .../3rdparty/Eigen/bench/btl/libs/f77/daxpy.f | 18 - .../3rdparty/Eigen/bench/btl/libs/f77/dmxm.f | 32 - .../Eigen/bench/btl/libs/f77/dmxm.f.mfr | 36 - .../3rdparty/Eigen/bench/btl/libs/f77/dmxv.f | 39 - .../Eigen/bench/btl/libs/f77/f77_interface.hh | 129 -- .../3rdparty/Eigen/bench/btl/libs/f77/saat.f | 14 - .../3rdparty/Eigen/bench/btl/libs/f77/sata.f | 14 - .../3rdparty/Eigen/bench/btl/libs/f77/saxpy.f | 16 - .../3rdparty/Eigen/bench/btl/libs/f77/smxm.f | 32 - .../3rdparty/Eigen/bench/btl/libs/f77/smxv.f | 38 - .../Eigen/bench/btl/libs/gmm/gmm_interface.hh | 2 +- .../Eigen/bench/btl/libs/gmm/main.cpp | 8 +- .../bench/btl/libs/hand_vec/CMakeLists.txt | 12 - .../btl/libs/hand_vec/hand_vec_interface.hh | 886 --------- .../Eigen/bench/btl/libs/hand_vec/main.cpp | 50 - .../bench/btl/libs/mtl4/mtl4_interface.hh | 6 +- .../Eigen/bench/btl/libs/ublas/main.cpp | 4 +- .../Eigen/bench/sparse_dense_product.cpp | 35 +- gtsam/3rdparty/Eigen/bench/sparse_product.cpp | 60 +- .../Eigen/bench/spbench/CMakeLists.txt | 65 + .../Eigen/bench/spbench/spbenchsolver.cpp | 90 + .../Eigen/bench/spbench/spbenchsolver.h | 548 ++++++ .../Eigen/blas/BandTriangularSolver.h | 112 ++ gtsam/3rdparty/Eigen/blas/CMakeLists.txt | 37 +- gtsam/3rdparty/Eigen/blas/common.h | 16 +- gtsam/3rdparty/Eigen/blas/ctbsv.f | 370 ---- gtsam/3rdparty/Eigen/blas/dtbsv.f | 339 ---- gtsam/3rdparty/Eigen/blas/level2_impl.h | 145 +- gtsam/3rdparty/Eigen/blas/level3_impl.h | 14 +- gtsam/3rdparty/Eigen/blas/stbsv.f | 339 ---- .../Eigen/blas/testing/runblastest.sh | 6 + gtsam/3rdparty/Eigen/blas/xerbla.cpp | 8 +- gtsam/3rdparty/Eigen/blas/ztbsv.f | 370 ---- .../cmake/CMakeDetermineVSServicePack.cmake | 103 + .../Eigen/cmake/EigenConfigureTesting.cmake | 79 + .../Eigen/cmake/EigenDetermineOSVersion.cmake | 46 + gtsam/3rdparty/Eigen/cmake/EigenTesting.cmake | 258 ++- gtsam/3rdparty/Eigen/cmake/FindCholmod.cmake | 1 + gtsam/3rdparty/Eigen/cmake/FindEigen2.cmake | 6 +- gtsam/3rdparty/Eigen/cmake/FindFFTW.cmake | 130 +- gtsam/3rdparty/Eigen/cmake/FindMetis.cmake | 24 + gtsam/3rdparty/Eigen/cmake/FindPastix.cmake | 25 + gtsam/3rdparty/Eigen/cmake/FindScotch.cmake | 24 + gtsam/3rdparty/Eigen/cmake/FindUmfpack.cmake | 1 + .../Eigen/cmake/language_support.cmake | 64 + gtsam/3rdparty/Eigen/debug/gdb/printers.py | 63 +- .../Eigen/debug/msvc/eigen_autoexp_part.dat | 590 +++--- .../Eigen/doc/C00_QuickStartGuide.dox | 4 + .../Eigen/doc/C06_TutorialLinearAlgebra.dox | 3 + ...TutorialReductionsVisitorsBroadcasting.dox | 19 +- .../Eigen/doc/C08_TutorialGeometry.dox | 31 +- .../3rdparty/Eigen/doc/C09_TutorialSparse.dox | 504 +++-- gtsam/3rdparty/Eigen/doc/CMakeLists.txt | 1 + .../doc/D09_StructHavingEigenMembers.dox | 61 + gtsam/3rdparty/Eigen/doc/Doxyfile.in | 29 +- .../Eigen/doc/I00_CustomizingEigen.dox | 44 +- .../3rdparty/Eigen/doc/I09_Vectorization.dox | 2 +- .../Eigen/doc/I14_PreprocessorDirectives.dox | 50 +- .../3rdparty/Eigen/doc/I15_StorageOrders.dox | 6 +- .../Eigen/doc/I16_TemplateKeyword.dox | 136 ++ gtsam/3rdparty/Eigen/doc/Overview.dox | 3 + 
gtsam/3rdparty/Eigen/doc/QuickReference.dox | 16 +- .../Eigen/doc/SparseQuickReference.dox | 198 ++ .../doc/TopicLinearAlgebraDecompositions.dox | 10 +- .../Eigen/doc/TopicMultithreading.dox | 46 + .../doc/TutorialSparse_example_details.dox | 4 + gtsam/3rdparty/Eigen/doc/UsingIntelMKL.dox | 168 ++ gtsam/3rdparty/Eigen/doc/eigendoxy.css | 5 + .../doc/examples/TemplateKeyword_flexible.cpp | 22 + .../doc/examples/TemplateKeyword_simple.cpp | 20 + ...sBroadcasting_broadcast_simple_rowwise.cpp | 2 +- .../Eigen/doc/snippets/Cwise_boolean_and.cpp | 2 + .../Eigen/doc/snippets/Cwise_boolean_or.cpp | 2 + .../Eigen/doc/snippets/LLT_example.cpp | 12 + .../Eigen/doc/snippets/compile_snippet.cpp.in | 8 +- .../Eigen/doc/special_examples/CMakeLists.txt | 20 + .../Tutorial_sparse_example.cpp | 32 + .../Tutorial_sparse_example_details.cpp | 44 + gtsam/3rdparty/Eigen/lapack/CMakeLists.txt | 34 +- gtsam/3rdparty/Eigen/lapack/cholesky.cpp | 4 +- gtsam/3rdparty/Eigen/scripts/eigen_gen_docs | 9 +- gtsam/3rdparty/Eigen/test/CMakeLists.txt | 132 +- gtsam/3rdparty/Eigen/test/adjoint.cpp | 24 +- gtsam/3rdparty/Eigen/test/array.cpp | 94 +- .../3rdparty/Eigen/test/array_for_matrix.cpp | 51 +- gtsam/3rdparty/Eigen/test/basicstuff.cpp | 15 +- gtsam/3rdparty/Eigen/test/bicgstab.cpp | 45 + gtsam/3rdparty/Eigen/test/block.cpp | 6 +- gtsam/3rdparty/Eigen/test/cholesky.cpp | 88 +- gtsam/3rdparty/Eigen/test/cholmod_support.cpp | 71 + .../Eigen/test/conjugate_gradient.cpp | 45 + gtsam/3rdparty/Eigen/test/cwiseop.cpp | 15 +- gtsam/3rdparty/Eigen/test/determinant.cpp | 6 +- gtsam/3rdparty/Eigen/test/diagonal.cpp | 8 +- .../3rdparty/Eigen/test/diagonalmatrices.cpp | 8 +- gtsam/3rdparty/Eigen/test/eigen2/main.h | 4 - gtsam/3rdparty/Eigen/test/eigen2support.cpp | 1 - .../Eigen/test/eigensolver_complex.cpp | 11 +- .../Eigen/test/eigensolver_generic.cpp | 26 +- .../Eigen/test/eigensolver_selfadjoint.cpp | 82 +- gtsam/3rdparty/Eigen/test/geo_alignedbox.cpp | 26 +- gtsam/3rdparty/Eigen/test/geo_eulerangles.cpp | 1 - gtsam/3rdparty/Eigen/test/geo_homogeneous.cpp | 10 +- .../3rdparty/Eigen/test/geo_orthomethods.cpp | 4 +- .../Eigen/test/geo_parametrizedline.cpp | 11 + gtsam/3rdparty/Eigen/test/geo_quaternion.cpp | 11 + .../Eigen/test/geo_transformations.cpp | 43 +- gtsam/3rdparty/Eigen/test/gsl_helper.h | 212 --- gtsam/3rdparty/Eigen/test/hessenberg.cpp | 4 +- gtsam/3rdparty/Eigen/test/householder.cpp | 6 +- gtsam/3rdparty/Eigen/test/inverse.cpp | 2 +- gtsam/3rdparty/Eigen/test/jacobi.cpp | 4 +- gtsam/3rdparty/Eigen/test/jacobisvd.cpp | 18 +- gtsam/3rdparty/Eigen/test/linearstructure.cpp | 13 +- gtsam/3rdparty/Eigen/test/lu.cpp | 8 +- gtsam/3rdparty/Eigen/test/main.h | 122 +- gtsam/3rdparty/Eigen/test/mixingtypes.cpp | 2 +- gtsam/3rdparty/Eigen/test/nomalloc.cpp | 29 +- gtsam/3rdparty/Eigen/test/nullary.cpp | 22 +- gtsam/3rdparty/Eigen/test/pardiso_support.cpp | 29 + gtsam/3rdparty/Eigen/test/pastix_support.cpp | 59 + .../Eigen/test/permutationmatrices.cpp | 17 - gtsam/3rdparty/Eigen/test/product.h | 7 +- gtsam/3rdparty/Eigen/test/product_extra.cpp | 8 +- gtsam/3rdparty/Eigen/test/product_large.cpp | 10 +- gtsam/3rdparty/Eigen/test/product_mmtr.cpp | 8 +- .../Eigen/test/product_notemporary.cpp | 8 +- .../Eigen/test/product_selfadjoint.cpp | 9 +- gtsam/3rdparty/Eigen/test/product_small.cpp | 20 + gtsam/3rdparty/Eigen/test/product_symm.cpp | 16 +- gtsam/3rdparty/Eigen/test/product_syrk.cpp | 8 +- gtsam/3rdparty/Eigen/test/product_trmm.cpp | 130 +- gtsam/3rdparty/Eigen/test/product_trmv.cpp | 7 +- 
gtsam/3rdparty/Eigen/test/product_trsolve.cpp | 12 +- gtsam/3rdparty/Eigen/test/qr.cpp | 4 +- gtsam/3rdparty/Eigen/test/qr_colpivoting.cpp | 2 +- gtsam/3rdparty/Eigen/test/redux.cpp | 47 +- gtsam/3rdparty/Eigen/test/schur_complex.cpp | 2 +- gtsam/3rdparty/Eigen/test/schur_real.cpp | 2 +- gtsam/3rdparty/Eigen/test/selfadjoint.cpp | 7 +- .../Eigen/test/simplicial_cholesky.cpp | 55 + gtsam/3rdparty/Eigen/test/smallvectors.cpp | 25 + gtsam/3rdparty/Eigen/test/sparse.h | 63 +- gtsam/3rdparty/Eigen/test/sparse_basic.cpp | 166 +- .../Eigen/test/sparse_permutations.cpp | 202 ++ gtsam/3rdparty/Eigen/test/sparse_product.cpp | 122 +- gtsam/3rdparty/Eigen/test/sparse_solver.h | 324 ++++ gtsam/3rdparty/Eigen/test/sparse_solvers.cpp | 9 + gtsam/3rdparty/Eigen/test/sparse_vector.cpp | 11 +- gtsam/3rdparty/Eigen/test/superlu_support.cpp | 37 + gtsam/3rdparty/Eigen/test/triangular.cpp | 39 +- gtsam/3rdparty/Eigen/test/umfpack_support.cpp | 46 + gtsam/3rdparty/Eigen/test/vectorwiseop.cpp | 187 ++ gtsam/3rdparty/Eigen/test/zerosized.cpp | 5 + .../Eigen/unsupported/Eigen/AdolcForward | 29 +- gtsam/3rdparty/Eigen/unsupported/Eigen/BVH | 5 +- .../Eigen/unsupported/Eigen/CMakeLists.txt | 4 +- .../Eigen/unsupported/Eigen/CholmodSupport | 33 - gtsam/3rdparty/Eigen/unsupported/Eigen/FFT | 6 +- .../Eigen/unsupported/Eigen/IterativeSolvers | 16 +- .../Eigen/unsupported/Eigen/KroneckerProduct | 26 + .../Eigen/unsupported/Eigen/MPRealSupport | 20 +- .../Eigen/unsupported/Eigen/MatrixFunctions | 140 +- .../Eigen/unsupported/Eigen/MoreVectorization | 4 +- .../unsupported/Eigen/NonLinearOptimization | 5 - .../Eigen/unsupported/Eigen/NumericalDiff | 3 +- .../Eigen/unsupported/Eigen/Polynomials | 4 - .../3rdparty/Eigen/unsupported/Eigen/Skyline | 18 +- .../Eigen/unsupported/Eigen/SparseExtra | 42 +- .../Eigen/Splines} | 25 +- .../Eigen/unsupported/Eigen/SuperLUSupport | 35 - .../Eigen/unsupported/Eigen/UmfPackSupport | 33 - .../Eigen/src/AutoDiff/AutoDiffJacobian.h | 2 +- .../Eigen/src/AutoDiff/AutoDiffScalar.h | 171 +- .../unsupported/Eigen/src/BVH/BVAlgorithms.h | 4 + .../Eigen/unsupported/Eigen/src/BVH/KdBVH.h | 10 +- .../unsupported/Eigen/src/CMakeLists.txt | 2 + .../unsupported/Eigen/src/FFT/ei_fftw_impl.h | 4 + .../Eigen/src/FFT/ei_kissfft_impl.h | 5 +- .../IterativeSolvers/ConstrainedConjGrad.h | 4 + .../Eigen/src/IterativeSolvers/GMRES.h | 394 ++++ .../Eigen/src/IterativeSolvers/IncompleteLU.h | 128 ++ .../IterativeSolvers/IterationController.h | 4 + .../Eigen/src/IterativeSolvers/Scaling.h | 200 ++ .../Eigen/src/KroneckerProduct/CMakeLists.txt | 6 + .../KroneckerProduct/KroneckerTensorProduct.h | 172 ++ .../src/MatrixFunctions/MatrixExponential.h | 148 +- .../src/MatrixFunctions/MatrixFunction.h | 160 +- .../MatrixFunctions/MatrixFunctionAtomic.h | 4 + .../src/MatrixFunctions/MatrixLogarithm.h | 510 +++++ .../src/MatrixFunctions/MatrixSquareRoot.h | 499 +++++ .../Eigen/src/MatrixFunctions/StemFunction.h | 4 + .../src/MoreVectorization/MathFunctions.h | 6 +- .../HybridNonLinearSolver.h | 6 +- .../LevenbergMarquardt.h | 7 +- .../Eigen/src/NonLinearOptimization/chkder.h | 3 + .../Eigen/src/NonLinearOptimization/covar.h | 4 + .../Eigen/src/NonLinearOptimization/dogleg.h | 4 + .../Eigen/src/NonLinearOptimization/fdjac1.h | 4 + .../Eigen/src/NonLinearOptimization/lmpar.h | 4 + .../Eigen/src/NonLinearOptimization/qrsolv.h | 4 + .../Eigen/src/NonLinearOptimization/r1mpyq.h | 4 + .../Eigen/src/NonLinearOptimization/r1updt.h | 4 + .../Eigen/src/NonLinearOptimization/rwupdt.h | 3 + .../Eigen/src/NumericalDiff/NumericalDiff.h 
| 6 +- .../Eigen/src/Polynomials/Companion.h | 6 +- .../Eigen/src/Polynomials/PolynomialSolver.h | 4 + .../Eigen/src/Polynomials/PolynomialUtils.h | 3 + .../Eigen/src/Skyline/SkylineInplaceLU.h | 4 + .../Eigen/src/Skyline/SkylineMatrix.h | 4 + .../Eigen/src/Skyline/SkylineMatrixBase.h | 4 + .../Eigen/src/Skyline/SkylineProduct.h | 4 + .../Eigen/src/Skyline/SkylineStorage.h | 4 + .../Eigen/src/Skyline/SkylineUtil.h | 3 + .../SparseExtra/BlockOfDynamicSparseMatrix.h | 129 ++ .../src/SparseExtra/CholmodSupportLegacy.h | 517 ----- .../src/SparseExtra}/DynamicSparseMatrix.h | 48 +- .../Eigen/src/SparseExtra/MarketIO.h | 288 +++ .../src/SparseExtra/MatrixMarketIterator.h | 236 +++ .../Eigen/src/SparseExtra/RandomSetter.h | 28 +- .../src/SparseExtra/SimplicialCholesky.h | 477 ----- .../Eigen/src/SparseExtra/SparseLDLTLegacy.h | 414 ---- .../Eigen/src/SparseExtra/SparseLLT.h | 245 --- .../Eigen/src/SparseExtra/SparseLU.h | 163 -- .../Eigen/src/SparseExtra/SuperLUSupport.h | 667 ------- .../Eigen/src/SparseExtra/UmfPackSupport.h | 350 ---- .../Eigen/src/Splines/CMakeLists.txt | 6 + .../unsupported/Eigen/src/Splines/Spline.h | 479 +++++ .../Eigen/src/Splines/SplineFitting.h | 174 ++ .../unsupported/Eigen/src/Splines/SplineFwd.h | 101 + .../Eigen/unsupported/doc/Doxyfile.in | 9 +- .../doc/examples/MatrixLogarithm.cpp | 15 + .../doc/examples/MatrixSquareRoot.cpp | 16 + gtsam/3rdparty/Eigen/unsupported/test/BVH.cpp | 16 +- .../Eigen/unsupported/test/CMakeLists.txt | 73 +- .../Eigen/unsupported/test/autodiff.cpp | 33 +- .../Eigen/unsupported/test/forward_adolc.cpp | 17 + .../3rdparty/Eigen/unsupported/test/gmres.cpp | 48 + .../unsupported/test/kronecker_product.cpp | 194 ++ .../unsupported/test/matrix_exponential.cpp | 5 +- .../unsupported/test/matrix_function.cpp | 21 + .../unsupported/test/matrix_square_root.cpp | 77 + .../Eigen/unsupported/test/mpreal/dlmalloc.c | 2 +- .../Eigen/unsupported/test/mpreal/mpreal.cpp | 342 ++-- .../Eigen/unsupported/test/mpreal/mpreal.h | 1696 ++++++----------- .../Eigen/unsupported/test/mpreal_support.cpp | 17 + .../unsupported/test/polynomialsolver.cpp | 30 - .../Eigen/unsupported/test/sparse_extra.cpp | 18 +- .../Eigen/unsupported/test/sparse_ldlt.cpp | 175 -- .../Eigen/unsupported/test/sparse_llt.cpp | 140 -- .../Eigen/unsupported/test/sparse_lu.cpp | 113 -- .../Eigen/unsupported/test/splines.cpp | 255 +++ 544 files changed, 27601 insertions(+), 14578 deletions(-) create mode 100644 gtsam/3rdparty/Eigen/COPYING.BSD create mode 100644 gtsam/3rdparty/Eigen/Eigen/CholmodSupport create mode 100644 gtsam/3rdparty/Eigen/Eigen/IterativeLinearSolvers create mode 100644 gtsam/3rdparty/Eigen/Eigen/OrderingMethods create mode 100644 gtsam/3rdparty/Eigen/Eigen/PaStiXSupport create mode 100644 gtsam/3rdparty/Eigen/Eigen/PardisoSupport create mode 100644 gtsam/3rdparty/Eigen/Eigen/SparseCholesky create mode 100644 gtsam/3rdparty/Eigen/Eigen/SparseCore create mode 100644 gtsam/3rdparty/Eigen/Eigen/SuperLUSupport create mode 100644 gtsam/3rdparty/Eigen/Eigen/UmfPackSupport create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/Cholesky/LLT_MKL.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/CholmodSupport/CMakeLists.txt rename gtsam/3rdparty/Eigen/{unsupported/Eigen/src/SparseExtra => Eigen/src/CholmodSupport}/CholmodSupport.h (60%) create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/Core/Assign_MKL.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/Core/GeneralProduct.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_MKL.h create mode 
100644 gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixVector_MKL.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularMatrixVector_MKL.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/Core/util/MKL_support.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/ComplexSchur_MKL.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/RealSchur_MKL.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/CMakeLists.txt create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/LU/PartialPivLU_MKL.h rename gtsam/3rdparty/Eigen/{unsupported/Eigen/src/SparseExtra => Eigen/src/OrderingMethods}/Amd.h (91%) create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/OrderingMethods/CMakeLists.txt create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/PaStiXSupport/CMakeLists.txt create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/PardisoSupport/CMakeLists.txt create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/PardisoSupport/PardisoSupport.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/QR/ColPivHouseholderQR_MKL.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/QR/HouseholderQR_MKL.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/SVD/JacobiSVD_MKL.h delete mode 100644 gtsam/3rdparty/Eigen/Eigen/src/Sparse/CMakeLists.txt delete mode 100644 gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseMatrix.h delete mode 100644 gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseSparseProduct.h delete mode 100644 gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseTriangularView.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/SparseCholesky/CMakeLists.txt create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/AmbiVector.h (98%) create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/SparseCore/CMakeLists.txt rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/CompressedStorage.h (95%) create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/CoreIterators.h (96%) rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/MappedSparseMatrix.h (74%) rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/SparseAssign.h (100%) rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/SparseBlock.h (71%) rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/SparseCwiseBinaryOp.h (84%) rename 
gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/SparseCwiseUnaryOp.h (50%) rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/SparseDenseProduct.h (61%) rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/SparseDiagonalProduct.h (99%) rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/SparseDot.h (86%) rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/SparseFuzzy.h (100%) create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseMatrix.h rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/SparseMatrixBase.h (55%) create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparsePermutation.h rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/SparseProduct.h (68%) rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/SparseRedux.h (97%) rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/SparseSelfAdjointView.h (77%) create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/SparseTranspose.h (73%) create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseTriangularView.h rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/SparseUtil.h (67%) rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/SparseVector.h (70%) rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/SparseView.h (91%) rename gtsam/3rdparty/Eigen/Eigen/src/{Sparse => SparseCore}/TriangularSolver.h (89%) create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/SuperLUSupport/CMakeLists.txt create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/UmfPackSupport/CMakeLists.txt create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h rename gtsam/3rdparty/Eigen/{unsupported/Eigen/src/SparseExtra/Solve.h => Eigen/src/misc/SparseSolve.h} (70%) create mode 100644 gtsam/3rdparty/Eigen/Eigen/src/misc/blas.h delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/cmake/FindEigen3.cmake rename gtsam/3rdparty/Eigen/bench/btl/libs/{C_BLAS => BLAS}/CMakeLists.txt (69%) rename gtsam/3rdparty/Eigen/bench/btl/libs/{C_BLAS => BLAS}/blas.h (100%) create mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/blas_interface.hh create mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/blas_interface_impl.hh rename gtsam/3rdparty/Eigen/bench/btl/libs/{f77/f77_interface_base.hh => BLAS/c_interface_base.h} (51%) create mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/main.cpp delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/C/CMakeLists.txt delete mode 100755 gtsam/3rdparty/Eigen/bench/btl/libs/C/C_interface.hh delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/C_BLAS/C_BLAS_interface.hh delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/C_BLAS/cblas.h delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/C_BLAS/main.cpp delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/STL_algo/CMakeLists.txt delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/STL_algo/STL_algo_interface.hh create mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/eigen2/CMakeLists.txt rename gtsam/3rdparty/Eigen/bench/btl/libs/{f77/main.cpp => eigen2/btl_tiny_eigen2.cpp} (64%) create mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/eigen2/eigen2_interface.hh create mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/eigen2/main_adv.cpp rename gtsam/3rdparty/Eigen/bench/btl/libs/{STL_algo/main.cpp => eigen2/main_linear.cpp} (70%) rename 
gtsam/3rdparty/Eigen/bench/btl/libs/{f77/test_interface.hh => eigen2/main_matmat.cpp} (59%) rename gtsam/3rdparty/Eigen/bench/btl/libs/{C/main.cpp => eigen2/main_vecmat.cpp} (50%) delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/f77/CMakeLists.txt delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/f77/daat.f delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/f77/data.f delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/f77/daxpy.f delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/f77/dmxm.f delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/f77/dmxm.f.mfr delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/f77/dmxv.f delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/f77/f77_interface.hh delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/f77/saat.f delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/f77/sata.f delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/f77/saxpy.f delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/f77/smxm.f delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/f77/smxv.f delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/hand_vec/CMakeLists.txt delete mode 100755 gtsam/3rdparty/Eigen/bench/btl/libs/hand_vec/hand_vec_interface.hh delete mode 100644 gtsam/3rdparty/Eigen/bench/btl/libs/hand_vec/main.cpp create mode 100644 gtsam/3rdparty/Eigen/bench/spbench/CMakeLists.txt create mode 100644 gtsam/3rdparty/Eigen/bench/spbench/spbenchsolver.cpp create mode 100644 gtsam/3rdparty/Eigen/bench/spbench/spbenchsolver.h create mode 100644 gtsam/3rdparty/Eigen/blas/BandTriangularSolver.h delete mode 100644 gtsam/3rdparty/Eigen/blas/ctbsv.f delete mode 100644 gtsam/3rdparty/Eigen/blas/dtbsv.f delete mode 100644 gtsam/3rdparty/Eigen/blas/stbsv.f delete mode 100644 gtsam/3rdparty/Eigen/blas/ztbsv.f create mode 100644 gtsam/3rdparty/Eigen/cmake/CMakeDetermineVSServicePack.cmake create mode 100644 gtsam/3rdparty/Eigen/cmake/EigenConfigureTesting.cmake create mode 100644 gtsam/3rdparty/Eigen/cmake/EigenDetermineOSVersion.cmake create mode 100644 gtsam/3rdparty/Eigen/cmake/FindMetis.cmake create mode 100644 gtsam/3rdparty/Eigen/cmake/FindPastix.cmake create mode 100644 gtsam/3rdparty/Eigen/cmake/FindScotch.cmake create mode 100644 gtsam/3rdparty/Eigen/cmake/language_support.cmake create mode 100644 gtsam/3rdparty/Eigen/doc/I16_TemplateKeyword.dox create mode 100644 gtsam/3rdparty/Eigen/doc/SparseQuickReference.dox create mode 100644 gtsam/3rdparty/Eigen/doc/TopicMultithreading.dox create mode 100644 gtsam/3rdparty/Eigen/doc/TutorialSparse_example_details.dox create mode 100644 gtsam/3rdparty/Eigen/doc/UsingIntelMKL.dox create mode 100644 gtsam/3rdparty/Eigen/doc/examples/TemplateKeyword_flexible.cpp create mode 100644 gtsam/3rdparty/Eigen/doc/examples/TemplateKeyword_simple.cpp create mode 100644 gtsam/3rdparty/Eigen/doc/snippets/Cwise_boolean_and.cpp create mode 100644 gtsam/3rdparty/Eigen/doc/snippets/Cwise_boolean_or.cpp create mode 100644 gtsam/3rdparty/Eigen/doc/snippets/LLT_example.cpp create mode 100644 gtsam/3rdparty/Eigen/doc/special_examples/CMakeLists.txt create mode 100644 gtsam/3rdparty/Eigen/doc/special_examples/Tutorial_sparse_example.cpp create mode 100644 gtsam/3rdparty/Eigen/doc/special_examples/Tutorial_sparse_example_details.cpp create mode 100644 gtsam/3rdparty/Eigen/test/bicgstab.cpp create mode 100644 gtsam/3rdparty/Eigen/test/cholmod_support.cpp create mode 100644 gtsam/3rdparty/Eigen/test/conjugate_gradient.cpp delete mode 100644 gtsam/3rdparty/Eigen/test/gsl_helper.h create mode 100644 
gtsam/3rdparty/Eigen/test/pardiso_support.cpp create mode 100644 gtsam/3rdparty/Eigen/test/pastix_support.cpp create mode 100644 gtsam/3rdparty/Eigen/test/simplicial_cholesky.cpp create mode 100644 gtsam/3rdparty/Eigen/test/sparse_permutations.cpp create mode 100644 gtsam/3rdparty/Eigen/test/sparse_solver.h create mode 100644 gtsam/3rdparty/Eigen/test/superlu_support.cpp create mode 100644 gtsam/3rdparty/Eigen/test/umfpack_support.cpp create mode 100644 gtsam/3rdparty/Eigen/test/vectorwiseop.cpp delete mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/CholmodSupport create mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/KroneckerProduct rename gtsam/3rdparty/Eigen/{Eigen/src/Eigenvalues/EigenvaluesCommon.h => unsupported/Eigen/Splines} (63%) delete mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/SuperLUSupport delete mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/UmfPackSupport create mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/GMRES.h create mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h create mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/Scaling.h create mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/KroneckerProduct/CMakeLists.txt create mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h create mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h create mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h create mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h delete mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/CholmodSupportLegacy.h rename gtsam/3rdparty/Eigen/{Eigen/src/Sparse => unsupported/Eigen/src/SparseExtra}/DynamicSparseMatrix.h (87%) create mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/MarketIO.h create mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h delete mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SimplicialCholesky.h delete mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SparseLDLTLegacy.h delete mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SparseLLT.h delete mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SparseLU.h delete mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SuperLUSupport.h delete mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h create mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/CMakeLists.txt create mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/Spline.h create mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/SplineFitting.h create mode 100644 gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/SplineFwd.h create mode 100644 gtsam/3rdparty/Eigen/unsupported/doc/examples/MatrixLogarithm.cpp create mode 100644 gtsam/3rdparty/Eigen/unsupported/doc/examples/MatrixSquareRoot.cpp create mode 100644 gtsam/3rdparty/Eigen/unsupported/test/gmres.cpp create mode 100644 gtsam/3rdparty/Eigen/unsupported/test/kronecker_product.cpp create mode 100644 gtsam/3rdparty/Eigen/unsupported/test/matrix_square_root.cpp delete mode 100644 gtsam/3rdparty/Eigen/unsupported/test/sparse_ldlt.cpp delete mode 100644 gtsam/3rdparty/Eigen/unsupported/test/sparse_llt.cpp delete mode 100644 gtsam/3rdparty/Eigen/unsupported/test/sparse_lu.cpp create mode 
100644 gtsam/3rdparty/Eigen/unsupported/test/splines.cpp diff --git a/gtsam/3rdparty/Eigen/CMakeLists.txt b/gtsam/3rdparty/Eigen/CMakeLists.txt index 4a6a849e9..3ba310a27 100644 --- a/gtsam/3rdparty/Eigen/CMakeLists.txt +++ b/gtsam/3rdparty/Eigen/CMakeLists.txt @@ -64,6 +64,10 @@ set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) find_package(StandardMathLibrary) + +set(EIGEN_TEST_CUSTOM_LINKER_FLAGS "" CACHE STRING "Additional linker flags when linking unit tests.") +set(EIGEN_TEST_CUSTOM_CXX_FLAGS "" CACHE STRING "Additional compiler flags when compiling unit tests.") + set(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO "") if(NOT STANDARD_MATH_LIBRARY_FOUND) @@ -103,6 +107,8 @@ endif() add_definitions("-DEIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS") +set(EIGEN_TEST_MAX_SIZE "320" CACHE STRING "Maximal matrix/vector size, default is 320") + if(CMAKE_COMPILER_IS_GNUCXX) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wnon-virtual-dtor -Wno-long-long -ansi -Wundef -Wcast-align -Wchar-subscripts -Wall -W -Wpointer-arith -Wwrite-strings -Wformat-security -fexceptions -fno-check-new -fno-common -fstrict-aliasing") set(CMAKE_CXX_FLAGS_DEBUG "-g3") @@ -158,7 +164,7 @@ if(CMAKE_COMPILER_IS_GNUCXX) option(EIGEN_TEST_NEON "Enable/Disable Neon in tests/examples" OFF) if(EIGEN_TEST_NEON) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfloat-abi=softfp -mfpu=neon -mcpu=cortex-a8") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=neon -mcpu=cortex-a8") message(STATUS "Enabling NEON in tests/examples") endif() @@ -301,44 +307,9 @@ add_subdirectory(Eigen) add_subdirectory(doc EXCLUDE_FROM_ALL) -add_custom_target(buildtests) -add_custom_target(check COMMAND "ctest") -add_dependencies(check buildtests) - -# CMake/Ctest does not allow us to change the build command, -# so we have to workaround by directly editing the generated DartConfiguration.tcl file -# save CMAKE_MAKE_PROGRAM -set(CMAKE_MAKE_PROGRAM_SAVE ${CMAKE_MAKE_PROGRAM}) -# and set a fake one -set(CMAKE_MAKE_PROGRAM "@EIGEN_MAKECOMMAND_PLACEHOLDER@") - -include(CTest) +include(EigenConfigureTesting) +# fixme, not sure this line is still needed: enable_testing() # must be called from the root CMakeLists, see man page -include(EigenTesting) -ei_init_testing() - -# overwrite default DartConfiguration.tcl -# The worarounds are different for each version of the MSVC IDE -if(MSVC_IDE) - if(MSVC_VERSION EQUAL 1600) # MSVC 2010 - set(EIGEN_MAKECOMMAND_PLACEHOLDER "${CMAKE_MAKE_PROGRAM_SAVE} buildtests.vcxproj /p:Configuration=\${CTEST_CONFIGURATION_TYPE} \n # ") - else() # MSVC 2008 (TODO check MSVC 2005) - set(EIGEN_MAKECOMMAND_PLACEHOLDER "${CMAKE_MAKE_PROGRAM_SAVE} /project buildtests") - endif() -else() - # for make and nmake - set(EIGEN_MAKECOMMAND_PLACEHOLDER "${CMAKE_MAKE_PROGRAM_SAVE} buildtests") -endif() - -configure_file(${CMAKE_BINARY_DIR}/DartConfiguration.tcl ${CMAKE_BINARY_DIR}/DartConfiguration.tcl) -# restore default CMAKE_MAKE_PROGRAM -set(CMAKE_MAKE_PROGRAM ${CMAKE_MAKE_PROGRAM_SAVE}) -# un-set temporary variables so that it is like they never existed. -# CMake 2.6.3 introduces the more logical unset() syntax for this. 
-set(CMAKE_MAKE_PROGRAM_SAVE) -set(EIGEN_MAKECOMMAND_PLACEHOLDER) - -configure_file(${CMAKE_SOURCE_DIR}/CTestCustom.cmake.in ${CMAKE_BINARY_DIR}/CTestCustom.cmake) if(EIGEN_LEAVE_TEST_IN_ALL_TARGET) @@ -347,15 +318,13 @@ else() add_subdirectory(test EXCLUDE_FROM_ALL) endif() -if(NOT MSVC) - if(EIGEN_LEAVE_TEST_IN_ALL_TARGET) - add_subdirectory(blas) - add_subdirectory(lapack) - else() - add_subdirectory(blas EXCLUDE_FROM_ALL) - add_subdirectory(lapack EXCLUDE_FROM_ALL) - endif() -endif(NOT MSVC) +if(EIGEN_LEAVE_TEST_IN_ALL_TARGET) + add_subdirectory(blas) + add_subdirectory(lapack) +else() + add_subdirectory(blas EXCLUDE_FROM_ALL) + add_subdirectory(lapack EXCLUDE_FROM_ALL) +endif() add_subdirectory(unsupported) @@ -369,6 +338,10 @@ if(EIGEN_BUILD_BTL) add_subdirectory(bench/btl EXCLUDE_FROM_ALL) endif(EIGEN_BUILD_BTL) +if(NOT WIN32) + add_subdirectory(bench/spbench EXCLUDE_FROM_ALL) +endif(NOT WIN32) + ei_testing_print_summary() message(STATUS "") diff --git a/gtsam/3rdparty/Eigen/COPYING.BSD b/gtsam/3rdparty/Eigen/COPYING.BSD new file mode 100644 index 000000000..11971ffe2 --- /dev/null +++ b/gtsam/3rdparty/Eigen/COPYING.BSD @@ -0,0 +1,26 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ \ No newline at end of file diff --git a/gtsam/3rdparty/Eigen/CTestConfig.cmake b/gtsam/3rdparty/Eigen/CTestConfig.cmake index 7edc9d48d..a5a4eb012 100644 --- a/gtsam/3rdparty/Eigen/CTestConfig.cmake +++ b/gtsam/3rdparty/Eigen/CTestConfig.cmake @@ -8,6 +8,6 @@ set(CTEST_PROJECT_NAME "Eigen") set(CTEST_NIGHTLY_START_TIME "00:00:00 UTC") set(CTEST_DROP_METHOD "http") -set(CTEST_DROP_SITE "eigen.tuxfamily.org") +set(CTEST_DROP_SITE "manao.inria.fr") set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen") set(CTEST_DROP_SITE_CDASH TRUE) diff --git a/gtsam/3rdparty/Eigen/Eigen/Cholesky b/gtsam/3rdparty/Eigen/Eigen/Cholesky index 53f7bf911..f727f5d89 100644 --- a/gtsam/3rdparty/Eigen/Eigen/Cholesky +++ b/gtsam/3rdparty/Eigen/Eigen/Cholesky @@ -5,8 +5,6 @@ #include "src/Core/util/DisableStupidWarnings.h" -namespace Eigen { - /** \defgroup Cholesky_Module Cholesky module * * @@ -24,8 +22,9 @@ namespace Eigen { #include "src/misc/Solve.h" #include "src/Cholesky/LLT.h" #include "src/Cholesky/LDLT.h" - -} // namespace Eigen +#ifdef EIGEN_USE_LAPACKE +#include "src/Cholesky/LLT_MKL.h" +#endif #include "src/Core/util/ReenableStupidWarnings.h" diff --git a/gtsam/3rdparty/Eigen/Eigen/CholmodSupport b/gtsam/3rdparty/Eigen/Eigen/CholmodSupport new file mode 100644 index 000000000..745b884e7 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/CholmodSupport @@ -0,0 +1,45 @@ +#ifndef EIGEN_CHOLMODSUPPORT_MODULE_H +#define EIGEN_CHOLMODSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +extern "C" { + #include +} + +/** \ingroup Support_modules + * \defgroup CholmodSupport_Module CholmodSupport module + * + * This module provides an interface to the Cholmod library which is part of the suitesparse package. + * It provides the two following main factorization classes: + * - class CholmodSupernodalLLT: a supernodal LLT Cholesky factorization. + * - class CholmodDecomposiiton: a general L(D)LT Cholesky factorization with automatic or explicit runtime selection of the underlying factorization method (supernodal or simplicial). + * + * For the sake of completeness, this module also propose the two following classes: + * - class CholmodSimplicialLLT + * - class CholmodSimplicialLDLT + * Note that these classes does not bring any particular advantage compared to the built-in + * SimplicialLLT and SimplicialLDLT factorization classes. + * + * \code + * #include + * \endcode + * + * In order to use this module, the cholmod headers must be accessible from the include paths, and your binary must be linked to the cholmod library and its dependencies. + * The dependencies depend on how cholmod has been compiled. + * For a cmake based project, you can use our FindCholmod.cmake module to help you in this task. + * + */ + +#include "src/misc/Solve.h" +#include "src/misc/SparseSolve.h" + +#include "src/CholmodSupport/CholmodSupport.h" + + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_CHOLMODSUPPORT_MODULE_H + diff --git a/gtsam/3rdparty/Eigen/Eigen/Core b/gtsam/3rdparty/Eigen/Eigen/Core index a5025e37e..0cf101636 100644 --- a/gtsam/3rdparty/Eigen/Eigen/Core +++ b/gtsam/3rdparty/Eigen/Eigen/Core @@ -34,6 +34,12 @@ // defined e.g. EIGEN_DONT_ALIGN) so it needs to be done before we do anything with vectorization. 
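(Illustrative aside, not part of the patch: the CholmodSupport module header added above documents the new CHOLMOD-backed factorization classes. A minimal usage sketch of that interface, assuming the SuiteSparse/CHOLMOD headers and libraries are installed and linked, and using an arbitrary small SPD test matrix:)

    #include <Eigen/Dense>
    #include <Eigen/Sparse>
    #include <Eigen/CholmodSupport>
    #include <vector>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      // assemble a small symmetric positive definite tridiagonal matrix from triplets
      std::vector<Eigen::Triplet<double> > coeffs;
      for (int i = 0; i < 4; ++i) coeffs.push_back(Eigen::Triplet<double>(i, i, 4.0));
      for (int i = 0; i < 3; ++i) {
        coeffs.push_back(Eigen::Triplet<double>(i, i + 1, -1.0));
        coeffs.push_back(Eigen::Triplet<double>(i + 1, i, -1.0));
      }
      SpMat A(4, 4);
      A.setFromTriplets(coeffs.begin(), coeffs.end());
      Eigen::VectorXd b = Eigen::VectorXd::Ones(4);

      // supernodal Cholesky factorization performed by CHOLMOD
      Eigen::CholmodSupernodalLLT<SpMat> solver;
      solver.compute(A);
      Eigen::VectorXd x = solver.solve(b);
      return solver.info() == Eigen::Success ? 0 : 1;
    }

(As the module documentation above notes, the built-in SimplicialLLT/SimplicialLDLT classes from the new SparseCholesky module are drop-in alternatives when CHOLMOD is not available.)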
#include "src/Core/util/Macros.h" +#include + +// this include file manages BLAS and MKL related macros +// and inclusion of their respective header files +#include "src/Core/util/MKL_support.h" + // if alignment is disabled, then disable vectorization. Note: EIGEN_ALIGN is the proper check, it takes into // account both the user's will (EIGEN_DONT_ALIGN) and our own platform checks #if !EIGEN_ALIGN @@ -136,7 +142,7 @@ #endif // MSVC for windows mobile does not have the errno.h file -#if !(defined(_MSC_VER) && defined(_WIN32_WCE)) +#if !(defined(_MSC_VER) && defined(_WIN32_WCE)) && !defined(__ARMCC_VERSION) #define EIGEN_HAS_ERRNO #endif @@ -146,7 +152,6 @@ #include #include #include -#include #include #include #include @@ -175,9 +180,6 @@ #include #endif -// defined in bits/termios.h -#undef B0 - /** \brief Namespace containing all symbols from the %Eigen library. */ namespace Eigen { @@ -201,6 +203,8 @@ inline static const char *SimdInstructionSetsInUse(void) { #endif } +} // end namespace Eigen + #define STAGE10_FULL_EIGEN2_API 10 #define STAGE20_RESOLVE_API_CONFLICTS 20 #define STAGE30_FULL_EIGEN3_API 30 @@ -247,6 +251,10 @@ using std::ptrdiff_t; * \endcode */ +/** \defgroup Support_modules Support modules [category] + * Category of modules which add support for external libraries. + */ + #include "src/Core/util/Constants.h" #include "src/Core/util/ForwardDeclarations.h" #include "src/Core/util/Meta.h" @@ -318,15 +326,15 @@ using std::ptrdiff_t; #include "src/Core/CommaInitializer.h" #include "src/Core/Flagged.h" #include "src/Core/ProductBase.h" -#include "src/Core/Product.h" +#include "src/Core/GeneralProduct.h" #include "src/Core/TriangularMatrix.h" #include "src/Core/SelfAdjointView.h" -#include "src/Core/SolveTriangular.h" +#include "src/Core/products/GeneralBlockPanelKernel.h" #include "src/Core/products/Parallelizer.h" #include "src/Core/products/CoeffBasedProduct.h" -#include "src/Core/products/GeneralBlockPanelKernel.h" #include "src/Core/products/GeneralMatrixVector.h" #include "src/Core/products/GeneralMatrixMatrix.h" +#include "src/Core/SolveTriangular.h" #include "src/Core/products/GeneralMatrixMatrixTriangular.h" #include "src/Core/products/SelfadjointMatrixVector.h" #include "src/Core/products/SelfadjointMatrixMatrix.h" @@ -347,7 +355,20 @@ using std::ptrdiff_t; #include "src/Core/ArrayBase.h" #include "src/Core/ArrayWrapper.h" -} // namespace Eigen +#ifdef EIGEN_USE_BLAS +#include "src/Core/products/GeneralMatrixMatrix_MKL.h" +#include "src/Core/products/GeneralMatrixVector_MKL.h" +#include "src/Core/products/GeneralMatrixMatrixTriangular_MKL.h" +#include "src/Core/products/SelfadjointMatrixMatrix_MKL.h" +#include "src/Core/products/SelfadjointMatrixVector_MKL.h" +#include "src/Core/products/TriangularMatrixMatrix_MKL.h" +#include "src/Core/products/TriangularMatrixVector_MKL.h" +#include "src/Core/products/TriangularSolverMatrix_MKL.h" +#endif // EIGEN_USE_BLAS + +#ifdef EIGEN_USE_MKL_VML +#include "src/Core/Assign_MKL.h" +#endif #include "src/Core/GlobalFunctions.h" diff --git a/gtsam/3rdparty/Eigen/Eigen/Eigen2Support b/gtsam/3rdparty/Eigen/Eigen/Eigen2Support index d96592a8d..c2aa2f618 100644 --- a/gtsam/3rdparty/Eigen/Eigen/Eigen2Support +++ b/gtsam/3rdparty/Eigen/Eigen/Eigen2Support @@ -31,9 +31,8 @@ #include "src/Core/util/DisableStupidWarnings.h" -namespace Eigen { - -/** \defgroup Eigen2Support_Module Eigen2 support module +/** \ingroup Support_modules + * \defgroup Eigen2Support_Module Eigen2 support module * This module provides a couple of deprecated 
functions improving the compatibility with Eigen2. * * To use it, define EIGEN2_SUPPORT before including any Eigen header @@ -56,13 +55,29 @@ namespace Eigen { #include "src/Eigen2Support/MathFunctions.h" -} // namespace Eigen - #include "src/Core/util/ReenableStupidWarnings.h" // Eigen2 used to include iostream #include +#define EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \ +using Eigen::Matrix##SizeSuffix##TypeSuffix; \ +using Eigen::Vector##SizeSuffix##TypeSuffix; \ +using Eigen::RowVector##SizeSuffix##TypeSuffix; + +#define EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(TypeSuffix) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X) \ + +#define EIGEN_USING_MATRIX_TYPEDEFS \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(i) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(f) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(d) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(cf) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(cd) + #define USING_PART_OF_NAMESPACE_EIGEN \ EIGEN_USING_MATRIX_TYPEDEFS \ using Eigen::Matrix; \ diff --git a/gtsam/3rdparty/Eigen/Eigen/Eigenvalues b/gtsam/3rdparty/Eigen/Eigen/Eigenvalues index 250c0f466..af99ccd1f 100644 --- a/gtsam/3rdparty/Eigen/Eigen/Eigenvalues +++ b/gtsam/3rdparty/Eigen/Eigen/Eigenvalues @@ -9,8 +9,7 @@ #include "Jacobi" #include "Householder" #include "LU" - -namespace Eigen { +#include "Geometry" /** \defgroup Eigenvalues_Module Eigenvalues module * @@ -35,8 +34,11 @@ namespace Eigen { #include "src/Eigenvalues/ComplexSchur.h" #include "src/Eigenvalues/ComplexEigenSolver.h" #include "src/Eigenvalues/MatrixBaseEigenvalues.h" - -} // namespace Eigen +#ifdef EIGEN_USE_LAPACKE +#include "src/Eigenvalues/RealSchur_MKL.h" +#include "src/Eigenvalues/ComplexSchur_MKL.h" +#include "src/Eigenvalues/SelfAdjointEigenSolver_MKL.h" +#endif #include "src/Core/util/ReenableStupidWarnings.h" diff --git a/gtsam/3rdparty/Eigen/Eigen/Geometry b/gtsam/3rdparty/Eigen/Eigen/Geometry index 78277c0c5..efd9d4504 100644 --- a/gtsam/3rdparty/Eigen/Eigen/Geometry +++ b/gtsam/3rdparty/Eigen/Eigen/Geometry @@ -13,8 +13,6 @@ #define M_PI 3.14159265358979323846 #endif -namespace Eigen { - /** \defgroup Geometry_Module Geometry module * * @@ -58,8 +56,6 @@ namespace Eigen { #include "src/Eigen2Support/Geometry/All.h" #endif -} // namespace Eigen - #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_GEOMETRY_MODULE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/Householder b/gtsam/3rdparty/Eigen/Eigen/Householder index 6b86cf65c..6e348db5c 100644 --- a/gtsam/3rdparty/Eigen/Eigen/Householder +++ b/gtsam/3rdparty/Eigen/Eigen/Householder @@ -5,8 +5,6 @@ #include "src/Core/util/DisableStupidWarnings.h" -namespace Eigen { - /** \defgroup Householder_Module Householder module * This module provides Householder transformations. 
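(Illustrative aside, not part of the patch: the Eigenvalues module hunk above now pulls in optional LAPACKE-backed kernels — RealSchur_MKL.h, ComplexSchur_MKL.h, SelfAdjointEigenSolver_MKL.h — when EIGEN_USE_LAPACKE is defined. The user-facing API is unchanged either way; a minimal sketch, where enabling the macro assumes an MKL/LAPACKE installation and is otherwise left commented out:)

    // #define EIGEN_USE_LAPACKE   // optional: route decompositions through MKL/LAPACKE (requires MKL)
    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      // arbitrary symmetric positive semi-definite test matrix
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
      Eigen::MatrixXd S = A.transpose() * A;

      Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(S);   // computes eigenvalues and eigenvectors
      std::cout << "eigenvalues:\n" << es.eigenvalues() << "\n";
      std::cout << "first eigenvector:\n" << es.eigenvectors().col(0) << "\n";
      return es.info() == Eigen::Success ? 0 : 1;
    }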
* @@ -19,8 +17,6 @@ namespace Eigen { #include "src/Householder/HouseholderSequence.h" #include "src/Householder/BlockHouseholder.h" -} // namespace Eigen - #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_HOUSEHOLDER_MODULE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/IterativeLinearSolvers b/gtsam/3rdparty/Eigen/Eigen/IterativeLinearSolvers new file mode 100644 index 000000000..315c2dd1e --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/IterativeLinearSolvers @@ -0,0 +1,40 @@ +#ifndef EIGEN_ITERATIVELINEARSOLVERS_MODULE_H +#define EIGEN_ITERATIVELINEARSOLVERS_MODULE_H + +#include "SparseCore" +#include "OrderingMethods" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \ingroup Sparse_modules + * \defgroup IterativeLinearSolvers_Module IterativeLinearSolvers module + * + * This module currently provides iterative methods to solve problems of the form \c A \c x = \c b, where \c A is a squared matrix, usually very large and sparse. + * Those solvers are accessible via the following classes: + * - ConjugateGradient for selfadjoint (hermitian) matrices, + * - BiCGSTAB for general square matrices. + * + * These iterative solvers are associated with some preconditioners: + * - IdentityPreconditioner - not really useful + * - DiagonalPreconditioner - also called JAcobi preconditioner, work very well on diagonal dominant matrices. + * - IncompleteILUT - incomplete LU factorization with dual thresholding + * + * Such problems can also be solved using the direct sparse decomposition modules: SparseCholesky, CholmodSupport, UmfPackSupport, SuperLUSupport. + * + * \code + * #include + * \endcode + */ + +#include "src/misc/Solve.h" +#include "src/misc/SparseSolve.h" + +#include "src/IterativeLinearSolvers/IterativeSolverBase.h" +#include "src/IterativeLinearSolvers/BasicPreconditioners.h" +#include "src/IterativeLinearSolvers/ConjugateGradient.h" +#include "src/IterativeLinearSolvers/BiCGSTAB.h" +#include "src/IterativeLinearSolvers/IncompleteLUT.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_ITERATIVELINEARSOLVERS_MODULE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/Jacobi b/gtsam/3rdparty/Eigen/Eigen/Jacobi index afa676813..ba8a4dc36 100644 --- a/gtsam/3rdparty/Eigen/Eigen/Jacobi +++ b/gtsam/3rdparty/Eigen/Eigen/Jacobi @@ -5,8 +5,6 @@ #include "src/Core/util/DisableStupidWarnings.h" -namespace Eigen { - /** \defgroup Jacobi_Module Jacobi module * This module provides Jacobi and Givens rotations. * @@ -21,8 +19,6 @@ namespace Eigen { #include "src/Jacobi/Jacobi.h" -} // namespace Eigen - #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_JACOBI_MODULE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/LU b/gtsam/3rdparty/Eigen/Eigen/LU index 226f88ca3..db5795504 100644 --- a/gtsam/3rdparty/Eigen/Eigen/LU +++ b/gtsam/3rdparty/Eigen/Eigen/LU @@ -5,8 +5,6 @@ #include "src/Core/util/DisableStupidWarnings.h" -namespace Eigen { - /** \defgroup LU_Module LU module * This module includes %LU decomposition and related notions such as matrix inversion and determinant. 
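To make the new IterativeLinearSolvers module above concrete, here is a minimal usage sketch (illustrative only, not part of the patch); the helper name and the assumption that A is a filled symmetric positive definite sparse matrix are not from the patch.

// Illustrative sketch of the IterativeLinearSolvers module added above.
#include <Eigen/Sparse>

Eigen::VectorXd solveIteratively(const Eigen::SparseMatrix<double>& A,  // assumed SPD
                                 const Eigen::VectorXd& b)
{
  // ConjugateGradient defaults to the DiagonalPreconditioner (Jacobi).
  Eigen::ConjugateGradient<Eigen::SparseMatrix<double> > cg;
  cg.compute(A);
  Eigen::VectorXd x = cg.solve(b);
  if (cg.info() != Eigen::Success)
  {
    // BiCGSTAB covers general (non-selfadjoint) square systems.
    Eigen::BiCGSTAB<Eigen::SparseMatrix<double> > bicg(A);
    x = bicg.solve(b);
  }
  return x;
}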
* This module defines the following MatrixBase methods: @@ -23,6 +21,9 @@ namespace Eigen { #include "src/misc/Image.h" #include "src/LU/FullPivLU.h" #include "src/LU/PartialPivLU.h" +#ifdef EIGEN_USE_LAPACKE +#include "src/LU/PartialPivLU_MKL.h" +#endif #include "src/LU/Determinant.h" #include "src/LU/Inverse.h" @@ -34,8 +35,6 @@ namespace Eigen { #include "src/Eigen2Support/LU.h" #endif -} // namespace Eigen - #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_LU_MODULE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/LeastSquares b/gtsam/3rdparty/Eigen/Eigen/LeastSquares index 93a6302dc..35137c25d 100644 --- a/gtsam/3rdparty/Eigen/Eigen/LeastSquares +++ b/gtsam/3rdparty/Eigen/Eigen/LeastSquares @@ -15,8 +15,6 @@ #include "Eigenvalues" #include "Geometry" -namespace Eigen { - /** \defgroup LeastSquares_Module LeastSquares module * This module provides linear regression and related features. * @@ -27,8 +25,6 @@ namespace Eigen { #include "src/Eigen2Support/LeastSquares.h" -} // namespace Eigen - #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN2_SUPPORT diff --git a/gtsam/3rdparty/Eigen/Eigen/OrderingMethods b/gtsam/3rdparty/Eigen/Eigen/OrderingMethods new file mode 100644 index 000000000..1e2d87452 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/OrderingMethods @@ -0,0 +1,23 @@ +#ifndef EIGEN_ORDERINGMETHODS_MODULE_H +#define EIGEN_ORDERINGMETHODS_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \ingroup Sparse_modules + * \defgroup OrderingMethods_Module OrderingMethods module + * + * This module is currently for internal use only. + * + * + * \code + * #include + * \endcode + */ + +#include "src/OrderingMethods/Amd.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_ORDERINGMETHODS_MODULE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/PaStiXSupport b/gtsam/3rdparty/Eigen/Eigen/PaStiXSupport new file mode 100644 index 000000000..7c616ee5e --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/PaStiXSupport @@ -0,0 +1,46 @@ +#ifndef EIGEN_PASTIXSUPPORT_MODULE_H +#define EIGEN_PASTIXSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +#include +extern "C" { +#include +#include +} + +#ifdef complex +#undef complex +#endif + +/** \ingroup Support_modules + * \defgroup PaStiXSupport_Module PaStiXSupport module + * + * This module provides an interface to the PaSTiX library. + * PaSTiX is a general \b supernodal, \b parallel and \b opensource sparse solver. + * It provides the two following main factorization classes: + * - class PastixLLT : a supernodal, parallel LLt Cholesky factorization. + * - class PastixLDLT: a supernodal, parallel LDLt Cholesky factorization. + * - class PastixLU : a supernodal, parallel LU factorization (optimized for a symmetric pattern). + * + * \code + * #include + * \endcode + * + * In order to use this module, the PaSTiX headers must be accessible from the include paths, and your binary must be linked to the PaSTiX library and its dependencies. + * The dependencies depend on how PaSTiX has been compiled. + * For a cmake based project, you can use our FindPaSTiX.cmake module to help you in this task. 
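The dense LU hunk above follows the same pattern as the other backend hunks in this patch: the LAPACKE path is compiled only when EIGEN_USE_LAPACKE is defined, and the factorize-then-solve API is unchanged. A minimal dense sketch (illustrative only, not part of the patch):

// Illustrative sketch: PartialPivLU usage is identical with or without the
// PartialPivLU_MKL.h backend added above. A is assumed invertible.
#include <Eigen/LU>

int main()
{
  Eigen::Matrix3d A = Eigen::Matrix3d::Random();
  Eigen::Vector3d b = Eigen::Vector3d::Random();

  Eigen::PartialPivLU<Eigen::Matrix3d> lu(A);
  Eigen::Vector3d x   = lu.solve(b);        // solves A x = b
  double          det = lu.determinant();   // by-product of the factorization
  Eigen::Matrix3d Ai  = lu.inverse();
  (void)det; (void)Ai;

  return (A * x - b).norm() < 1e-9 ? 0 : 1;
}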
+ * + */ + +#include "src/misc/Solve.h" +#include "src/misc/SparseSolve.h" + +#include "src/PaStiXSupport/PaStiXSupport.h" + + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_PASTIXSUPPORT_MODULE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/PardisoSupport b/gtsam/3rdparty/Eigen/Eigen/PardisoSupport new file mode 100644 index 000000000..99330ce7a --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/PardisoSupport @@ -0,0 +1,30 @@ +#ifndef EIGEN_PARDISOSUPPORT_MODULE_H +#define EIGEN_PARDISOSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +#include + +#include + +/** \ingroup Support_modules + * \defgroup PardisoSupport_Module PardisoSupport module + * + * This module brings support for the Intel(R) MKL PARDISO direct sparse solvers. + * + * \code + * #include + * \endcode + * + * In order to use this module, the MKL headers must be accessible from the include paths, and your binary must be linked to the MKL library and its dependencies. + * See this \ref TopicUsingIntelMKL "page" for more information on MKL-Eigen integration. + * + */ + +#include "src/PardisoSupport/PardisoSupport.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_PARDISOSUPPORT_MODULE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/QR b/gtsam/3rdparty/Eigen/Eigen/QR index 97c1788ee..ac5b02693 100644 --- a/gtsam/3rdparty/Eigen/Eigen/QR +++ b/gtsam/3rdparty/Eigen/Eigen/QR @@ -9,8 +9,6 @@ #include "Jacobi" #include "Householder" -namespace Eigen { - /** \defgroup QR_Module QR module * * @@ -28,13 +26,15 @@ namespace Eigen { #include "src/QR/HouseholderQR.h" #include "src/QR/FullPivHouseholderQR.h" #include "src/QR/ColPivHouseholderQR.h" +#ifdef EIGEN_USE_LAPACKE +#include "src/QR/HouseholderQR_MKL.h" +#include "src/QR/ColPivHouseholderQR_MKL.h" +#endif #ifdef EIGEN2_SUPPORT #include "src/Eigen2Support/QR.h" #endif -} // namespace Eigen - #include "src/Core/util/ReenableStupidWarnings.h" #ifdef EIGEN2_SUPPORT diff --git a/gtsam/3rdparty/Eigen/Eigen/SVD b/gtsam/3rdparty/Eigen/Eigen/SVD index 7c987a9dd..fd310017a 100644 --- a/gtsam/3rdparty/Eigen/Eigen/SVD +++ b/gtsam/3rdparty/Eigen/Eigen/SVD @@ -7,8 +7,6 @@ #include "src/Core/util/DisableStupidWarnings.h" -namespace Eigen { - /** \defgroup SVD_Module SVD module * * @@ -24,14 +22,15 @@ namespace Eigen { #include "src/misc/Solve.h" #include "src/SVD/JacobiSVD.h" +#if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT) +#include "src/SVD/JacobiSVD_MKL.h" +#endif #include "src/SVD/UpperBidiagonalization.h" #ifdef EIGEN2_SUPPORT #include "src/Eigen2Support/SVD.h" #endif -} // namespace Eigen - #include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SVD_MODULE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/Sparse b/gtsam/3rdparty/Eigen/Eigen/Sparse index 7425b3a41..2d1757172 100644 --- a/gtsam/3rdparty/Eigen/Eigen/Sparse +++ b/gtsam/3rdparty/Eigen/Eigen/Sparse @@ -1,69 +1,23 @@ #ifndef EIGEN_SPARSE_MODULE_H #define EIGEN_SPARSE_MODULE_H -#include "Core" - -#include "src/Core/util/DisableStupidWarnings.h" - -#include -#include -#include -#include -#include - -#ifdef EIGEN2_SUPPORT -#define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET -#endif - -#ifndef EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET -#error The sparse module API is not stable yet. To use it anyway, please define the EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET preprocessor token. 
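Likewise for the QR and SVD hunks above: only the implementation behind the Householder QR decompositions and JacobiSVD changes when EIGEN_USE_LAPACKE is defined. A least-squares sketch before returning to the sparse-module reorganization below (illustrative only, not part of the patch; the helper name is assumed):

// Illustrative sketch of a least-squares solve through the SVD module touched above.
#include <Eigen/SVD>

Eigen::VectorXd leastSquares(const Eigen::MatrixXd& A, const Eigen::VectorXd& b)
{
  // Thin U/V are required for solving; this minimizes ||A x - b||.
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  return svd.solve(b);
}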
-#endif - -namespace Eigen { - -/** \defgroup Sparse_Module Sparse module +/** \defgroup Sparse_modules Sparse modules * - * - * - * See the \ref TutorialSparse "Sparse tutorial" + * Meta-module including all related modules: + * - SparseCore + * - OrderingMethods + * - SparseCholesky + * - IterativeLinearSolvers * * \code * #include * \endcode */ -/** The type used to identify a general sparse storage. */ -struct Sparse {}; - -#include "src/Sparse/SparseUtil.h" -#include "src/Sparse/SparseMatrixBase.h" -#include "src/Sparse/CompressedStorage.h" -#include "src/Sparse/AmbiVector.h" -#include "src/Sparse/SparseMatrix.h" -#include "src/Sparse/DynamicSparseMatrix.h" -#include "src/Sparse/MappedSparseMatrix.h" -#include "src/Sparse/SparseVector.h" -#include "src/Sparse/CoreIterators.h" -#include "src/Sparse/SparseBlock.h" -#include "src/Sparse/SparseTranspose.h" -#include "src/Sparse/SparseCwiseUnaryOp.h" -#include "src/Sparse/SparseCwiseBinaryOp.h" -#include "src/Sparse/SparseDot.h" -#include "src/Sparse/SparseAssign.h" -#include "src/Sparse/SparseRedux.h" -#include "src/Sparse/SparseFuzzy.h" -#include "src/Sparse/SparseProduct.h" -#include "src/Sparse/SparseSparseProduct.h" -#include "src/Sparse/SparseDenseProduct.h" -#include "src/Sparse/SparseDiagonalProduct.h" -#include "src/Sparse/SparseTriangularView.h" -#include "src/Sparse/SparseSelfAdjointView.h" -#include "src/Sparse/TriangularSolver.h" -#include "src/Sparse/SparseView.h" - -} // namespace Eigen - -#include "src/Core/util/ReenableStupidWarnings.h" +#include "SparseCore" +#include "OrderingMethods" +#include "SparseCholesky" +#include "IterativeLinearSolvers" #endif // EIGEN_SPARSE_MODULE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/SparseCholesky b/gtsam/3rdparty/Eigen/Eigen/SparseCholesky new file mode 100644 index 000000000..5f82742f7 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/SparseCholesky @@ -0,0 +1,30 @@ +#ifndef EIGEN_SPARSECHOLESKY_MODULE_H +#define EIGEN_SPARSECHOLESKY_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \ingroup Sparse_modules + * \defgroup SparseCholesky_Module SparseCholesky module + * + * This module currently provides two variants of the direct sparse Cholesky decomposition for selfadjoint (hermitian) matrices. + * Those decompositions are accessible via the following classes: + * - SimplicialLLt, + * - SimplicialLDLt + * + * Such problems can also be solved using the ConjugateGradient solver from the IterativeLinearSolvers module. + * + * \code + * #include + * \endcode + */ + +#include "src/misc/Solve.h" +#include "src/misc/SparseSolve.h" + +#include "src/SparseCholesky/SimplicialCholesky.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SPARSECHOLESKY_MODULE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/SparseCore b/gtsam/3rdparty/Eigen/Eigen/SparseCore new file mode 100644 index 000000000..41d28c928 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/SparseCore @@ -0,0 +1,66 @@ +#ifndef EIGEN_SPARSECORE_MODULE_H +#define EIGEN_SPARSECORE_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +#include +#include +#include +#include +#include + +/** \ingroup Sparse_modules + * \defgroup SparseCore_Module SparseCore module + * + * This module provides a sparse matrix representation, and basic associatd matrix manipulations + * and operations. + * + * See the \ref TutorialSparse "Sparse tutorial" + * + * \code + * #include + * \endcode + * + * This module depends on: Core. 
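The SparseCore module documented above is largely a rename of the old src/Sparse files; a short assembly sketch shows the kind of usage it supports (illustrative only, not part of the patch; the tridiagonal helper is assumed):

// Illustrative sketch: triplet-based assembly with the SparseCore module described above.
#include <Eigen/SparseCore>
#include <vector>

Eigen::SparseMatrix<double> tridiagonal(int n)
{
  std::vector<Eigen::Triplet<double> > entries;
  entries.reserve(3 * n);
  for (int i = 0; i < n; ++i)
  {
    entries.push_back(Eigen::Triplet<double>(i, i, 2.0));
    if (i + 1 < n)
    {
      entries.push_back(Eigen::Triplet<double>(i, i + 1, -1.0));
      entries.push_back(Eigen::Triplet<double>(i + 1, i, -1.0));
    }
  }
  Eigen::SparseMatrix<double> A(n, n);
  A.setFromTriplets(entries.begin(), entries.end());  // accumulates duplicates, compresses
  return A;
}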
+ */ + +namespace Eigen { + +/** The type used to identify a general sparse storage. */ +struct Sparse {}; + +} + +#include "src/SparseCore/SparseUtil.h" +#include "src/SparseCore/SparseMatrixBase.h" +#include "src/SparseCore/CompressedStorage.h" +#include "src/SparseCore/AmbiVector.h" +#include "src/SparseCore/SparseMatrix.h" +#include "src/SparseCore/MappedSparseMatrix.h" +#include "src/SparseCore/SparseVector.h" +#include "src/SparseCore/CoreIterators.h" +#include "src/SparseCore/SparseBlock.h" +#include "src/SparseCore/SparseTranspose.h" +#include "src/SparseCore/SparseCwiseUnaryOp.h" +#include "src/SparseCore/SparseCwiseBinaryOp.h" +#include "src/SparseCore/SparseDot.h" +#include "src/SparseCore/SparsePermutation.h" +#include "src/SparseCore/SparseAssign.h" +#include "src/SparseCore/SparseRedux.h" +#include "src/SparseCore/SparseFuzzy.h" +#include "src/SparseCore/ConservativeSparseSparseProduct.h" +#include "src/SparseCore/SparseSparseProductWithPruning.h" +#include "src/SparseCore/SparseProduct.h" +#include "src/SparseCore/SparseDenseProduct.h" +#include "src/SparseCore/SparseDiagonalProduct.h" +#include "src/SparseCore/SparseTriangularView.h" +#include "src/SparseCore/SparseSelfAdjointView.h" +#include "src/SparseCore/TriangularSolver.h" +#include "src/SparseCore/SparseView.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SPARSECORE_MODULE_H + diff --git a/gtsam/3rdparty/Eigen/Eigen/SuperLUSupport b/gtsam/3rdparty/Eigen/Eigen/SuperLUSupport new file mode 100644 index 000000000..575e14fbc --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/SuperLUSupport @@ -0,0 +1,59 @@ +#ifndef EIGEN_SUPERLUSUPPORT_MODULE_H +#define EIGEN_SUPERLUSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +#ifdef EMPTY +#define EIGEN_EMPTY_WAS_ALREADY_DEFINED +#endif + +typedef int int_t; +#include +#include +#include + +// slu_util.h defines a preprocessor token named EMPTY which is really polluting, +// so we remove it in favor of a SUPERLU_EMPTY token. +// If EMPTY was already defined then we don't undef it. + +#if defined(EIGEN_EMPTY_WAS_ALREADY_DEFINED) +# undef EIGEN_EMPTY_WAS_ALREADY_DEFINED +#elif defined(EMPTY) +# undef EMPTY +#endif + +#define SUPERLU_EMPTY (-1) + +namespace Eigen { struct SluMatrix; } + +/** \ingroup Support_modules + * \defgroup SuperLUSupport_Module SuperLUSupport module + * + * This module provides an interface to the SuperLU library. + * It provides the following factorization class: + * - class SuperLU: a supernodal sequential LU factorization. + * - class SuperILU: a supernodal sequential incomplete LU factorization (to be used as a preconditioner for iterative methods). + * + * \warning When including this module, you have to use SUPERLU_EMPTY instead of EMPTY which is no longer defined because it is too polluting. + * + * \code + * #include + * \endcode + * + * In order to use this module, the superlu headers must be accessible from the include paths, and your binary must be linked to the superlu library and its dependencies. + * The dependencies depend on how superlu has been compiled. + * For a cmake based project, you can use our FindSuperLU.cmake module to help you in this task. 
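The SuperLU, UmfPack and Cholmod support modules in this patch all follow the same compute()/solve() pattern as the built-in decompositions of the SparseCholesky module introduced earlier in this hunk. A dependency-free sketch using the built-in simplicial solver (illustrative only, not part of the patch; the module doc above spells the classes SimplicialLLt/SimplicialLDLt, while the sketch uses the SimplicialLDLT spelling also shipped in SimplicialCholesky.h, so check the exact name against the headers):

// Illustrative sketch of a direct simplicial LDL^T solve; A is assumed to be a filled,
// selfadjoint positive definite sparse matrix.
#include <Eigen/SparseCholesky>
#include <Eigen/SparseCore>

Eigen::VectorXd solveDirect(const Eigen::SparseMatrix<double>& A,
                            const Eigen::VectorXd& b)
{
  Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > ldlt(A);
  if (ldlt.info() != Eigen::Success)
    return Eigen::VectorXd();          // factorization failed
  return ldlt.solve(b);
}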
+ * + */ + +#include "src/misc/Solve.h" +#include "src/misc/SparseSolve.h" + +#include "src/SuperLUSupport/SuperLUSupport.h" + + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SUPERLUSUPPORT_MODULE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/UmfPackSupport b/gtsam/3rdparty/Eigen/Eigen/UmfPackSupport new file mode 100644 index 000000000..984f64a84 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/UmfPackSupport @@ -0,0 +1,36 @@ +#ifndef EIGEN_UMFPACKSUPPORT_MODULE_H +#define EIGEN_UMFPACKSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +extern "C" { +#include +} + +/** \ingroup Support_modules + * \defgroup UmfPackSupport_Module UmfPackSupport module + * + * This module provides an interface to the UmfPack library which is part of the suitesparse package. + * It provides the following factorization class: + * - class UmfPackLU: a multifrontal sequential LU factorization. + * + * \code + * #include + * \endcode + * + * In order to use this module, the umfpack headers must be accessible from the include paths, and your binary must be linked to the umfpack library and its dependencies. + * The dependencies depend on how umfpack has been compiled. + * For a cmake based project, you can use our FindUmfPack.cmake module to help you in this task. + * + */ + +#include "src/misc/Solve.h" +#include "src/misc/SparseSolve.h" + +#include "src/UmfPackSupport/UmfPackSupport.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_UMFPACKSUPPORT_MODULE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Cholesky/LDLT.h b/gtsam/3rdparty/Eigen/Eigen/src/Cholesky/LDLT.h index f47b2ea56..a5e3d5469 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Cholesky/LDLT.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Cholesky/LDLT.h @@ -1,9 +1,10 @@ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // -// Copyright (C) 2008-2010 Gael Guennebaud +// Copyright (C) 2008-2011 Gael Guennebaud // Copyright (C) 2009 Keir Mierle // Copyright (C) 2009 Benoit Jacob +// Copyright (C) 2011 Timothy E. Holy // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public @@ -27,17 +28,21 @@ #ifndef EIGEN_LDLT_H #define EIGEN_LDLT_H +namespace Eigen { + namespace internal { template struct LDLT_Traits; } -/** \ingroup cholesky_Module +/** \ingroup Cholesky_Module * * \class LDLT * * \brief Robust Cholesky decomposition of a matrix with pivoting * * \param MatrixType the type of the matrix of which to compute the LDL^T Cholesky decomposition + * \param UpLo the triangular part that will be used for the decompositon: Lower (default) or Upper. + * The other triangular part won't be read. * * Perform a robust Cholesky decomposition of a positive semidefinite or negative semidefinite * matrix \f$ A \f$ such that \f$ A = P^TLDL^*P \f$, where P is a permutation matrix, L @@ -48,14 +53,10 @@ template struct LDLT_Traits; * on D also stabilizes the computation. * * Remember that Cholesky decompositions are not rank-revealing. Also, do not use a Cholesky - * decomposition to determine whether a system of equations has a solution. + * decomposition to determine whether a system of equations has a solution. * * \sa MatrixBase::ldlt(), class LLT */ - /* THIS PART OF THE DOX IS CURRENTLY DISABLED BECAUSE INACCURATE BECAUSE OF BUG IN THE DECOMPOSITION CODE - * Note that during the decomposition, only the upper triangular part of A is considered. 
Therefore, - * the strict lower part does not have to store correct values. - */ template class LDLT { public: @@ -98,6 +99,11 @@ template class LDLT m_isInitialized(false) {} + /** \brief Constructor with decomposition + * + * This calculates the decomposition for the input \a matrix. + * \sa LDLT(Index size) + */ LDLT(const MatrixType& matrix) : m_matrix(matrix.rows(), matrix.cols()), m_transpositions(matrix.rows()), @@ -107,6 +113,14 @@ template class LDLT compute(matrix); } + /** Clear any existing decomposition + * \sa rankUpdate(w,sigma) + */ + void setZero() + { + m_isInitialized = false; + } + /** \returns a view of the upper triangular matrix U */ inline typename Traits::MatrixU matrixU() const { @@ -130,14 +144,14 @@ template class LDLT } /** \returns the coefficients of the diagonal matrix D */ - inline Diagonal vectorD(void) const + inline Diagonal vectorD() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_matrix.diagonal(); } /** \returns true if the matrix is positive (semidefinite) */ - inline bool isPositive(void) const + inline bool isPositive() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_sign == 1; @@ -196,6 +210,9 @@ template class LDLT LDLT& compute(const MatrixType& matrix); + template + LDLT& rankUpdate(const MatrixBase& w,RealScalar alpha=1); + /** \returns the internal LDLT decomposition matrix * * TODO: document the storage layout @@ -211,6 +228,17 @@ template class LDLT inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was succesful, + * \c NumericalIssue if the matrix.appears to be negative. + */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return Success; + } + protected: /** \internal @@ -249,7 +277,7 @@ template<> struct ldlt_inplace return true; } - RealScalar cutoff = 0, biggest_in_corner; + RealScalar cutoff(0), biggest_in_corner; for (Index k = 0; k < size; ++k) { @@ -317,6 +345,61 @@ template<> struct ldlt_inplace return true; } + + // Reference for the algorithm: Davis and Hager, "Multiple Rank + // Modifications of a Sparse Cholesky Factorization" (Algorithm 1) + // Trivial rearrangements of their computations (Timothy E. Holy) + // allow their algorithm to work for rank-1 updates even if the + // original matrix is not of full rank. 
+ // Here only rank-1 updates are implemented, to reduce the + // requirement for intermediate storage and improve accuracy + template + static bool updateInPlace(MatrixType& mat, MatrixBase& w, typename MatrixType::RealScalar sigma=1) + { + using internal::isfinite; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; + + const Index size = mat.rows(); + eigen_assert(mat.cols() == size && w.size()==size); + + RealScalar alpha = 1; + + // Apply the update + for (Index j = 0; j < size; j++) + { + // Check for termination due to an original decomposition of low-rank + if (!isfinite(alpha)) + break; + + // Update the diagonal terms + RealScalar dj = real(mat.coeff(j,j)); + Scalar wj = w.coeff(j); + RealScalar swj2 = sigma*abs2(wj); + RealScalar gamma = dj*alpha + swj2; + + mat.coeffRef(j,j) += swj2/alpha; + alpha += swj2/dj; + + + // Update the terms of L + Index rs = size-j-1; + w.tail(rs) -= wj * mat.col(j).tail(rs); + if(gamma != 0) + mat.col(j).tail(rs) += (sigma*conj(wj)/gamma)*w.tail(rs); + } + return true; + } + + template + static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w, typename MatrixType::RealScalar sigma=1) + { + // Apply the permutation to the input w + tmp = transpositions * w; + + return ldlt_inplace::updateInPlace(mat,tmp,sigma); + } }; template<> struct ldlt_inplace @@ -327,22 +410,29 @@ template<> struct ldlt_inplace Transpose matt(mat); return ldlt_inplace::unblocked(matt, transpositions, temp, sign); } + + template + static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& transpositions, Workspace& tmp, WType& w, typename MatrixType::RealScalar sigma=1) + { + Transpose matt(mat); + return ldlt_inplace::update(matt, transpositions, tmp, w.conjugate(), sigma); + } }; template struct LDLT_Traits { - typedef TriangularView MatrixL; - typedef TriangularView MatrixU; - inline static MatrixL getL(const MatrixType& m) { return m; } - inline static MatrixU getU(const MatrixType& m) { return m.adjoint(); } + typedef const TriangularView MatrixL; + typedef const TriangularView MatrixU; + static inline MatrixL getL(const MatrixType& m) { return m; } + static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); } }; template struct LDLT_Traits { - typedef TriangularView MatrixL; - typedef TriangularView MatrixU; - inline static MatrixL getL(const MatrixType& m) { return m.adjoint(); } - inline static MatrixU getU(const MatrixType& m) { return m; } + typedef const TriangularView MatrixL; + typedef const TriangularView MatrixU; + static inline MatrixL getL(const MatrixType& m) { return m.adjoint(); } + static inline MatrixU getU(const MatrixType& m) { return m; } }; } // end namespace internal @@ -367,6 +457,37 @@ LDLT& LDLT::compute(const MatrixType& a) return *this; } +/** Update the LDLT decomposition: given A = L D L^T, efficiently compute the decomposition of A + sigma w w^T. + * \param w a vector to be incorporated into the decomposition. + * \param sigma a scalar, +1 for updates and -1 for "downdates," which correspond to removing previously-added column vectors. Optional; default value is +1. 
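Because the rankUpdate() documentation is split across hunks, a compact usage sketch of the new API may help (illustrative only, not part of the patch; it assumes a dense symmetric positive definite matrix A and a vector w of matching size):

// Illustrative sketch of the new LDLT::rankUpdate() API documented above: after the
// update, the object holds the factorization of A + sigma * w * w^T.
#include <Eigen/Cholesky>

void rankUpdateExample(const Eigen::MatrixXd& A,   // assumed symmetric positive definite
                       const Eigen::VectorXd& w)
{
  Eigen::LDLT<Eigen::MatrixXd> ldlt(A);
  ldlt.rankUpdate(w,  1.0);                        // update:   factorization of A + w w^T
  ldlt.rankUpdate(w, -1.0);                        // downdate: back to (roughly) A
  Eigen::VectorXd x = ldlt.solve(w);               // solve with the updated factorization
  (void)x;
}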
+ * \sa setZero() + */ +template +template +LDLT& LDLT::rankUpdate(const MatrixBase& w,typename NumTraits::Real sigma) +{ + const Index size = w.rows(); + if (m_isInitialized) + { + eigen_assert(m_matrix.rows()==size); + } + else + { + m_matrix.resize(size,size); + m_matrix.setZero(); + m_transpositions.resize(size); + for (Index i = 0; i < size; i++) + m_transpositions.coeffRef(i) = i; + m_temporary.resize(size); + m_sign = sigma>=0 ? 1 : -1; + m_isInitialized = true; + } + + internal::ldlt_inplace::update(m_matrix, m_transpositions, m_temporary, w, sigma); + + return *this; +} + namespace internal { template struct solve_retval, Rhs> @@ -481,4 +602,6 @@ MatrixBase::ldlt() const return LDLT(derived()); } +} // end namespace Eigen + #endif // EIGEN_LDLT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Cholesky/LLT.h b/gtsam/3rdparty/Eigen/Eigen/src/Cholesky/LLT.h index a4ee5b11c..17c6d6b8d 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Cholesky/LLT.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Cholesky/LLT.h @@ -25,17 +25,21 @@ #ifndef EIGEN_LLT_H #define EIGEN_LLT_H +namespace Eigen { + namespace internal{ template struct LLT_Traits; } -/** \ingroup cholesky_Module +/** \ingroup Cholesky_Module * * \class LLT * * \brief Standard Cholesky decomposition (LL^T) of a matrix and associated features * * \param MatrixType the type of the matrix of which we are computing the LL^T Cholesky decomposition + * \param UpLo the triangular part that will be used for the decompositon: Lower (default) or Upper. + * The other triangular part won't be read. * * This class performs a LL^T Cholesky decomposition of a symmetric, positive definite * matrix A such that A = LL^* = U^*U, where L is lower triangular. @@ -49,6 +53,9 @@ template struct LLT_Traits; * use LDLT instead for the semidefinite case. Also, do not use a Cholesky decomposition to determine whether a system of equations * has a solution. * + * Example: \include LLT_example.cpp + * Output: \verbinclude LLT_example.out + * * \sa MatrixBase::llt(), class LDLT */ /* HEY THIS DOX IS DISABLED BECAUSE THERE's A BUG EITHER HERE OR IN LDLT ABOUT THAT (OR BOTH) @@ -178,6 +185,9 @@ template class LLT inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } + template + LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1); + protected: /** \internal * Used to compute and store L @@ -190,16 +200,85 @@ template class LLT namespace internal { -template struct llt_inplace; +template struct llt_inplace; -template<> struct llt_inplace +template +static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) { + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; + typedef typename MatrixType::ColXpr ColXpr; + typedef typename internal::remove_all::type ColXprCleaned; + typedef typename ColXprCleaned::SegmentReturnType ColXprSegment; + typedef Matrix TempVectorType; + typedef typename TempVectorType::SegmentReturnType TempVecSegment; + + int n = mat.cols(); + eigen_assert(mat.rows()==n && vec.size()==n); + + TempVectorType temp; + + if(sigma>0) + { + // This version is based on Givens rotations. 
+ // It is faster than the other one below, but only works for updates, + // i.e., for sigma > 0 + temp = sqrt(sigma) * vec; + + for(int i=0; i g; + g.makeGivens(mat(i,i), -temp(i), &mat(i,i)); + + int rs = n-i-1; + if(rs>0) + { + ColXprSegment x(mat.col(i).tail(rs)); + TempVecSegment y(temp.tail(rs)); + apply_rotation_in_the_plane(x, y, g); + } + } + } + else + { + temp = vec; + RealScalar beta = 1; + for(int j=0; j struct llt_inplace +{ + typedef typename NumTraits::Real RealScalar; template static typename MatrixType::Index unblocked(MatrixType& mat) { typedef typename MatrixType::Index Index; - typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::RealScalar RealScalar; eigen_assert(mat.rows()==mat.cols()); const Index size = mat.rows(); @@ -254,55 +333,71 @@ template<> struct llt_inplace } return -1; } -}; -template<> struct llt_inplace + template + static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) + { + return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); + } +}; + +template struct llt_inplace { + typedef typename NumTraits::Real RealScalar; + template static EIGEN_STRONG_INLINE typename MatrixType::Index unblocked(MatrixType& mat) { Transpose matt(mat); - return llt_inplace::unblocked(matt); + return llt_inplace::unblocked(matt); } template static EIGEN_STRONG_INLINE typename MatrixType::Index blocked(MatrixType& mat) { Transpose matt(mat); - return llt_inplace::blocked(matt); + return llt_inplace::blocked(matt); + } + template + static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) + { + Transpose matt(mat); + return llt_inplace::rankUpdate(matt, vec.conjugate(), sigma); } }; template struct LLT_Traits { - typedef TriangularView MatrixL; - typedef TriangularView MatrixU; - inline static MatrixL getL(const MatrixType& m) { return m; } - inline static MatrixU getU(const MatrixType& m) { return m.adjoint(); } + typedef const TriangularView MatrixL; + typedef const TriangularView MatrixU; + static inline MatrixL getL(const MatrixType& m) { return m; } + static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); } static bool inplace_decomposition(MatrixType& m) - { return llt_inplace::blocked(m)==-1; } + { return llt_inplace::blocked(m)==-1; } }; template struct LLT_Traits { - typedef TriangularView MatrixL; - typedef TriangularView MatrixU; - inline static MatrixL getL(const MatrixType& m) { return m.adjoint(); } - inline static MatrixU getU(const MatrixType& m) { return m; } + typedef const TriangularView MatrixL; + typedef const TriangularView MatrixU; + static inline MatrixL getL(const MatrixType& m) { return m.adjoint(); } + static inline MatrixU getU(const MatrixType& m) { return m; } static bool inplace_decomposition(MatrixType& m) - { return llt_inplace::blocked(m)==-1; } + { return llt_inplace::blocked(m)==-1; } }; } // end namespace internal /** Computes / recomputes the Cholesky decomposition A = LL^* = U^*U of \a matrix - * * * \returns a reference to *this + * + * Example: \include TutorialLinAlgComputeTwice.cpp + * Output: \verbinclude TutorialLinAlgComputeTwice.out */ template LLT& LLT::compute(const MatrixType& a) { - assert(a.rows()==a.cols()); + eigen_assert(a.rows()==a.cols()); const Index size = a.rows(); m_matrix.resize(size, size); m_matrix = a; @@ -314,6 +409,26 @@ LLT& LLT::compute(const MatrixType& a) return *this; } +/** Performs a rank one update (or dowdate) of the current decomposition. 
+ * If A = LL^* before the rank one update, + * then after it we have LL^* = A + sigma * v v^* where \a v must be a vector + * of same dimension. + */ +template +template +LLT<_MatrixType,_UpLo> LLT<_MatrixType,_UpLo>::rankUpdate(const VectorType& v, const RealScalar& sigma) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType); + eigen_assert(v.size()==m_matrix.cols()); + eigen_assert(m_isInitialized); + if(internal::llt_inplace::rankUpdate(m_matrix,v,sigma)>=0) + m_info = NumericalIssue; + else + m_info = Success; + + return *this; +} + namespace internal { template struct solve_retval, Rhs> @@ -383,4 +498,6 @@ SelfAdjointView::llt() const return LLT(m_matrix); } +} // end namespace Eigen + #endif // EIGEN_LLT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Cholesky/LLT_MKL.h b/gtsam/3rdparty/Eigen/Eigen/src/Cholesky/LLT_MKL.h new file mode 100644 index 000000000..64daa445c --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/Cholesky/LLT_MKL.h @@ -0,0 +1,102 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * LLt decomposition based on LAPACKE_?potrf function. + ******************************************************************************** +*/ + +#ifndef EIGEN_LLT_MKL_H +#define EIGEN_LLT_MKL_H + +#include "Eigen/src/Core/util/MKL_support.h" +#include + +namespace Eigen { + +namespace internal { + +template struct mkl_llt; + +#define EIGEN_MKL_LLT(EIGTYPE, MKLTYPE, MKLPREFIX) \ +template<> struct mkl_llt \ +{ \ + template \ + static inline typename MatrixType::Index potrf(MatrixType& m, char uplo) \ + { \ + lapack_int matrix_order; \ + lapack_int size, lda, info, StorageOrder; \ + EIGTYPE* a; \ + eigen_assert(m.rows()==m.cols()); \ + /* Set up parameters for ?potrf */ \ + size = m.rows(); \ + StorageOrder = MatrixType::Flags&RowMajorBit?RowMajor:ColMajor; \ + matrix_order = StorageOrder==RowMajor ? 
LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \ + a = &(m.coeffRef(0,0)); \ + lda = m.outerStride(); \ +\ + info = LAPACKE_##MKLPREFIX##potrf( matrix_order, uplo, size, (MKLTYPE*)a, lda ); \ + info = (info==0) ? Success : NumericalIssue; \ + return info; \ + } \ +}; \ +template<> struct llt_inplace \ +{ \ + template \ + static typename MatrixType::Index blocked(MatrixType& m) \ + { \ + return mkl_llt::potrf(m, 'L'); \ + } \ + template \ + static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ + { return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } \ +}; \ +template<> struct llt_inplace \ +{ \ + template \ + static typename MatrixType::Index blocked(MatrixType& m) \ + { \ + return mkl_llt::potrf(m, 'U'); \ + } \ + template \ + static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ + { \ + Transpose matt(mat); \ + return llt_inplace::rankUpdate(matt, vec.conjugate(), sigma); \ + } \ +}; + +EIGEN_MKL_LLT(double, double, d) +EIGEN_MKL_LLT(float, float, s) +EIGEN_MKL_LLT(dcomplex, MKL_Complex16, z) +EIGEN_MKL_LLT(scomplex, MKL_Complex8, c) + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_LLT_MKL_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/CholmodSupport/CMakeLists.txt b/gtsam/3rdparty/Eigen/Eigen/src/CholmodSupport/CMakeLists.txt new file mode 100644 index 000000000..814dfa613 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/CholmodSupport/CMakeLists.txt @@ -0,0 +1,6 @@ +FILE(GLOB Eigen_CholmodSupport_SRCS "*.h") + +INSTALL(FILES + ${Eigen_CholmodSupport_SRCS} + DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/CholmodSupport COMPONENT Devel + ) diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/CholmodSupport.h b/gtsam/3rdparty/Eigen/Eigen/src/CholmodSupport/CholmodSupport.h similarity index 60% rename from gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/CholmodSupport.h rename to gtsam/3rdparty/Eigen/Eigen/src/CholmodSupport/CholmodSupport.h index 3e502c0aa..a06c429f0 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/CholmodSupport.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/CholmodSupport/CholmodSupport.h @@ -25,6 +25,8 @@ #ifndef EIGEN_CHOLMODSUPPORT_H #define EIGEN_CHOLMODSUPPORT_H +namespace Eigen { + namespace internal { template @@ -69,11 +71,20 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat) res.nzmax = mat.nonZeros(); res.nrow = mat.rows();; res.ncol = mat.cols(); - res.p = mat._outerIndexPtr(); - res.i = mat._innerIndexPtr(); - res.x = mat._valuePtr(); + res.p = mat.outerIndexPtr(); + res.i = mat.innerIndexPtr(); + res.x = mat.valuePtr(); res.sorted = 1; - res.packed = 1; + if(mat.isCompressed()) + { + res.packed = 1; + } + else + { + res.packed = 0; + res.nz = mat.innerNonZeroPtr(); + } + res.dtype = 0; res.stype = -1; @@ -149,19 +160,14 @@ enum CholmodMode { CholmodAuto, CholmodSimplicialLLt, CholmodSupernodalLLt, CholmodLDLt }; -/** \brief A Cholesky factorization and solver based on Cholmod - * - * This class allows to solve for A.X = B sparse linear problems via a LL^T or LDL^T Cholesky factorization - * using the Cholmod library. The sparse matrix A must be selfajoint and positive definite. The vectors or matrices - * X and B can be either dense or sparse. - * - * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> - * \tparam _UpLo the triangular part that will be used for the computations. 
It can be Lower - * or Upper. Default is Lower. - * + +/** \ingroup CholmodSupport_Module + * \class CholmodBase + * \brief The base class for the direct Cholesky factorization of Cholmod + * \sa class CholmodSupernodalLLT, class CholmodSimplicialLDLT, class CholmodSimplicialLLT */ -template -class CholmodDecomposition +template +class CholmodBase : internal::noncopyable { public: typedef _MatrixType MatrixType; @@ -173,21 +179,20 @@ class CholmodDecomposition public: - CholmodDecomposition() + CholmodBase() : m_cholmodFactor(0), m_info(Success), m_isInitialized(false) { cholmod_start(&m_cholmod); - setMode(CholmodLDLt); } - CholmodDecomposition(const MatrixType& matrix) + CholmodBase(const MatrixType& matrix) : m_cholmodFactor(0), m_info(Success), m_isInitialized(false) { cholmod_start(&m_cholmod); compute(matrix); } - ~CholmodDecomposition() + ~CholmodBase() { if(m_cholmodFactor) cholmod_free_factor(&m_cholmodFactor, &m_cholmod); @@ -197,31 +202,8 @@ class CholmodDecomposition inline Index cols() const { return m_cholmodFactor->n; } inline Index rows() const { return m_cholmodFactor->n; } - void setMode(CholmodMode mode) - { - switch(mode) - { - case CholmodAuto: - m_cholmod.final_asis = 1; - m_cholmod.supernodal = CHOLMOD_AUTO; - break; - case CholmodSimplicialLLt: - m_cholmod.final_asis = 0; - m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; - m_cholmod.final_ll = 1; - break; - case CholmodSupernodalLLt: - m_cholmod.final_asis = 1; - m_cholmod.supernodal = CHOLMOD_SUPERNODAL; - break; - case CholmodLDLt: - m_cholmod.final_asis = 1; - m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; - break; - default: - break; - } - } + Derived& derived() { return *static_cast(this); } + const Derived& derived() const { return *static_cast(this); } /** \brief Reports whether previous computation was successful. * @@ -235,10 +217,11 @@ class CholmodDecomposition } /** Computes the sparse Cholesky decomposition of \a matrix */ - void compute(const MatrixType& matrix) + Derived& compute(const MatrixType& matrix) { analyzePattern(matrix); factorize(matrix); + return derived(); } /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. @@ -246,13 +229,13 @@ class CholmodDecomposition * \sa compute() */ template - inline const internal::solve_retval + inline const internal::solve_retval solve(const MatrixBase& b) const { eigen_assert(m_isInitialized && "LLT is not initialized."); eigen_assert(rows()==b.rows() && "CholmodDecomposition::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval(*this, b.derived()); + return internal::solve_retval(*this, b.derived()); } /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. @@ -260,13 +243,13 @@ class CholmodDecomposition * \sa compute() */ template - inline const internal::sparse_solve_retval + inline const internal::sparse_solve_retval solve(const SparseMatrixBase& b) const { eigen_assert(m_isInitialized && "LLT is not initialized."); eigen_assert(rows()==b.rows() && "CholmodDecomposition::solve(): invalid number of rows of the right hand side matrix b"); - return internal::sparse_solve_retval(*this, b.derived()); + return internal::sparse_solve_retval(*this, b.derived()); } /** Performs a symbolic decomposition on the sparcity of \a matrix. 
@@ -356,7 +339,7 @@ class CholmodDecomposition template void dumpMemory(Stream& s) {} - + protected: mutable cholmod_common m_cholmod; cholmod_factor* m_cholmodFactor; @@ -366,13 +349,223 @@ class CholmodDecomposition int m_analysisIsOk; }; +/** \ingroup CholmodSupport_Module + * \class CholmodSimplicialLLT + * \brief A simplicial direct Cholesky (LLT) factorization and solver based on Cholmod + * + * This class allows to solve for A.X = B sparse linear problems via a simplicial LL^T Cholesky factorization + * using the Cholmod library. + * This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Thefore, it has little practical interest. + * The sparse matrix A must be selfajoint and positive definite. The vectors or matrices + * X and B can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * + * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. + * + * \sa \ref TutorialSparseDirectSolvers, class CholmodSupernodalLLT, class SimplicialLLT + */ +template +class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT<_MatrixType, _UpLo> > +{ + typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT> Base; + using Base::m_cholmod; + + public: + + typedef _MatrixType MatrixType; + + CholmodSimplicialLLT() : Base() { init(); } + + CholmodSimplicialLLT(const MatrixType& matrix) : Base() + { + init(); + compute(matrix); + } + + ~CholmodSimplicialLLT() {} + protected: + void init() + { + m_cholmod.final_asis = 0; + m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; + m_cholmod.final_ll = 1; + } +}; + + +/** \ingroup CholmodSupport_Module + * \class CholmodSimplicialLDLT + * \brief A simplicial direct Cholesky (LDLT) factorization and solver based on Cholmod + * + * This class allows to solve for A.X = B sparse linear problems via a simplicial LDL^T Cholesky factorization + * using the Cholmod library. + * This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Thefore, it has little practical interest. + * The sparse matrix A must be selfajoint and positive definite. The vectors or matrices + * X and B can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * + * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. 
+ * + * \sa \ref TutorialSparseDirectSolvers, class CholmodSupernodalLLT, class SimplicialLDLT + */ +template +class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT<_MatrixType, _UpLo> > +{ + typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT> Base; + using Base::m_cholmod; + + public: + + typedef _MatrixType MatrixType; + + CholmodSimplicialLDLT() : Base() { init(); } + + CholmodSimplicialLDLT(const MatrixType& matrix) : Base() + { + init(); + compute(matrix); + } + + ~CholmodSimplicialLDLT() {} + protected: + void init() + { + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; + } +}; + +/** \ingroup CholmodSupport_Module + * \class CholmodSupernodalLLT + * \brief A supernodal Cholesky (LLT) factorization and solver based on Cholmod + * + * This class allows to solve for A.X = B sparse linear problems via a supernodal LL^T Cholesky factorization + * using the Cholmod library. + * This supernodal variant performs best on dense enough problems, e.g., 3D FEM, or very high order 2D FEM. + * The sparse matrix A must be selfajoint and positive definite. The vectors or matrices + * X and B can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * + * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. + * + * \sa \ref TutorialSparseDirectSolvers + */ +template +class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT<_MatrixType, _UpLo> > +{ + typedef CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT> Base; + using Base::m_cholmod; + + public: + + typedef _MatrixType MatrixType; + + CholmodSupernodalLLT() : Base() { init(); } + + CholmodSupernodalLLT(const MatrixType& matrix) : Base() + { + init(); + compute(matrix); + } + + ~CholmodSupernodalLLT() {} + protected: + void init() + { + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_SUPERNODAL; + } +}; + +/** \ingroup CholmodSupport_Module + * \class CholmodDecomposition + * \brief A general Cholesky factorization and solver based on Cholmod + * + * This class allows to solve for A.X = B sparse linear problems via a LL^T or LDL^T Cholesky factorization + * using the Cholmod library. The sparse matrix A must be selfajoint and positive definite. The vectors or matrices + * X and B can be either dense or sparse. + * + * This variant permits to change the underlying Cholesky method at runtime. + * On the other hand, it does not provide access to the result of the factorization. + * The default is to let Cholmod automatically choose between a simplicial and supernodal factorization. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * + * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. 
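A usage sketch of the Cholmod solver classes introduced above (illustrative only, not part of the patch): it assumes the CHOLMOD headers and libraries are available at build and link time, and that A is a filled, selfadjoint positive definite sparse matrix.

// Illustrative sketch: the classes above share the compute()/solve()/info() interface
// of CholmodBase, so any of them can be swapped in here.
#include <Eigen/CholmodSupport>
#include <Eigen/SparseCore>

Eigen::VectorXd solveWithCholmod(const Eigen::SparseMatrix<double>& A,
                                 const Eigen::VectorXd& b)
{
  Eigen::CholmodSupernodalLLT<Eigen::SparseMatrix<double> > solver;
  solver.compute(A);                       // analyzePattern() + factorize()
  if (solver.info() != Eigen::Success)
    return Eigen::VectorXd();
  return solver.solve(b);
}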
+ * + * \sa \ref TutorialSparseDirectSolvers + */ +template +class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecomposition<_MatrixType, _UpLo> > +{ + typedef CholmodBase<_MatrixType, _UpLo, CholmodDecomposition> Base; + using Base::m_cholmod; + + public: + + typedef _MatrixType MatrixType; + + CholmodDecomposition() : Base() { init(); } + + CholmodDecomposition(const MatrixType& matrix) : Base() + { + init(); + compute(matrix); + } + + ~CholmodDecomposition() {} + + void setMode(CholmodMode mode) + { + switch(mode) + { + case CholmodAuto: + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_AUTO; + break; + case CholmodSimplicialLLt: + m_cholmod.final_asis = 0; + m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; + m_cholmod.final_ll = 1; + break; + case CholmodSupernodalLLt: + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_SUPERNODAL; + break; + case CholmodLDLt: + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; + break; + default: + break; + } + } + protected: + void init() + { + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_AUTO; + } +}; + namespace internal { -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> +template +struct solve_retval, Rhs> + : solve_retval_base, Rhs> { - typedef CholmodDecomposition<_MatrixType,_UpLo> Dec; + typedef CholmodBase<_MatrixType,_UpLo,Derived> Dec; EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) template void evalTo(Dest& dst) const @@ -381,11 +574,11 @@ struct solve_retval, Rhs> } }; -template -struct sparse_solve_retval, Rhs> - : sparse_solve_retval_base, Rhs> +template +struct sparse_solve_retval, Rhs> + : sparse_solve_retval_base, Rhs> { - typedef CholmodDecomposition<_MatrixType,_UpLo> Dec; + typedef CholmodBase<_MatrixType,_UpLo,Derived> Dec; EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) template void evalTo(Dest& dst) const @@ -394,6 +587,8 @@ struct sparse_solve_retval, Rhs> } }; -} +} // end namespace internal + +} // end namespace Eigen #endif // EIGEN_CHOLMODSUPPORT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Array.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Array.h index a11fb1b53..4762933d7 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Array.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Array.h @@ -25,6 +25,8 @@ #ifndef EIGEN_ARRAY_H #define EIGEN_ARRAY_H +namespace Eigen { + /** \class Array * \ingroup Core_Module * @@ -316,5 +318,6 @@ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(d) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cf) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cd) +} // end namespace Eigen #endif // EIGEN_ARRAY_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/ArrayBase.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/ArrayBase.h index 9399ac3d1..ec3a4be43 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/ArrayBase.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/ArrayBase.h @@ -25,6 +25,8 @@ #ifndef EIGEN_ARRAYBASE_H #define EIGEN_ARRAYBASE_H +namespace Eigen { + template class MatrixWrapper; /** \class ArrayBase @@ -159,7 +161,7 @@ template class ArrayBase /** \returns an \link MatrixBase Matrix \endlink expression of this array * \sa MatrixBase::array() */ MatrixWrapper matrix() { return derived(); } - const MatrixWrapper matrix() const { return derived(); } + const MatrixWrapper matrix() const { return derived(); } // template // inline void evalTo(Dest& dst) const { dst = matrix(); } @@ -174,10 +176,10 @@ template class ArrayBase protected: // mixing arrays and matrices is not legal template Derived& operator+=(const MatrixBase& ) - {EIGEN_STATIC_ASSERT(sizeof(typename 
OtherDerived::Scalar)==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);} + {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} // mixing arrays and matrices is not legal template Derived& operator-=(const MatrixBase& ) - {EIGEN_STATIC_ASSERT(sizeof(typename OtherDerived::Scalar)==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);} + {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} }; /** replaces \c *this by \c *this - \a other. @@ -236,4 +238,6 @@ ArrayBase::operator/=(const ArrayBase& other) return derived(); } +} // end namespace Eigen + #endif // EIGEN_ARRAYBASE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/ArrayWrapper.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/ArrayWrapper.h index 07f082e1e..f8a442cee 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/ArrayWrapper.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/ArrayWrapper.h @@ -25,6 +25,8 @@ #ifndef EIGEN_ARRAYWRAPPER_H #define EIGEN_ARRAYWRAPPER_H +namespace Eigen { + /** \class ArrayWrapper * \ingroup Core_Module * @@ -61,7 +63,7 @@ class ArrayWrapper : public ArrayBase > typedef typename internal::nested::type NestedExpressionType; - inline ArrayWrapper(const ExpressionType& matrix) : m_expression(matrix) {} + inline ArrayWrapper(ExpressionType& matrix) : m_expression(matrix) {} inline Index rows() const { return m_expression.rows(); } inline Index cols() const { return m_expression.cols(); } @@ -71,7 +73,7 @@ class ArrayWrapper : public ArrayBase > inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); } inline const Scalar* data() const { return m_expression.data(); } - inline const CoeffReturnType coeff(Index row, Index col) const + inline CoeffReturnType coeff(Index row, Index col) const { return m_expression.coeff(row, col); } @@ -86,7 +88,7 @@ class ArrayWrapper : public ArrayBase > return m_expression.const_cast_derived().coeffRef(row, col); } - inline const CoeffReturnType coeff(Index index) const + inline CoeffReturnType coeff(Index index) const { return m_expression.coeff(index); } @@ -128,8 +130,14 @@ class ArrayWrapper : public ArrayBase > template inline void evalTo(Dest& dst) const { dst = m_expression; } + const typename internal::remove_all::type& + nestedExpression() const + { + return m_expression; + } + protected: - const NestedExpressionType m_expression; + NestedExpressionType m_expression; }; /** \class MatrixWrapper @@ -168,7 +176,7 @@ class MatrixWrapper : public MatrixBase > typedef typename internal::nested::type NestedExpressionType; - inline MatrixWrapper(const ExpressionType& matrix) : m_expression(matrix) {} + inline MatrixWrapper(ExpressionType& matrix) : m_expression(matrix) {} inline Index rows() const { return m_expression.rows(); } inline Index cols() const { return m_expression.cols(); } @@ -178,7 +186,7 @@ class MatrixWrapper : public MatrixBase > inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); } inline const Scalar* data() const { return m_expression.data(); } - inline const CoeffReturnType coeff(Index row, Index col) const + inline CoeffReturnType coeff(Index row, Index col) const { return m_expression.coeff(row, col); } @@ -193,7 +201,7 @@ class MatrixWrapper : public MatrixBase > return m_expression.derived().coeffRef(row, col); } - inline const CoeffReturnType coeff(Index index) const + inline CoeffReturnType coeff(Index index) const { return m_expression.coeff(index); } @@ -232,8 +240,16 @@ class MatrixWrapper : 
public MatrixBase > m_expression.const_cast_derived().template writePacket(index, x); } + const typename internal::remove_all::type& + nestedExpression() const + { + return m_expression; + } + protected: - const NestedExpressionType m_expression; + NestedExpressionType m_expression; }; +} // end namespace Eigen + #endif // EIGEN_ARRAYWRAPPER_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Assign.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Assign.h index 3a17152f0..75390acf3 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Assign.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Assign.h @@ -27,6 +27,8 @@ #ifndef EIGEN_ASSIGN_H #define EIGEN_ASSIGN_H +namespace Eigen { + namespace internal { /*************************************************************************** @@ -152,7 +154,7 @@ struct assign_DefaultTraversal_CompleteUnrolling inner = Index % Derived1::InnerSizeAtCompileTime }; - EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) { dst.copyCoeffByOuterInner(outer, inner, src); assign_DefaultTraversal_CompleteUnrolling::run(dst, src); @@ -162,13 +164,13 @@ struct assign_DefaultTraversal_CompleteUnrolling template struct assign_DefaultTraversal_CompleteUnrolling { - EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &) {} + static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &) {} }; template struct assign_DefaultTraversal_InnerUnrolling { - EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src, int outer) + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, int outer) { dst.copyCoeffByOuterInner(outer, Index, src); assign_DefaultTraversal_InnerUnrolling::run(dst, src, outer); @@ -178,7 +180,7 @@ struct assign_DefaultTraversal_InnerUnrolling template struct assign_DefaultTraversal_InnerUnrolling { - EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &, int) {} + static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, int) {} }; /*********************** @@ -188,7 +190,7 @@ struct assign_DefaultTraversal_InnerUnrolling template struct assign_LinearTraversal_CompleteUnrolling { - EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) { dst.copyCoeff(Index, src); assign_LinearTraversal_CompleteUnrolling::run(dst, src); @@ -198,7 +200,7 @@ struct assign_LinearTraversal_CompleteUnrolling template struct assign_LinearTraversal_CompleteUnrolling { - EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &) {} + static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &) {} }; /************************** @@ -214,7 +216,7 @@ struct assign_innervec_CompleteUnrolling JointAlignment = assign_traits::JointAlignment }; - EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) { dst.template copyPacketByOuterInner(outer, inner, src); assign_innervec_CompleteUnrolling struct assign_innervec_CompleteUnrolling { - EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &) {} + static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &) {} }; template struct assign_innervec_InnerUnrolling { - EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src, int outer) + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, int outer) { dst.template copyPacketByOuterInner(outer, Index, src); 
assign_innervec_InnerUnrolling struct assign_innervec_InnerUnrolling { - EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &, int) {} + static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, int) {} }; /*************************************************************************** @@ -251,24 +253,25 @@ struct assign_innervec_InnerUnrolling template::Traversal, - int Unrolling = assign_traits::Unrolling> + int Unrolling = assign_traits::Unrolling, + int Version = Specialized> struct assign_impl; /************************ *** Default traversal *** ************************/ -template -struct assign_impl +template +struct assign_impl { - inline static void run(Derived1 &, const Derived2 &) { } + static inline void run(Derived1 &, const Derived2 &) { } }; -template -struct assign_impl +template +struct assign_impl { typedef typename Derived1::Index Index; - inline static void run(Derived1 &dst, const Derived2 &src) + static inline void run(Derived1 &dst, const Derived2 &src) { const Index innerSize = dst.innerSize(); const Index outerSize = dst.outerSize(); @@ -278,21 +281,21 @@ struct assign_impl } }; -template -struct assign_impl +template +struct assign_impl { - EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) { assign_DefaultTraversal_CompleteUnrolling ::run(dst, src); } }; -template -struct assign_impl +template +struct assign_impl { typedef typename Derived1::Index Index; - EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) { const Index outerSize = dst.outerSize(); for(Index outer = 0; outer < outerSize; ++outer) @@ -305,11 +308,11 @@ struct assign_impl *** Linear traversal *** ***********************/ -template -struct assign_impl +template +struct assign_impl { typedef typename Derived1::Index Index; - inline static void run(Derived1 &dst, const Derived2 &src) + static inline void run(Derived1 &dst, const Derived2 &src) { const Index size = dst.size(); for(Index i = 0; i < size; ++i) @@ -317,10 +320,10 @@ struct assign_impl } }; -template -struct assign_impl +template +struct assign_impl { - EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) { assign_LinearTraversal_CompleteUnrolling ::run(dst, src); @@ -331,11 +334,11 @@ struct assign_impl *** Inner vectorization *** **************************/ -template -struct assign_impl +template +struct assign_impl { typedef typename Derived1::Index Index; - inline static void run(Derived1 &dst, const Derived2 &src) + static inline void run(Derived1 &dst, const Derived2 &src) { const Index innerSize = dst.innerSize(); const Index outerSize = dst.outerSize(); @@ -346,21 +349,21 @@ struct assign_impl } }; -template -struct assign_impl +template +struct assign_impl { - EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) { assign_innervec_CompleteUnrolling ::run(dst, src); } }; -template -struct assign_impl +template +struct assign_impl { typedef typename Derived1::Index Index; - EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) { const Index outerSize = dst.outerSize(); for(Index outer = 0; outer < outerSize; ++outer) @@ -398,11 +401,11 @@ struct 
unaligned_assign_impl } }; -template -struct assign_impl +template +struct assign_impl { typedef typename Derived1::Index Index; - EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) { const Index size = dst.size(); typedef packet_traits PacketTraits; @@ -412,7 +415,7 @@ struct assign_impl srcAlignment = assign_traits::JointAlignment }; const Index alignedStart = assign_traits::DstIsAligned ? 0 - : first_aligned(&dst.coeffRef(0), size); + : internal::first_aligned(&dst.coeffRef(0), size); const Index alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize; unaligned_assign_impl::DstIsAligned!=0>::run(src,dst,0,alignedStart); @@ -426,11 +429,11 @@ struct assign_impl } }; -template -struct assign_impl +template +struct assign_impl { typedef typename Derived1::Index Index; - EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) { enum { size = Derived1::SizeAtCompileTime, packetSize = packet_traits::size, @@ -445,11 +448,11 @@ struct assign_impl -struct assign_impl +template +struct assign_impl { typedef typename Derived1::Index Index; - inline static void run(Derived1 &dst, const Derived2 &src) + static inline void run(Derived1 &dst, const Derived2 &src) { typedef packet_traits PacketTraits; enum { @@ -463,7 +466,7 @@ struct assign_impl const Index outerSize = dst.outerSize(); const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0; Index alignedStart = ((!alignable) || assign_traits::DstIsAligned) ? 0 - : first_aligned(&dst.coeffRef(0,0), innerSize); + : internal::first_aligned(&dst.coeffRef(0,0), innerSize); for(Index outer = 0; outer < outerSize; ++outer) { @@ -531,19 +534,19 @@ struct assign_selector; template struct assign_selector { - EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.derived()); } + static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.derived()); } }; template struct assign_selector { - EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.eval()); } + static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.eval()); } }; template struct assign_selector { - EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose()); } + static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose()); } }; template struct assign_selector { - EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose().eval()); } + static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose().eval()); } }; } // end namespace internal @@ -590,4 +593,6 @@ EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const ReturnByValue< return derived(); } +} // end namespace Eigen + #endif // EIGEN_ASSIGN_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Assign_MKL.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Assign_MKL.h new file mode 100644 index 000000000..428c6367b --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Assign_MKL.h @@ -0,0 +1,224 @@ +/* + Copyright (c) 2011, Intel Corporation. 
All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * MKL VML support for coefficient-wise unary Eigen expressions like a=b.sin() + ******************************************************************************** +*/ + +#ifndef EIGEN_ASSIGN_VML_H +#define EIGEN_ASSIGN_VML_H + +namespace Eigen { + +namespace internal { + +template struct vml_call +{ enum { IsSupported = 0 }; }; + +template +class vml_assign_traits +{ + private: + enum { + DstHasDirectAccess = Dst::Flags & DirectAccessBit, + SrcHasDirectAccess = Src::Flags & DirectAccessBit, + + StorageOrdersAgree = (int(Dst::IsRowMajor) == int(Src::IsRowMajor)), + InnerSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::SizeAtCompileTime) + : int(Dst::Flags)&RowMajorBit ? int(Dst::ColsAtCompileTime) + : int(Dst::RowsAtCompileTime), + InnerMaxSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime) + : int(Dst::Flags)&RowMajorBit ? int(Dst::MaxColsAtCompileTime) + : int(Dst::MaxRowsAtCompileTime), + MaxSizeAtCompileTime = Dst::SizeAtCompileTime, + + MightEnableVml = vml_call::IsSupported && StorageOrdersAgree && DstHasDirectAccess && SrcHasDirectAccess + && Src::InnerStrideAtCompileTime==1 && Dst::InnerStrideAtCompileTime==1, + MightLinearize = MightEnableVml && (int(Dst::Flags) & int(Src::Flags) & LinearAccessBit), + VmlSize = MightLinearize ? MaxSizeAtCompileTime : InnerMaxSize, + LargeEnough = VmlSize==Dynamic || VmlSize>=EIGEN_MKL_VML_THRESHOLD, + MayEnableVml = MightEnableVml && LargeEnough, + MayLinearize = MayEnableVml && MightLinearize + }; + public: + enum { + Traversal = MayLinearize ? LinearVectorizedTraversal + : MayEnableVml ? 
InnerVectorizedTraversal + : DefaultTraversal + }; +}; + +template::Traversal > +struct vml_assign_impl + : assign_impl,Traversal,Unrolling,BuiltIn> +{ +}; + +template +struct vml_assign_impl +{ + typedef typename Derived1::Scalar Scalar; + typedef typename Derived1::Index Index; + static inline void run(Derived1& dst, const CwiseUnaryOp& src) + { + // in case we want to (or have to) skip VML at runtime we can call: + // assign_impl,Traversal,Unrolling,BuiltIn>::run(dst,src); + const Index innerSize = dst.innerSize(); + const Index outerSize = dst.outerSize(); + for(Index outer = 0; outer < outerSize; ++outer) { + const Scalar *src_ptr = src.IsRowMajor ? &(src.nestedExpression().coeffRef(outer,0)) : + &(src.nestedExpression().coeffRef(0, outer)); + Scalar *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer,0)) : &(dst.coeffRef(0, outer)); + vml_call::run(src.functor(), innerSize, src_ptr, dst_ptr ); + } + } +}; + +template +struct vml_assign_impl +{ + static inline void run(Derived1& dst, const CwiseUnaryOp& src) + { + // in case we want to (or have to) skip VML at runtime we can call: + // assign_impl,Traversal,Unrolling,BuiltIn>::run(dst,src); + vml_call::run(src.functor(), dst.size(), src.nestedExpression().data(), dst.data() ); + } +}; + +// Macroses + +#define EIGEN_MKL_VML_SPECIALIZE_ASSIGN(TRAVERSAL,UNROLLING) \ + template \ + struct assign_impl, TRAVERSAL, UNROLLING, Specialized> { \ + static inline void run(Derived1 &dst, const Eigen::CwiseUnaryOp &src) { \ + vml_assign_impl::run(dst, src); \ + } \ + }; + +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(DefaultTraversal,NoUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(DefaultTraversal,CompleteUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(DefaultTraversal,InnerUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearTraversal,NoUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearTraversal,CompleteUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(InnerVectorizedTraversal,NoUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(InnerVectorizedTraversal,CompleteUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(InnerVectorizedTraversal,InnerUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearVectorizedTraversal,CompleteUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearVectorizedTraversal,NoUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(SliceVectorizedTraversal,NoUnrolling) + + +#if !defined (EIGEN_FAST_MATH) || (EIGEN_FAST_MATH != 1) +#define EIGEN_MKL_VML_MODE VML_HA +#else +#define EIGEN_MKL_VML_MODE VML_LA +#endif + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE) \ + template<> struct vml_call< scalar_##EIGENOP##_op > { \ + enum { IsSupported = 1 }; \ + static inline void run( const scalar_##EIGENOP##_op& /*func*/, \ + int size, const EIGENTYPE* src, EIGENTYPE* dst) { \ + VMLOP(size, (const VMLTYPE*)src, (VMLTYPE*)dst); \ + } \ + }; + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE) \ + template<> struct vml_call< scalar_##EIGENOP##_op > { \ + enum { IsSupported = 1 }; \ + static inline void run( const scalar_##EIGENOP##_op& /*func*/, \ + int size, const EIGENTYPE* src, EIGENTYPE* dst) { \ + MKL_INT64 vmlMode = EIGEN_MKL_VML_MODE; \ + VMLOP(size, (const VMLTYPE*)src, (VMLTYPE*)dst, vmlMode); \ + } \ + }; + +#define EIGEN_MKL_VML_DECLARE_POW_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE) \ + template<> struct vml_call< scalar_##EIGENOP##_op > { \ + enum { IsSupported = 1 }; \ + static inline void run( const scalar_##EIGENOP##_op& func, \ + int size, const EIGENTYPE* src, EIGENTYPE* dst) { \ + EIGENTYPE exponent = func.m_exponent; \ + 
MKL_INT64 vmlMode = EIGEN_MKL_VML_MODE; \ + VMLOP(&size, (const VMLTYPE*)src, (const VMLTYPE*)&exponent, \ + (VMLTYPE*)dst, &vmlMode); \ + } \ + }; + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vs##VMLOP, float, float) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vd##VMLOP, double, double) + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX(EIGENOP, VMLOP) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vc##VMLOP, scomplex, MKL_Complex8) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vz##VMLOP, dcomplex, MKL_Complex16) + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS(EIGENOP, VMLOP) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX(EIGENOP, VMLOP) + + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL_LA(EIGENOP, VMLOP) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vms##VMLOP, float, float) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vmd##VMLOP, double, double) + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX_LA(EIGENOP, VMLOP) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vmc##VMLOP, scomplex, MKL_Complex8) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vmz##VMLOP, dcomplex, MKL_Complex16) + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(EIGENOP, VMLOP) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL_LA(EIGENOP, VMLOP) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX_LA(EIGENOP, VMLOP) + + +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(sin, Sin) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(asin, Asin) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(cos, Cos) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(acos, Acos) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(tan, Tan) +//EIGEN_MKL_VML_DECLARE_UNARY_CALLS(abs, Abs) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(exp, Exp) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(log, Ln) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(sqrt, Sqrt) + +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(square, Sqr) + +// The vm*powx functions are not available in the windows version of MKL.
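The declarations above wire selected coefficient-wise functions to MKL's VML and only take effect in builds that actually link against MKL. A sketch under that assumption (EIGEN_USE_MKL_ALL is Eigen's opt-in switch for the MKL backends; drop the define to fall back to the built-in kernels):

#define EIGEN_USE_MKL_ALL   // assumes an Intel MKL installation is available
#include <Eigen/Dense>

int main()
{
  Eigen::ArrayXd x = Eigen::ArrayXd::LinSpaced(1 << 16, 0.0, 3.14);
  // Large, directly accessible, unit-stride expression: may be routed to MKL's
  // vdSin through the vml_assign_impl specializations once the size exceeds
  // EIGEN_MKL_VML_THRESHOLD.
  Eigen::ArrayXd y = x.sin();
  return y.size() == x.size() ? 0 : 1;
}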
+#ifdef _WIN32 +EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmspowx_, float, float) +EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmdpowx_, double, double) +EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmcpowx_, scomplex, MKL_Complex8) +EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmzpowx_, dcomplex, MKL_Complex16) +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_ASSIGN_VML_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/BandMatrix.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/BandMatrix.h index 2570d7b55..8ef917de1 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/BandMatrix.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/BandMatrix.h @@ -25,8 +25,9 @@ #ifndef EIGEN_BANDMATRIX_H #define EIGEN_BANDMATRIX_H -namespace internal { +namespace Eigen { +namespace internal { template class BandMatrixBase : public EigenBase @@ -343,4 +344,6 @@ class TridiagonalMatrix : public BandMatrix::type& nestedExpression() const + { + return m_xpr; + } + + Index startRow() const + { + return m_startRow.value(); + } + + Index startCol() const + { + return m_startCol.value(); + } + protected: const typename XprType::Nested m_xpr; @@ -304,6 +321,11 @@ class Block init(); } + const typename internal::remove_all::type& nestedExpression() const + { + return m_xpr; + } + /** \sa MapBase::innerStride() */ inline Index innerStride() const { @@ -341,9 +363,10 @@ class Block : m_xpr.innerStride(); } - const typename XprType::Nested m_xpr; - int m_outerStride; + typename XprType::Nested m_xpr; + Index m_outerStride; }; +} // end namespace Eigen #endif // EIGEN_BLOCK_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/BooleanRedux.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/BooleanRedux.h index 5c3444a57..2c554a57d 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/BooleanRedux.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/BooleanRedux.h @@ -25,6 +25,8 @@ #ifndef EIGEN_ALLANDANY_H #define EIGEN_ALLANDANY_H +namespace Eigen { + namespace internal { template @@ -35,7 +37,7 @@ struct all_unroller row = (UnrollCount-1) % Derived::RowsAtCompileTime }; - inline static bool run(const Derived &mat) + static inline bool run(const Derived &mat) { return all_unroller::run(mat) && mat.coeff(row, col); } @@ -44,13 +46,13 @@ struct all_unroller template struct all_unroller { - inline static bool run(const Derived &mat) { return mat.coeff(0, 0); } + static inline bool run(const Derived &mat) { return mat.coeff(0, 0); } }; template struct all_unroller { - inline static bool run(const Derived &) { return false; } + static inline bool run(const Derived &) { return false; } }; template @@ -61,7 +63,7 @@ struct any_unroller row = (UnrollCount-1) % Derived::RowsAtCompileTime }; - inline static bool run(const Derived &mat) + static inline bool run(const Derived &mat) { return any_unroller::run(mat) || mat.coeff(row, col); } @@ -70,13 +72,13 @@ struct any_unroller template struct any_unroller { - inline static bool run(const Derived &mat) { return mat.coeff(0, 0); } + static inline bool run(const Derived &mat) { return mat.coeff(0, 0); } }; template struct any_unroller { - inline static bool run(const Derived &) { return false; } + static inline bool run(const Derived &) { return false; } }; } // end namespace internal @@ -146,4 +148,6 @@ inline typename DenseBase::Index DenseBase::count() const return derived().template cast().template cast().sum(); } +} // end namespace Eigen + #endif // EIGEN_ALLANDANY_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/CommaInitializer.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/CommaInitializer.h index 92422bf2f..f9ec1d587 
100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/CommaInitializer.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/CommaInitializer.h @@ -26,6 +26,8 @@ #ifndef EIGEN_COMMAINITIALIZER_H #define EIGEN_COMMAINITIALIZER_H +namespace Eigen { + /** \class CommaInitializer * \ingroup Core_Module * @@ -147,4 +149,6 @@ DenseBase::operator<<(const DenseBase& other) return CommaInitializer(*static_cast(this), other); } +} // end namespace Eigen + #endif // EIGEN_COMMAINITIALIZER_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/CwiseBinaryOp.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/CwiseBinaryOp.h index 7386b2e18..32599a7d9 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/CwiseBinaryOp.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/CwiseBinaryOp.h @@ -26,6 +26,8 @@ #ifndef EIGEN_CWISE_BINARY_OP_H #define EIGEN_CWISE_BINARY_OP_H +namespace Eigen { + /** \class CwiseBinaryOp * \ingroup Core_Module * @@ -167,8 +169,8 @@ class CwiseBinaryOp : internal::no_assignment_operator, const BinaryOp& functor() const { return m_functor; } protected: - const LhsNested m_lhs; - const RhsNested m_rhs; + LhsNested m_lhs; + RhsNested m_rhs; const BinaryOp m_functor; }; @@ -237,4 +239,6 @@ MatrixBase::operator+=(const MatrixBase& other) return derived(); } +} // end namespace Eigen + #endif // EIGEN_CWISE_BINARY_OP_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/CwiseNullaryOp.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/CwiseNullaryOp.h index c616e7ae1..a6d5e0934 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/CwiseNullaryOp.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/CwiseNullaryOp.h @@ -25,6 +25,8 @@ #ifndef EIGEN_CWISE_NULLARY_OP_H #define EIGEN_CWISE_NULLARY_OP_H +namespace Eigen { + /** \class CwiseNullaryOp * \ingroup Core_Module * @@ -101,6 +103,9 @@ class CwiseNullaryOp : internal::no_assignment_operator, return m_functor.packetOp(index); } + /** \returns the functor representing the nullary operation */ + const NullaryOp& functor() const { return m_functor; } + protected: const internal::variable_if_dynamic m_rows; const internal::variable_if_dynamic m_cols; @@ -238,6 +243,8 @@ DenseBase::Constant(const Scalar& value) * assumed to be a(0), a(1), ..., a(size). This assumption allows for better vectorization * and yields faster code than the random access version. * + * When size is set to 1, a vector of length 1 containing 'high' is returned. + * * \only_for_vectors * * Example: \include DenseBase_LinSpaced_seq.cpp @@ -270,6 +277,7 @@ DenseBase::LinSpaced(Sequential_t, const Scalar& low, const Scalar& hig * \brief Sets a linearly space vector. * * The function generates 'size' equally spaced values in the closed interval [low,high]. + * When size is set to 1, a vector of length 1 containing 'high' is returned. * * \only_for_vectors * @@ -381,6 +389,7 @@ PlainObjectBase::setConstant(Index rows, Index cols, const Scalar& valu * \brief Sets a linearly space vector. * * The function generates 'size' equally spaced values in the closed interval [low,high]. + * When size is set to 1, a vector of length 1 containing 'high' is returned. * * \only_for_vectors * @@ -396,6 +405,23 @@ EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(Index size, const return derived() = Derived::NullaryExpr(size, internal::linspaced_op(low,high,size)); } +/** + * \brief Sets a linearly space vector. + * + * The function fill *this with equally spaced values in the closed interval [low,high]. + * When size is set to 1, a vector of length 1 containing 'high' is returned. 
+ * + * \only_for_vectors + * + * \sa setLinSpaced(Index, const Scalar&, const Scalar&), CwiseNullaryOp + */ +template +EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(const Scalar& low, const Scalar& high) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return setLinSpaced(size(), low, high); +} + // zero: /** \returns an expression of a zero matrix. @@ -848,4 +874,6 @@ template EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitW() { return Derived::Unit(3); } +} // end namespace Eigen + #endif // EIGEN_CWISE_NULLARY_OP_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/CwiseUnaryOp.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/CwiseUnaryOp.h index 958571d64..9110c9800 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/CwiseUnaryOp.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/CwiseUnaryOp.h @@ -26,6 +26,8 @@ #ifndef EIGEN_CWISE_UNARY_OP_H #define EIGEN_CWISE_UNARY_OP_H +namespace Eigen { + /** \class CwiseUnaryOp * \ingroup Core_Module * @@ -95,7 +97,7 @@ class CwiseUnaryOp : internal::no_assignment_operator, nestedExpression() { return m_xpr.const_cast_derived(); } protected: - const typename XprType::Nested m_xpr; + typename XprType::Nested m_xpr; const UnaryOp m_functor; }; @@ -134,4 +136,6 @@ class CwiseUnaryOpImpl } }; +} // end namespace Eigen + #endif // EIGEN_CWISE_UNARY_OP_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/CwiseUnaryView.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/CwiseUnaryView.h index d24ef0373..bf16243d5 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/CwiseUnaryView.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/CwiseUnaryView.h @@ -25,6 +25,8 @@ #ifndef EIGEN_CWISE_UNARY_VIEW_H #define EIGEN_CWISE_UNARY_VIEW_H +namespace Eigen { + /** \class CwiseUnaryView * \ingroup Core_Module * @@ -97,7 +99,7 @@ class CwiseUnaryView : internal::no_assignment_operator, protected: // FIXME changed from MatrixType::Nested because of a weird compilation error with sun CC - const typename internal::nested::type m_matrix; + typename internal::nested::type m_matrix; ViewOp m_functor; }; @@ -143,6 +145,6 @@ class CwiseUnaryViewImpl } }; - +} // end namespace Eigen #endif // EIGEN_CWISE_UNARY_VIEW_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/DenseBase.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/DenseBase.h index 838fa4030..1882dcca4 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/DenseBase.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/DenseBase.h @@ -26,6 +26,8 @@ #ifndef EIGEN_DENSEBASE_H #define EIGEN_DENSEBASE_H +namespace Eigen { + /** \class DenseBase * \ingroup Core_Module * @@ -169,8 +171,8 @@ template class DenseBase IsRowMajor = int(Flags) & RowMajorBit, /**< True if this expression has row-major storage order. */ - InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? SizeAtCompileTime - : int(IsRowMajor) ? ColsAtCompileTime : RowsAtCompileTime, + InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime) + : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime), CoeffReadCost = internal::traits::CoeffReadCost, /**< This is a rough measure of how expensive it is to read one coefficient from @@ -376,12 +378,13 @@ template class DenseBase inline Derived& operator*=(const Scalar& other); inline Derived& operator/=(const Scalar& other); + typedef typename internal::add_const_on_value_type::type>::type EvalReturnType; /** \returns the matrix or vector obtained by evaluating this expression. 
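For illustration, the LinSpaced/setLinSpaced behaviour documented above, including the new size-less overload and the size-one convention, boils down to the following minimal sketch (assumes only the Core module; the trailing comments show the expected coefficients):

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::VectorXd v(5);
  v.setLinSpaced(0.0, 1.0);                                      // new overload: size taken from *this
  Eigen::VectorXd w = Eigen::VectorXd::LinSpaced(4, 0.0, 3.0);   // 0 1 2 3
  Eigen::VectorXd u = Eigen::VectorXd::LinSpaced(1, 0.0, 3.0);   // size 1: contains 'high', i.e. 3
  std::cout << v.transpose() << "\n" << w.transpose() << "\n" << u << "\n";
  return 0;
}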
* * Notice that in the case of a plain matrix or vector (not an expression) this function just returns * a const reference, in order to avoid a useless copy. */ - EIGEN_STRONG_INLINE const typename internal::eval::type eval() const + EIGEN_STRONG_INLINE EvalReturnType eval() const { // Even though MSVC does not honor strong inlining when the return type // is a dynamic matrix, we desperately need strong inlining for fixed @@ -540,4 +543,6 @@ template class DenseBase template explicit DenseBase(const DenseBase&); }; +} // end namespace Eigen + #endif // EIGEN_DENSEBASE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/DenseCoeffsBase.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/DenseCoeffsBase.h index e45238fb5..e1aa1a5f8 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/DenseCoeffsBase.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/DenseCoeffsBase.h @@ -25,6 +25,8 @@ #ifndef EIGEN_DENSECOEFFSBASE_H #define EIGEN_DENSECOEFFSBASE_H +namespace Eigen { + namespace internal { template struct add_const_on_value_type_if_arithmetic { @@ -710,16 +712,16 @@ namespace internal { template struct first_aligned_impl { - inline static typename Derived::Index run(const Derived&) + static inline typename Derived::Index run(const Derived&) { return 0; } }; template struct first_aligned_impl { - inline static typename Derived::Index run(const Derived& m) + static inline typename Derived::Index run(const Derived& m) { - return first_aligned(&m.const_cast_derived().coeffRef(0,0), m.size()); + return internal::first_aligned(&m.const_cast_derived().coeffRef(0,0), m.size()); } }; @@ -729,7 +731,7 @@ struct first_aligned_impl * documentation. */ template -inline static typename Derived::Index first_aligned(const Derived& m) +static inline typename Derived::Index first_aligned(const Derived& m) { return first_aligned_impl @@ -762,4 +764,6 @@ struct outer_stride_at_compile_time } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_DENSECOEFFSBASE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/DenseStorage.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/DenseStorage.h index 813053b00..0ea05bc90 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/DenseStorage.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/DenseStorage.h @@ -33,6 +33,8 @@ #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN #endif +namespace Eigen { + namespace internal { struct constructor_without_unaligned_array_assert {}; @@ -104,8 +106,8 @@ template class DenseSt : m_data(internal::constructor_without_unaligned_array_assert()) {} inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {} inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); } - inline static DenseIndex rows(void) {return _Rows;} - inline static DenseIndex cols(void) {return _Cols;} + static inline DenseIndex rows(void) {return _Rows;} + static inline DenseIndex cols(void) {return _Cols;} inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {} inline void resize(DenseIndex,DenseIndex,DenseIndex) {} inline const T *data() const { return m_data.array; } @@ -120,14 +122,24 @@ template class DenseStorage class DenseStorage +: public DenseStorage { }; + +template class DenseStorage +: public DenseStorage { }; + +template class DenseStorage +: public DenseStorage { }; + // dynamic-size matrix with fixed-size storage template class DenseStorage { @@ -241,7 +253,7 @@ template class DenseStorage(m_data, _Rows*m_cols); } inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } - inline static DenseIndex rows(void) 
{return _Rows;} + static inline DenseIndex rows(void) {return _Rows;} inline DenseIndex cols(void) const {return m_cols;} inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex cols) { @@ -278,7 +290,7 @@ template class DenseStorage(m_data, _Cols*m_rows); } inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } inline DenseIndex rows(void) const {return m_rows;} - inline static DenseIndex cols(void) {return _Cols;} + static inline DenseIndex cols(void) {return _Cols;} inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex) { m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, m_rows*_Cols); @@ -301,4 +313,6 @@ template class DenseStorage +// Copyright (C) 2009-2010 Gael Guennebaud // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public @@ -25,6 +26,8 @@ #ifndef EIGEN_DIAGONAL_H #define EIGEN_DIAGONAL_H +namespace Eigen { + /** \class Diagonal * \ingroup Core_Module * @@ -101,6 +104,15 @@ template class Diagonal return 0; } + typedef typename internal::conditional< + internal::is_lvalue::value, + Scalar, + const Scalar + >::type ScalarWithConstIfNotLvalue; + + inline ScalarWithConstIfNotLvalue* data() { return &(m_matrix.const_cast_derived().coeffRef(rowOffset(), colOffset())); } + inline const Scalar* data() const { return &(m_matrix.const_cast_derived().coeffRef(rowOffset(), colOffset())); } + inline Scalar& coeffRef(Index row, Index) { EIGEN_STATIC_ASSERT_LVALUE(MatrixType) @@ -133,8 +145,19 @@ template class Diagonal return m_matrix.coeff(index+rowOffset(), index+colOffset()); } + const typename internal::remove_all::type& + nestedExpression() const + { + return m_matrix; + } + + int index() const + { + return m_index.value(); + } + protected: - const typename MatrixType::Nested m_matrix; + typename MatrixType::Nested m_matrix; const internal::variable_if_dynamic m_index; private: @@ -224,4 +247,6 @@ MatrixBase::diagonal() const return derived(); } +} // end namespace Eigen + #endif // EIGEN_DIAGONAL_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/DiagonalMatrix.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/DiagonalMatrix.h index f41a74bfa..844f9864b 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/DiagonalMatrix.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/DiagonalMatrix.h @@ -26,6 +26,8 @@ #ifndef EIGEN_DIAGONALMATRIX_H #define EIGEN_DIAGONALMATRIX_H +namespace Eigen { + #ifndef EIGEN_PARSED_BY_DOXYGEN template class DiagonalBase : public EigenBase @@ -72,7 +74,7 @@ class DiagonalBase : public EigenBase const DiagonalProduct operator*(const MatrixBase &matrix) const; - inline const DiagonalWrapper, const DiagonalVectorType> > + inline const DiagonalWrapper, const DiagonalVectorType> > inverse() const { return diagonal().cwiseInverse(); @@ -251,13 +253,13 @@ class DiagonalWrapper #endif /** Constructor from expression of diagonal coefficients to wrap. */ - inline DiagonalWrapper(const DiagonalVectorType& diagonal) : m_diagonal(diagonal) {} + inline DiagonalWrapper(DiagonalVectorType& diagonal) : m_diagonal(diagonal) {} /** \returns a const reference to the wrapped expression of diagonal coefficients. 
*/ const DiagonalVectorType& diagonal() const { return m_diagonal; } protected: - const typename DiagonalVectorType::Nested m_diagonal; + typename DiagonalVectorType::Nested m_diagonal; }; /** \returns a pseudo-expression of a diagonal matrix with *this as vector of diagonal coefficients @@ -303,4 +305,6 @@ bool MatrixBase::isDiagonal(RealScalar prec) const return true; } +} // end namespace Eigen + #endif // EIGEN_DIAGONALMATRIX_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/DiagonalProduct.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/DiagonalProduct.h index de0c6ed11..9f6a99895 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/DiagonalProduct.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/DiagonalProduct.h @@ -26,6 +26,8 @@ #ifndef EIGEN_DIAGONALPRODUCT_H #define EIGEN_DIAGONALPRODUCT_H +namespace Eigen { + namespace internal { template struct traits > @@ -107,8 +109,8 @@ class DiagonalProduct : internal::no_assignment_operator, m_diagonal.diagonal().template packet(id)); } - const typename MatrixType::Nested m_matrix; - const typename DiagonalType::Nested m_diagonal; + typename MatrixType::Nested m_matrix; + typename DiagonalType::Nested m_diagonal; }; /** \returns the diagonal matrix product of \c *this by the diagonal matrix \a diagonal. @@ -131,5 +133,6 @@ DiagonalBase::operator*(const MatrixBase &matrix return DiagonalProduct(matrix.derived(), derived()); } +} // end namespace Eigen #endif // EIGEN_DIAGONALPRODUCT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Dot.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Dot.h index 42da78498..67dbbf8ba 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Dot.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Dot.h @@ -25,6 +25,8 @@ #ifndef EIGEN_DOT_H #define EIGEN_DOT_H +namespace Eigen { + namespace internal { // helper function for dot(). The problem is that if we put that in the body of dot(), then upon calling dot @@ -176,7 +178,7 @@ template struct lpNorm_selector { typedef typename NumTraits::Scalar>::Real RealScalar; - inline static RealScalar run(const MatrixBase& m) + static inline RealScalar run(const MatrixBase& m) { return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p); } @@ -185,7 +187,7 @@ struct lpNorm_selector template struct lpNorm_selector { - inline static typename NumTraits::Scalar>::Real run(const MatrixBase& m) + static inline typename NumTraits::Scalar>::Real run(const MatrixBase& m) { return m.cwiseAbs().sum(); } @@ -194,7 +196,7 @@ struct lpNorm_selector template struct lpNorm_selector { - inline static typename NumTraits::Scalar>::Real run(const MatrixBase& m) + static inline typename NumTraits::Scalar>::Real run(const MatrixBase& m) { return m.norm(); } @@ -203,7 +205,7 @@ struct lpNorm_selector template struct lpNorm_selector { - inline static typename NumTraits::Scalar>::Real run(const MatrixBase& m) + static inline typename NumTraits::Scalar>::Real run(const MatrixBase& m) { return m.cwiseAbs().maxCoeff(); } @@ -269,4 +271,6 @@ bool MatrixBase::isUnitary(RealScalar prec) const return true; } +} // end namespace Eigen + #endif // EIGEN_DOT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/EigenBase.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/EigenBase.h index 0472539af..77d4c25d5 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/EigenBase.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/EigenBase.h @@ -26,6 +26,7 @@ #ifndef EIGEN_EIGENBASE_H #define EIGEN_EIGENBASE_H +namespace Eigen { /** Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T). 
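To make the Diagonal, DiagonalMatrix and DiagonalProduct changes above concrete, a brief sketch of ordinary usage (standard Eigen API; the patch additionally exposes data(), index() and nestedExpression() on such diagonal views):

#include <Eigen/Dense>

int main()
{
  Eigen::Matrix3d m = Eigen::Matrix3d::Zero();
  m.diagonal().setOnes();              // Diagonal is a writable view into m
  m.diagonal<1>().setConstant(2.0);    // super-diagonal; its Diagonal::index() is 1

  Eigen::Vector3d d(1.0, 2.0, 3.0);
  Eigen::Matrix3d p = d.asDiagonal() * m;        // DiagonalWrapper feeding a DiagonalProduct

  return p.row(0).isApprox(m.row(0)) ? 0 : 1;    // first row scaled by d(0) == 1
}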
* @@ -169,4 +170,6 @@ inline void MatrixBase::applyOnTheLeft(const EigenBase &o other.derived().applyThisOnTheLeft(derived()); } +} // end namespace Eigen + #endif // EIGEN_EIGENBASE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Flagged.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Flagged.h index 458213ab5..47f411b05 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Flagged.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Flagged.h @@ -25,6 +25,8 @@ #ifndef EIGEN_FLAGGED_H #define EIGEN_FLAGGED_H +namespace Eigen { + /** \class Flagged * \ingroup Core_Module * @@ -148,4 +150,6 @@ DenseBase::flagged() const return derived(); } +} // end namespace Eigen + #endif // EIGEN_FLAGGED_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/ForceAlignedAccess.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/ForceAlignedAccess.h index 11c1f8f70..238b7b770 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/ForceAlignedAccess.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/ForceAlignedAccess.h @@ -25,6 +25,8 @@ #ifndef EIGEN_FORCEALIGNEDACCESS_H #define EIGEN_FORCEALIGNEDACCESS_H +namespace Eigen { + /** \class ForceAlignedAccess * \ingroup Core_Module * @@ -154,4 +156,6 @@ MatrixBase::forceAlignedAccessIf() return derived(); } +} // end namespace Eigen + #endif // EIGEN_FORCEALIGNEDACCESS_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Functors.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Functors.h index 54636e0d4..f33f636bb 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Functors.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Functors.h @@ -25,6 +25,8 @@ #ifndef EIGEN_FUNCTORS_H #define EIGEN_FUNCTORS_H +namespace Eigen { + namespace internal { // associative functors: @@ -178,6 +180,18 @@ struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, PacketAccess=0 }; }; +/** \internal + * \brief Template functor to compute the pow of two scalars + */ +template struct scalar_binary_pow_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_binary_pow_op) + inline Scalar operator() (const Scalar& a, const OtherScalar& b) const { return internal::pow(a, b); } +}; +template +struct functor_traits > { + enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false }; +}; + // other binary functors: /** \internal @@ -220,6 +234,38 @@ struct functor_traits > { }; }; +/** \internal + * \brief Template functor to compute the and of two booleans + * + * \sa class CwiseBinaryOp, ArrayBase::operator&& + */ +struct scalar_boolean_and_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_and_op) + EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a && b; } +}; +template<> struct functor_traits { + enum { + Cost = NumTraits::AddCost, + PacketAccess = false + }; +}; + +/** \internal + * \brief Template functor to compute the or of two booleans + * + * \sa class CwiseBinaryOp, ArrayBase::operator|| + */ +struct scalar_boolean_or_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_or_op) + EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a || b; } +}; +template<> struct functor_traits { + enum { + Cost = NumTraits::AddCost, + PacketAccess = false + }; +}; + // unary functors: /** \internal @@ -249,7 +295,7 @@ struct functor_traits > template struct scalar_abs_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_abs_op) typedef typename NumTraits::Real result_type; - EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return abs(a); } + EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return internal::abs(a); } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) 
const { return internal::pabs(a); } @@ -271,7 +317,7 @@ struct functor_traits > template struct scalar_abs2_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_abs2_op) typedef typename NumTraits::Real result_type; - EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return abs2(a); } + EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return internal::abs2(a); } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pmul(a,a); } @@ -287,7 +333,7 @@ struct functor_traits > */ template struct scalar_conjugate_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_conjugate_op) - EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return conj(a); } + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return internal::conj(a); } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pconj(a); } }; @@ -324,7 +370,7 @@ template struct scalar_real_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_real_op) typedef typename NumTraits::Real result_type; - EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return real(a); } + EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return internal::real(a); } }; template struct functor_traits > @@ -339,7 +385,7 @@ template struct scalar_imag_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_op) typedef typename NumTraits::Real result_type; - EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return imag(a); } + EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return internal::imag(a); } }; template struct functor_traits > @@ -354,7 +400,7 @@ template struct scalar_real_ref_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_real_ref_op) typedef typename NumTraits::Real result_type; - EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return real_ref(*const_cast(&a)); } + EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return internal::real_ref(*const_cast(&a)); } }; template struct functor_traits > @@ -369,7 +415,7 @@ template struct scalar_imag_ref_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_ref_op) typedef typename NumTraits::Real result_type; - EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return imag_ref(*const_cast(&a)); } + EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return internal::imag_ref(*const_cast(&a)); } }; template struct functor_traits > @@ -383,7 +429,7 @@ struct functor_traits > */ template struct scalar_exp_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_exp_op) - inline const Scalar operator() (const Scalar& a) const { return exp(a); } + inline const Scalar operator() (const Scalar& a) const { return internal::exp(a); } typedef typename packet_traits::type Packet; inline Packet packetOp(const Packet& a) const { return internal::pexp(a); } }; @@ -399,7 +445,7 @@ struct functor_traits > */ template struct scalar_log_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_log_op) - inline const Scalar operator() (const Scalar& a) const { return log(a); } + inline const Scalar operator() (const Scalar& a) const { return internal::log(a); } typedef typename packet_traits::type Packet; inline Packet packetOp(const Packet& a) const { return internal::plog(a); } }; @@ -584,7 +630,7 @@ template struct functor_traits< linspaced_o template struct linspaced_op { typedef typename packet_traits::type Packet; - linspaced_op(Scalar low, Scalar high, int num_steps) : impl(low, (high-low)/(num_steps-1)) {} + linspaced_op(Scalar low, Scalar 
high, int num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/(num_steps-1))) {} template EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); } @@ -657,7 +703,7 @@ struct functor_traits > */ template struct scalar_sqrt_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_sqrt_op) - inline const Scalar operator() (const Scalar& a) const { return sqrt(a); } + inline const Scalar operator() (const Scalar& a) const { return internal::sqrt(a); } typedef typename packet_traits::type Packet; inline Packet packetOp(const Packet& a) const { return internal::psqrt(a); } }; @@ -675,7 +721,7 @@ struct functor_traits > */ template struct scalar_cos_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_cos_op) - inline Scalar operator() (const Scalar& a) const { return cos(a); } + inline Scalar operator() (const Scalar& a) const { return internal::cos(a); } typedef typename packet_traits::type Packet; inline Packet packetOp(const Packet& a) const { return internal::pcos(a); } }; @@ -694,7 +740,7 @@ struct functor_traits > */ template struct scalar_sin_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_sin_op) - inline const Scalar operator() (const Scalar& a) const { return sin(a); } + inline const Scalar operator() (const Scalar& a) const { return internal::sin(a); } typedef typename packet_traits::type Packet; inline Packet packetOp(const Packet& a) const { return internal::psin(a); } }; @@ -714,7 +760,7 @@ struct functor_traits > */ template struct scalar_tan_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_tan_op) - inline const Scalar operator() (const Scalar& a) const { return tan(a); } + inline const Scalar operator() (const Scalar& a) const { return internal::tan(a); } typedef typename packet_traits::type Packet; inline Packet packetOp(const Packet& a) const { return internal::ptan(a); } }; @@ -733,7 +779,7 @@ struct functor_traits > */ template struct scalar_acos_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_acos_op) - inline const Scalar operator() (const Scalar& a) const { return acos(a); } + inline const Scalar operator() (const Scalar& a) const { return internal::acos(a); } typedef typename packet_traits::type Packet; inline Packet packetOp(const Packet& a) const { return internal::pacos(a); } }; @@ -752,7 +798,7 @@ struct functor_traits > */ template struct scalar_asin_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_asin_op) - inline const Scalar operator() (const Scalar& a) const { return asin(a); } + inline const Scalar operator() (const Scalar& a) const { return internal::asin(a); } typedef typename packet_traits::type Packet; inline Packet packetOp(const Packet& a) const { return internal::pasin(a); } }; @@ -781,6 +827,20 @@ template struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false }; }; +/** \internal + * \brief Template functor to compute the quotient between a scalar and array entries. 
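The two boolean functors introduced above are the ones referenced by ArrayBase::operator&& and operator||; a minimal sketch of how they appear in user code (expected coefficients in the comments):

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::Array3d a(1.0, 2.0, 3.0);
  Eigen::Array3d b(2.0, 2.0, 2.0);

  // Coefficient-wise boolean expressions built on scalar_boolean_and_op / scalar_boolean_or_op.
  std::cout << ((a > 1.0) && (b > 1.0)).transpose() << "\n";   // 0 1 1
  std::cout << ((a > 2.0) || (b > 2.0)).transpose() << "\n";   // 0 0 1
  return 0;
}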
+ * \sa class CwiseUnaryOp, Cwise::inverse() + */ +template +struct scalar_inverse_mult_op { + scalar_inverse_mult_op(const Scalar& other) : m_other(other) {} + inline Scalar operator() (const Scalar& a) const { return m_other / a; } + template + inline const Packet packetOp(const Packet& a) const + { return internal::pdiv(pset1(m_other),a); } + Scalar m_other; +}; + /** \internal * \brief Template functor to compute the inverse of a scalar * \sa class CwiseUnaryOp, Cwise::inverse() @@ -939,4 +999,6 @@ struct functor_traits > } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_FUNCTORS_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Fuzzy.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Fuzzy.h index d266eed0a..887641163 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Fuzzy.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Fuzzy.h @@ -26,6 +26,8 @@ #ifndef EIGEN_FUZZY_H #define EIGEN_FUZZY_H +namespace Eigen { + namespace internal { @@ -35,8 +37,8 @@ struct isApprox_selector static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar prec) { using std::min; - const typename internal::nested::type nested(x); - const typename internal::nested::type otherNested(y); + typename internal::nested::type nested(x); + typename internal::nested::type otherNested(y); return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * (min)(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum()); } }; @@ -158,4 +160,6 @@ bool DenseBase::isMuchSmallerThan( return internal::isMuchSmallerThan_object_selector::run(derived(), other.derived(), prec); } +} // end namespace Eigen + #endif // EIGEN_FUZZY_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/GeneralProduct.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/GeneralProduct.h new file mode 100644 index 000000000..4fbe1f14b --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/GeneralProduct.h @@ -0,0 +1,628 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008-2011 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_GENERAL_PRODUCT_H +#define EIGEN_GENERAL_PRODUCT_H + +namespace Eigen { + +/** \class GeneralProduct + * \ingroup Core_Module + * + * \brief Expression of the product of two general matrices or vectors + * + * \param LhsNested the type used to store the left-hand side + * \param RhsNested the type used to store the right-hand side + * \param ProductMode the type of the product + * + * This class represents an expression of the product of two general matrices. 
+ * We call a general matrix, a dense matrix with full storage. For instance, + * this excludes triangular, selfadjoint, and sparse matrices. + * It is the return type of the operator* between general matrices. Its template + * arguments are determined automatically by ProductReturnType. Therefore, + * GeneralProduct should never be used directly. To determine the result type of a + * function which involves a matrix product, use ProductReturnType::Type. + * + * \sa ProductReturnType, MatrixBase::operator*(const MatrixBase&) + */ +template::value> +class GeneralProduct; + +enum { + Large = 2, + Small = 3 +}; + +namespace internal { + +template struct product_type_selector; + +template struct product_size_category +{ + enum { is_large = MaxSize == Dynamic || + Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD, + value = is_large ? Large + : Size == 1 ? 1 + : Small + }; +}; + +template struct product_type +{ + typedef typename remove_all::type _Lhs; + typedef typename remove_all::type _Rhs; + enum { + MaxRows = _Lhs::MaxRowsAtCompileTime, + Rows = _Lhs::RowsAtCompileTime, + MaxCols = _Rhs::MaxColsAtCompileTime, + Cols = _Rhs::ColsAtCompileTime, + MaxDepth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::MaxColsAtCompileTime, + _Rhs::MaxRowsAtCompileTime), + Depth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::ColsAtCompileTime, + _Rhs::RowsAtCompileTime), + LargeThreshold = EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD + }; + + // the splitting into different lines of code here, introducing the _select enums and the typedef below, + // is to work around an internal compiler error with gcc 4.1 and 4.2. +private: + enum { + rows_select = product_size_category::value, + cols_select = product_size_category::value, + depth_select = product_size_category::value + }; + typedef product_type_selector selector; + +public: + enum { + value = selector::ret + }; +#ifdef EIGEN_DEBUG_PRODUCT + static void debug() + { + EIGEN_DEBUG_VAR(Rows); + EIGEN_DEBUG_VAR(Cols); + EIGEN_DEBUG_VAR(Depth); + EIGEN_DEBUG_VAR(rows_select); + EIGEN_DEBUG_VAR(cols_select); + EIGEN_DEBUG_VAR(depth_select); + EIGEN_DEBUG_VAR(value); + } +#endif +}; + + +/* The following allows selecting the kind of product at compile time + * based on the three dimensions of the product. + * This is a compile time mapping from {1,Small,Large}^3 -> {product types} */ +// FIXME I'm not sure the current mapping is the ideal one.
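This classification is what routes a product expression either to a coefficient-based kernel, to the matrix-vector (GEMV-like) path, or to the blocked matrix-matrix (GEMM-like) path. A hedged illustration of the three situations (the chosen path is an internal detail, so the comments describe the typical mapping only):

#include <Eigen/Dense>

int main()
{
  Eigen::Matrix3d s = Eigen::Matrix3d::Random();
  Eigen::Matrix3d t = s * s;                 // small * small: coefficient-based product

  Eigen::MatrixXd A = Eigen::MatrixXd::Random(512, 512);
  Eigen::VectorXd v = Eigen::VectorXd::Random(512);
  Eigen::VectorXd w = A * v;                 // large matrix * vector: GemvProduct path
  Eigen::MatrixXd B = A * A;                 // large * large: GemmProduct path

  return (t.rows() + w.rows() + B.rows()) > 0 ? 0 : 1;
}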
+template struct product_type_selector { enum { ret = OuterProduct }; }; +template struct product_type_selector<1, 1, Depth> { enum { ret = InnerProduct }; }; +template<> struct product_type_selector<1, 1, 1> { enum { ret = InnerProduct }; }; +template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector<1, Small,Small> { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; +template<> struct product_type_selector<1, Large,Small> { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector<1, Large,Large> { enum { ret = GemvProduct }; }; +template<> struct product_type_selector<1, Small,Large> { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = GemvProduct }; }; +template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; + +} // end namespace internal + +/** \class ProductReturnType + * \ingroup Core_Module + * + * \brief Helper class to get the correct and optimized returned type of operator* + * + * \param Lhs the type of the left-hand side + * \param Rhs the type of the right-hand side + * \param ProductMode the type of the product (determined automatically by internal::product_mode) + * + * This class defines the typename Type representing the optimized product expression + * between two matrix expressions. In practice, using ProductReturnType::Type + * is the recommended way to define the result type of a function returning an expression + * which involve a matrix product. The class Product should never be + * used directly. + * + * \sa class Product, MatrixBase::operator*(const MatrixBase&) + */ +template +struct ProductReturnType +{ + // TODO use the nested type to reduce instanciations ???? 
+// typedef typename internal::nested::type LhsNested; +// typedef typename internal::nested::type RhsNested; + + typedef GeneralProduct Type; +}; + +template +struct ProductReturnType +{ + typedef typename internal::nested::type >::type LhsNested; + typedef typename internal::nested::type >::type RhsNested; + typedef CoeffBasedProduct Type; +}; + +template +struct ProductReturnType +{ + typedef typename internal::nested::type >::type LhsNested; + typedef typename internal::nested::type >::type RhsNested; + typedef CoeffBasedProduct Type; +}; + +// this is a workaround for sun CC +template +struct LazyProductReturnType : public ProductReturnType +{}; + +/*********************************************************************** +* Implementation of Inner Vector Vector Product +***********************************************************************/ + +// FIXME : maybe the "inner product" could return a Scalar +// instead of a 1x1 matrix ?? +// Pro: more natural for the user +// Cons: this could be a problem if in a meta unrolled algorithm a matrix-matrix +// product ends up to a row-vector times col-vector product... To tackle this use +// case, we could have a specialization for Block with: operator=(Scalar x); + +namespace internal { + +template +struct traits > + : traits::ReturnType,1,1> > +{}; + +} + +template +class GeneralProduct + : internal::no_assignment_operator, + public Matrix::ReturnType,1,1> +{ + typedef Matrix::ReturnType,1,1> Base; + public: + GeneralProduct(const Lhs& lhs, const Rhs& rhs) + { + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + + Base::coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum(); + } + + /** Convertion to scalar */ + operator const typename Base::Scalar() const { + return Base::coeff(0,0); + } +}; + +/*********************************************************************** +* Implementation of Outer Vector Vector Product +***********************************************************************/ + +namespace internal { +template struct outer_product_selector; + +template +struct traits > + : traits, Lhs, Rhs> > +{}; + +} + +template +class GeneralProduct + : public ProductBase, Lhs, Rhs> +{ + public: + EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct) + + GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) + { + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + } + + template void scaleAndAddTo(Dest& dest, Scalar alpha) const + { + internal::outer_product_selector<(int(Dest::Flags)&RowMajorBit) ? 
RowMajor : ColMajor>::run(*this, dest, alpha); + } +}; + +namespace internal { + +template<> struct outer_product_selector { + template + static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) { + typedef typename Dest::Index Index; + // FIXME make sure lhs is sequentially stored + // FIXME not very good if rhs is real and lhs complex while alpha is real too + const Index cols = dest.cols(); + for (Index j=0; j struct outer_product_selector { + template + static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) { + typedef typename Dest::Index Index; + // FIXME make sure rhs is sequentially stored + // FIXME not very good if lhs is real and rhs complex while alpha is real too + const Index rows = dest.rows(); + for (Index i=0; i call fast BLAS-like colmajor routine + * 2 - the matrix is row-major, BLAS compatible and N is large => call fast BLAS-like rowmajor routine + * 3 - all other cases are handled using a simple loop along the outer-storage direction. + * Therefore we need a lower level meta selector. + * Furthermore, if the matrix is the rhs, then the product has to be transposed. + */ +namespace internal { + +template +struct traits > + : traits, Lhs, Rhs> > +{}; + +template +struct gemv_selector; + +} // end namespace internal + +template +class GeneralProduct + : public ProductBase, Lhs, Rhs> +{ + public: + EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct) + + typedef typename Lhs::Scalar LhsScalar; + typedef typename Rhs::Scalar RhsScalar; + + GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) + { +// EIGEN_STATIC_ASSERT((internal::is_same::value), +// YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + } + + enum { Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight }; + typedef typename internal::conditional::type MatrixType; + + template void scaleAndAddTo(Dest& dst, Scalar alpha) const + { + eigen_assert(m_lhs.rows() == dst.rows() && m_rhs.cols() == dst.cols()); + internal::gemv_selector::HasUsableDirectAccess)>::run(*this, dst, alpha); + } +}; + +namespace internal { + +// The vector is on the left => transposition +template +struct gemv_selector +{ + template + static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) + { + Transpose destT(dest); + enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor }; + gemv_selector + ::run(GeneralProduct,Transpose, GemvProduct> + (prod.rhs().transpose(), prod.lhs().transpose()), destT, alpha); + } +}; + +template struct gemv_static_vector_if; + +template +struct gemv_static_vector_if +{ + EIGEN_STRONG_INLINE Scalar* data() { eigen_internal_assert(false && "should never be called"); return 0; } +}; + +template +struct gemv_static_vector_if +{ + EIGEN_STRONG_INLINE Scalar* data() { return 0; } +}; + +template +struct gemv_static_vector_if +{ + #if EIGEN_ALIGN_STATICALLY + internal::plain_array m_data; + EIGEN_STRONG_INLINE Scalar* data() { return m_data.array; } + #else + // Some architectures cannot align on the stack, + // => let's manually enforce alignment by allocating more data and return the address of the first aligned element. + enum { + ForceAlignment = internal::packet_traits::Vectorizable, + PacketSize = internal::packet_traits::size + }; + internal::plain_array m_data; + EIGEN_STRONG_INLINE Scalar* data() { + return ForceAlignment + ? 
reinterpret_cast((reinterpret_cast(m_data.array) & ~(size_t(15))) + 16) + : m_data.array; + } + #endif +}; + +template<> struct gemv_selector +{ + template + static inline void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) + { + typedef typename ProductType::Index Index; + typedef typename ProductType::LhsScalar LhsScalar; + typedef typename ProductType::RhsScalar RhsScalar; + typedef typename ProductType::Scalar ResScalar; + typedef typename ProductType::RealScalar RealScalar; + typedef typename ProductType::ActualLhsType ActualLhsType; + typedef typename ProductType::ActualRhsType ActualRhsType; + typedef typename ProductType::LhsBlasTraits LhsBlasTraits; + typedef typename ProductType::RhsBlasTraits RhsBlasTraits; + typedef Map, Aligned> MappedDest; + + ActualLhsType actualLhs = LhsBlasTraits::extract(prod.lhs()); + ActualRhsType actualRhs = RhsBlasTraits::extract(prod.rhs()); + + ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs()) + * RhsBlasTraits::extractScalarFactor(prod.rhs()); + + enum { + // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 + // on, the other hand it is good for the cache to pack the vector anyways... + EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1, + ComplexByReal = (NumTraits::IsComplex) && (!NumTraits::IsComplex), + MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal + }; + + gemv_static_vector_if static_dest; + + bool alphaIsCompatible = (!ComplexByReal) || (imag(actualAlpha)==RealScalar(0)); + bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible; + + RhsScalar compatibleAlpha = get_factor::run(actualAlpha); + + ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(), + evalToDest ? 
dest.data() : static_dest.data()); + + if(!evalToDest) + { + #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + int size = dest.size(); + EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #endif + if(!alphaIsCompatible) + { + MappedDest(actualDestPtr, dest.size()).setZero(); + compatibleAlpha = RhsScalar(1); + } + else + MappedDest(actualDestPtr, dest.size()) = dest; + } + + general_matrix_vector_product + ::run( + actualLhs.rows(), actualLhs.cols(), + actualLhs.data(), actualLhs.outerStride(), + actualRhs.data(), actualRhs.innerStride(), + actualDestPtr, 1, + compatibleAlpha); + + if (!evalToDest) + { + if(!alphaIsCompatible) + dest += actualAlpha * MappedDest(actualDestPtr, dest.size()); + else + dest = MappedDest(actualDestPtr, dest.size()); + } + } +}; + +template<> struct gemv_selector +{ + template + static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) + { + typedef typename ProductType::LhsScalar LhsScalar; + typedef typename ProductType::RhsScalar RhsScalar; + typedef typename ProductType::Scalar ResScalar; + typedef typename ProductType::Index Index; + typedef typename ProductType::ActualLhsType ActualLhsType; + typedef typename ProductType::ActualRhsType ActualRhsType; + typedef typename ProductType::_ActualRhsType _ActualRhsType; + typedef typename ProductType::LhsBlasTraits LhsBlasTraits; + typedef typename ProductType::RhsBlasTraits RhsBlasTraits; + + typename add_const::type actualLhs = LhsBlasTraits::extract(prod.lhs()); + typename add_const::type actualRhs = RhsBlasTraits::extract(prod.rhs()); + + ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs()) + * RhsBlasTraits::extractScalarFactor(prod.rhs()); + + enum { + // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 + // on, the other hand it is good for the cache to pack the vector anyways... + DirectlyUseRhs = _ActualRhsType::InnerStrideAtCompileTime==1 + }; + + gemv_static_vector_if static_rhs; + + ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(), + DirectlyUseRhs ? const_cast(actualRhs.data()) : static_rhs.data()); + + if(!DirectlyUseRhs) + { + #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + int size = actualRhs.size(); + EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #endif + Map(actualRhsPtr, actualRhs.size()) = actualRhs; + } + + general_matrix_vector_product + ::run( + actualLhs.rows(), actualLhs.cols(), + actualLhs.data(), actualLhs.outerStride(), + actualRhsPtr, 1, + dest.data(), dest.innerStride(), + actualAlpha); + } +}; + +template<> struct gemv_selector +{ + template + static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) + { + typedef typename Dest::Index Index; + // TODO makes sure dest is sequentially stored in memory, otherwise use a temp + const Index size = prod.rhs().rows(); + for(Index k=0; k struct gemv_selector +{ + template + static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) + { + typedef typename Dest::Index Index; + // TODO makes sure rhs is sequentially stored in memory, otherwise use a temp + const Index rows = prod.rows(); + for(Index i=0; i +template +inline const typename ProductReturnType::Type +MatrixBase::operator*(const MatrixBase &other) const +{ + // A note regarding the function declaration: In MSVC, this function will sometimes + // not be inlined since DenseStorage is an unwindable object for dynamic + // matrices and product types are holding a member to store the result. 
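// Editor's sketch (assumes the Eigen 3.1 API being added here): the static
// assertions in the operator* body below reject vector-vector products and
// point the user to dot() and cwiseProduct(). The three products side by side:
#include <Eigen/Dense>

void productKinds()
{
  Eigen::Vector3d v1(1, 2, 3), v2(4, 5, 6);
  Eigen::Matrix3d M = Eigen::Matrix3d::Identity();

  double d          = v1.dot(v2);          // inner product, yields a scalar
  Eigen::Vector3d c = v1.cwiseProduct(v2); // coefficient-wise product
  Eigen::Vector3d w = M * v1;              // matrix * vector; product_type above picks the kernel
                                           // (coefficient-based here, GemvProduct for large dynamic sizes)
  // v1 * v2;  // ill-formed: triggers the INVALID_VECTOR_VECTOR_PRODUCT static assertion
  (void)d; (void)c; (void)w;
}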
+ // Thus it does not help tagging this function with EIGEN_STRONG_INLINE. + enum { + ProductIsValid = Derived::ColsAtCompileTime==Dynamic + || OtherDerived::RowsAtCompileTime==Dynamic + || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime), + AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime, + SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived) + }; + // note to the lost user: + // * for a dot product use: v1.dot(v2) + // * for a coeff-wise product use: v1.cwiseProduct(v2) + EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), + INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) + EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), + INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) + EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) +#ifdef EIGEN_DEBUG_PRODUCT + internal::product_type::debug(); +#endif + return typename ProductReturnType::Type(derived(), other.derived()); +} + +/** \returns an expression of the matrix product of \c *this and \a other without implicit evaluation. + * + * The returned product will behave like any other expressions: the coefficients of the product will be + * computed once at a time as requested. This might be useful in some extremely rare cases when only + * a small and no coherent fraction of the result's coefficients have to be computed. + * + * \warning This version of the matrix product can be much much slower. So use it only if you know + * what you are doing and that you measured a true speed improvement. + * + * \sa operator*(const MatrixBase&) + */ +template +template +const typename LazyProductReturnType::Type +MatrixBase::lazyProduct(const MatrixBase &other) const +{ + enum { + ProductIsValid = Derived::ColsAtCompileTime==Dynamic + || OtherDerived::RowsAtCompileTime==Dynamic + || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime), + AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime, + SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived) + }; + // note to the lost user: + // * for a dot product use: v1.dot(v2) + // * for a coeff-wise product use: v1.cwiseProduct(v2) + EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), + INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) + EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), + INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) + EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) + + return typename LazyProductReturnType::Type(derived(), other.derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_PRODUCT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/GenericPacketMath.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/GenericPacketMath.h index 8ed835327..d92ac9529 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/GenericPacketMath.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/GenericPacketMath.h @@ -26,6 +26,8 @@ #ifndef EIGEN_GENERIC_PACKET_MATH_H #define EIGEN_GENERIC_PACKET_MATH_H +namespace Eigen { + namespace internal { /** \internal @@ -312,7 +314,7 @@ template struct palign_impl { // by default data are aligned, so there is nothing to be done :) - inline static void run(PacketType&, const PacketType&) {} + static inline void run(PacketType&, const PacketType&) 
{} }; /** \internal update \a first using the concatenation of the \a Offset last elements @@ -335,5 +337,7 @@ template<> inline std::complex pmul(const std::complex& a, const } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_GENERIC_PACKET_MATH_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/GlobalFunctions.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/GlobalFunctions.h index 144145a95..94605252d 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/GlobalFunctions.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/GlobalFunctions.h @@ -66,13 +66,36 @@ namespace std template inline const Eigen::CwiseUnaryOp, const Derived> - pow(const Eigen::ArrayBase& x, const typename Derived::Scalar& exponent) { \ - return x.derived().pow(exponent); \ + pow(const Eigen::ArrayBase& x, const typename Derived::Scalar& exponent) { + return x.derived().pow(exponent); + } + + template + inline const Eigen::CwiseBinaryOp, const Derived, const Derived> + pow(const Eigen::ArrayBase& x, const Eigen::ArrayBase& exponents) + { + return Eigen::CwiseBinaryOp, const Derived, const Derived>( + x.derived(), + exponents.derived() + ); } } namespace Eigen { + /** + * \brief Component-wise division of a scalar by array elements. + **/ + template + inline const Eigen::CwiseUnaryOp, const Derived> + operator/(typename Derived::Scalar s, const Eigen::ArrayBase& a) + { + return Eigen::CwiseUnaryOp, const Derived>( + a.derived(), + Eigen::internal::scalar_inverse_mult_op(s) + ); + } + namespace internal { EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(real,scalar_real_op) diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/IO.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/IO.h index f3cfcdbf4..2f1906f2a 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/IO.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/IO.h @@ -26,6 +26,8 @@ #ifndef EIGEN_IO_H #define EIGEN_IO_H +namespace Eigen { + enum { DontAlignCols = 1 }; enum { StreamPrecision = -1, FullPrecision = -2 }; @@ -171,7 +173,7 @@ std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& return s; } - const typename Derived::Nested m = _m; + typename Derived::Nested m = _m; typedef typename Derived::Scalar Scalar; typedef typename Derived::Index Index; @@ -257,4 +259,6 @@ std::ostream & operator << return internal::print_matrix(s, m.eval(), EIGEN_DEFAULT_IO_FORMAT); } +} // end namespace Eigen + #endif // EIGEN_IO_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Map.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Map.h index 2bf80b3af..360a2280e 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Map.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Map.h @@ -26,6 +26,8 @@ #ifndef EIGEN_MAP_H #define EIGEN_MAP_H +namespace Eigen { + /** \class Map * \ingroup Core_Module * @@ -200,4 +202,6 @@ inline Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> this->_set_noalias(Eigen::Map(data)); } +} // end namespace Eigen + #endif // EIGEN_MAP_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/MapBase.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/MapBase.h index 9426e2d24..2b736cb74 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/MapBase.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/MapBase.h @@ -30,6 +30,7 @@ EIGEN_STATIC_ASSERT((int(internal::traits::Flags) & LinearAccessBit) || Derived::IsVectorAtCompileTime, \ YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT) +namespace Eigen { /** \class MapBase * \ingroup Core_Module @@ -251,5 +252,6 @@ template class MapBase using Base::Base::operator=; }; +} // end namespace Eigen #endif // EIGEN_MAPBASE_H diff 
--git a/gtsam/3rdparty/Eigen/Eigen/src/Core/MathFunctions.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/MathFunctions.h index 2b454db21..ab153c1eb 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/MathFunctions.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/MathFunctions.h @@ -25,6 +25,8 @@ #ifndef EIGEN_MATHFUNCTIONS_H #define EIGEN_MATHFUNCTIONS_H +namespace Eigen { + namespace internal { /** \internal \struct global_math_functions_filtering_base @@ -309,8 +311,7 @@ struct abs2_impl > { static inline RealScalar run(const std::complex& x) { - using std::norm; - return norm(x); + return real(x)*real(x) + imag(x)*imag(x); } }; @@ -553,7 +554,7 @@ struct pow_default_impl { static inline Scalar run(Scalar x, Scalar y) { - Scalar res = 1; + Scalar res(1); eigen_assert(!NumTraits::IsSigned || y >= 0); if(y & 1) res *= x; y >>= 1; @@ -838,6 +839,19 @@ template<> struct scalar_fuzzy_impl }; +/**************************************************************************** +* Special functions * +****************************************************************************/ + +// std::isfinite is non standard, so let's define our own version, +// even though it is not very efficient. +template bool isfinite(const T& x) +{ + return x::highest() && x>NumTraits::lowest(); +} + } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_MATHFUNCTIONS_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Matrix.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Matrix.h index 982c9256a..8742a0130 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Matrix.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Matrix.h @@ -26,6 +26,8 @@ #ifndef EIGEN_MATRIX_H #define EIGEN_MATRIX_H +namespace Eigen { + /** \class Matrix * \ingroup Core_Module * @@ -411,25 +413,8 @@ EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex, cd) #undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES #undef EIGEN_MAKE_TYPEDEFS +#undef EIGEN_MAKE_FIXED_TYPEDEFS -#undef EIGEN_MAKE_TYPEDEFS_LARGE - -#define EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \ -using Eigen::Matrix##SizeSuffix##TypeSuffix; \ -using Eigen::Vector##SizeSuffix##TypeSuffix; \ -using Eigen::RowVector##SizeSuffix##TypeSuffix; - -#define EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(TypeSuffix) \ -EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \ -EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \ -EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \ -EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X) \ - -#define EIGEN_USING_MATRIX_TYPEDEFS \ -EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(i) \ -EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(f) \ -EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(d) \ -EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(cf) \ -EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(cd) +} // end namespace Eigen #endif // EIGEN_MATRIX_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/MatrixBase.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/MatrixBase.h index 62877bce0..5a744c5ec 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/MatrixBase.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/MatrixBase.h @@ -26,6 +26,8 @@ #ifndef EIGEN_MATRIXBASE_H #define EIGEN_MATRIXBASE_H +namespace Eigen { + /** \class MatrixBase * \ingroup Core_Module * @@ -250,8 +252,7 @@ template class MatrixBase // huuuge hack. make Eigen2's matrix.part() work in eigen3. Problem: Diagonal is now a class template instead // of an integer constant. Solution: overload the part() method template wrt template parameters list. - // Note: replacing next line by "template class U>" produces a mysterious error C2082 in MSVC. 
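// Editor's sketch: the pow_default_impl hunk in the MathFunctions.h diff above
// (the "Scalar res(1)" change) sits inside an exponentiation-by-squaring loop
// for integer scalars. The same idea, self-contained and in plain C++
// (illustration only; this is not the patched code itself):
int ipow(int x, unsigned int y)
{
  int res = 1;                // x^y computed in O(log y) multiplications
  while (y) {
    if (y & 1) res *= x;      // low bit set: fold the current power of x into the result
    x *= x;                   // square the base for the next exponent bit
    y >>= 1;
  }
  return res;
}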
- template class U> + template class U> const DiagonalWrapper part() const { return diagonal().asDiagonal(); } #endif // EIGEN2_SUPPORT @@ -331,7 +332,7 @@ template class MatrixBase /** \returns an \link ArrayBase Array \endlink expression of this matrix * \sa ArrayBase::matrix() */ ArrayWrapper array() { return derived(); } - const ArrayWrapper array() const { return derived(); } + const ArrayWrapper array() const { return derived(); } /////////// LU module /////////// @@ -466,6 +467,8 @@ template class MatrixBase const MatrixFunctionReturnValue sinh() const; const MatrixFunctionReturnValue cos() const; const MatrixFunctionReturnValue sin() const; + const MatrixSquareRootReturnValue sqrt() const; + const MatrixLogarithmReturnValue log() const; #ifdef EIGEN2_SUPPORT template @@ -512,10 +515,12 @@ template class MatrixBase protected: // mixing arrays and matrices is not legal template Derived& operator+=(const ArrayBase& ) - {EIGEN_STATIC_ASSERT(sizeof(typename OtherDerived::Scalar)==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);} + {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} // mixing arrays and matrices is not legal template Derived& operator-=(const ArrayBase& ) - {EIGEN_STATIC_ASSERT(sizeof(typename OtherDerived::Scalar)==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);} + {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} }; +} // end namespace Eigen + #endif // EIGEN_MATRIXBASE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/NestByValue.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/NestByValue.h index a6104d2a4..cfe3e7990 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/NestByValue.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/NestByValue.h @@ -26,6 +26,8 @@ #ifndef EIGEN_NESTBYVALUE_H #define EIGEN_NESTBYVALUE_H +namespace Eigen { + /** \class NestByValue * \ingroup Core_Module * @@ -119,4 +121,6 @@ DenseBase::nestByValue() const return NestByValue(derived()); } +} // end namespace Eigen + #endif // EIGEN_NESTBYVALUE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/NoAlias.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/NoAlias.h index da64affcf..5278cfb73 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/NoAlias.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/NoAlias.h @@ -25,6 +25,8 @@ #ifndef EIGEN_NOALIAS_H #define EIGEN_NOALIAS_H +namespace Eigen { + /** \class NoAlias * \ingroup Core_Module * @@ -133,4 +135,6 @@ NoAlias MatrixBase::noalias() return derived(); } +} // end namespace Eigen + #endif // EIGEN_NOALIAS_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/NumTraits.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/NumTraits.h index 73ef05dfe..e8867235e 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/NumTraits.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/NumTraits.h @@ -25,6 +25,8 @@ #ifndef EIGEN_NUMTRAITS_H #define EIGEN_NUMTRAITS_H +namespace Eigen { + /** \class NumTraits * \ingroup Core_Module * @@ -81,14 +83,14 @@ template struct GenericNumTraits >::type NonInteger; typedef T Nested; - inline static Real epsilon() { return std::numeric_limits::epsilon(); } - inline static Real dummy_precision() + static inline Real epsilon() { return std::numeric_limits::epsilon(); } + static inline Real dummy_precision() { // make sure to override this for floating-point types return Real(0); } - inline static T highest() { return (std::numeric_limits::max)(); } - inline static T lowest() { return IsInteger ? 
(std::numeric_limits::min)() : (-(std::numeric_limits::max)()); } + static inline T highest() { return (std::numeric_limits::max)(); } + static inline T lowest() { return IsInteger ? (std::numeric_limits::min)() : (-(std::numeric_limits::max)()); } #ifdef EIGEN2_SUPPORT enum { @@ -104,12 +106,12 @@ template struct NumTraits : GenericNumTraits template<> struct NumTraits : GenericNumTraits { - inline static float dummy_precision() { return 1e-5f; } + static inline float dummy_precision() { return 1e-5f; } }; template<> struct NumTraits : GenericNumTraits { - inline static double dummy_precision() { return 1e-12; } + static inline double dummy_precision() { return 1e-12; } }; template<> struct NumTraits @@ -130,8 +132,8 @@ template struct NumTraits > MulCost = 4 * NumTraits::MulCost + 2 * NumTraits::AddCost }; - inline static Real epsilon() { return NumTraits::epsilon(); } - inline static Real dummy_precision() { return NumTraits::dummy_precision(); } + static inline Real epsilon() { return NumTraits::epsilon(); } + static inline Real dummy_precision() { return NumTraits::dummy_precision(); } }; template @@ -155,6 +157,6 @@ struct NumTraits > }; }; - +} // end namespace Eigen #endif // EIGEN_NUMTRAITS_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/PermutationMatrix.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/PermutationMatrix.h index a064e053e..e0d618dfb 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/PermutationMatrix.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/PermutationMatrix.h @@ -26,6 +26,8 @@ #ifndef EIGEN_PERMUTATIONMATRIX_H #define EIGEN_PERMUTATIONMATRIX_H +namespace Eigen { + template class PermutedImpl; /** \class PermutationBase @@ -56,6 +58,8 @@ namespace internal { template struct permut_matrix_product_retval; +template +struct permut_sparsematrix_product_retval; enum PermPermProduct_t {PermPermProduct}; } // end namespace internal @@ -511,7 +515,7 @@ class PermutationWrapper : public PermutationBase MatrixBase::asPermutation() con return derived(); } +} // end namespace Eigen + #endif // EIGEN_PERMUTATIONMATRIX_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/PlainObjectBase.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/PlainObjectBase.h index 612254e9d..f9c432732 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/PlainObjectBase.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/PlainObjectBase.h @@ -32,6 +32,8 @@ # define EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED #endif +namespace Eigen { + namespace internal { template @@ -47,13 +49,13 @@ EIGEN_ALWAYS_INLINE void check_rows_cols_for_overflow(Index rows, Index cols) throw_std_bad_alloc(); } -template (Derived::IsVectorAtCompileTime)> struct conservative_resize_like_impl; +template struct conservative_resize_like_impl; template struct matrix_swap_impl; } // end namespace internal -/** +/** \class PlainObjectBase * \brief %Dense storage base class for matrices and arrays. * * This class can be extended with the help of the plugin mechanism described on the page @@ -61,8 +63,29 @@ template struct m * * \sa \ref TopicClassHierarchy */ +#ifdef EIGEN_PARSED_BY_DOXYGEN +namespace internal { + +// this is a warkaround to doxygen not being able to understand the inheritence logic +// when it is hidden by the dense_xpr_base helper struct. +template struct dense_xpr_base_dispatcher_for_doxygen;// : public MatrixBase {}; +/** This class is just a workaround for Doxygen and it does not not actually exist. 
*/ +template +struct dense_xpr_base_dispatcher_for_doxygen > + : public MatrixBase > {}; +/** This class is just a workaround for Doxygen and it does not not actually exist. */ +template +struct dense_xpr_base_dispatcher_for_doxygen > + : public ArrayBase > {}; + +} // namespace internal + +template +class PlainObjectBase : public internal::dense_xpr_base_dispatcher_for_doxygen +#else template class PlainObjectBase : public internal::dense_xpr_base::type +#endif { public: enum { Options = internal::traits::Options }; @@ -443,68 +466,68 @@ class PlainObjectBase : public internal::dense_xpr_base::type * \see class Map */ //@{ - inline static ConstMapType Map(const Scalar* data) + static inline ConstMapType Map(const Scalar* data) { return ConstMapType(data); } - inline static MapType Map(Scalar* data) + static inline MapType Map(Scalar* data) { return MapType(data); } - inline static ConstMapType Map(const Scalar* data, Index size) + static inline ConstMapType Map(const Scalar* data, Index size) { return ConstMapType(data, size); } - inline static MapType Map(Scalar* data, Index size) + static inline MapType Map(Scalar* data, Index size) { return MapType(data, size); } - inline static ConstMapType Map(const Scalar* data, Index rows, Index cols) + static inline ConstMapType Map(const Scalar* data, Index rows, Index cols) { return ConstMapType(data, rows, cols); } - inline static MapType Map(Scalar* data, Index rows, Index cols) + static inline MapType Map(Scalar* data, Index rows, Index cols) { return MapType(data, rows, cols); } - inline static ConstAlignedMapType MapAligned(const Scalar* data) + static inline ConstAlignedMapType MapAligned(const Scalar* data) { return ConstAlignedMapType(data); } - inline static AlignedMapType MapAligned(Scalar* data) + static inline AlignedMapType MapAligned(Scalar* data) { return AlignedMapType(data); } - inline static ConstAlignedMapType MapAligned(const Scalar* data, Index size) + static inline ConstAlignedMapType MapAligned(const Scalar* data, Index size) { return ConstAlignedMapType(data, size); } - inline static AlignedMapType MapAligned(Scalar* data, Index size) + static inline AlignedMapType MapAligned(Scalar* data, Index size) { return AlignedMapType(data, size); } - inline static ConstAlignedMapType MapAligned(const Scalar* data, Index rows, Index cols) + static inline ConstAlignedMapType MapAligned(const Scalar* data, Index rows, Index cols) { return ConstAlignedMapType(data, rows, cols); } - inline static AlignedMapType MapAligned(Scalar* data, Index rows, Index cols) + static inline AlignedMapType MapAligned(Scalar* data, Index rows, Index cols) { return AlignedMapType(data, rows, cols); } template - inline static typename StridedConstMapType >::type Map(const Scalar* data, const Stride& stride) + static inline typename StridedConstMapType >::type Map(const Scalar* data, const Stride& stride) { return typename StridedConstMapType >::type(data, stride); } template - inline static typename StridedMapType >::type Map(Scalar* data, const Stride& stride) + static inline typename StridedMapType >::type Map(Scalar* data, const Stride& stride) { return typename StridedMapType >::type(data, stride); } template - inline static typename StridedConstMapType >::type Map(const Scalar* data, Index size, const Stride& stride) + static inline typename StridedConstMapType >::type Map(const Scalar* data, Index size, const Stride& stride) { return typename StridedConstMapType >::type(data, size, stride); } template - inline static typename StridedMapType >::type 
Map(Scalar* data, Index size, const Stride& stride) + static inline typename StridedMapType >::type Map(Scalar* data, Index size, const Stride& stride) { return typename StridedMapType >::type(data, size, stride); } template - inline static typename StridedConstMapType >::type Map(const Scalar* data, Index rows, Index cols, const Stride& stride) + static inline typename StridedConstMapType >::type Map(const Scalar* data, Index rows, Index cols, const Stride& stride) { return typename StridedConstMapType >::type(data, rows, cols, stride); } template - inline static typename StridedMapType >::type Map(Scalar* data, Index rows, Index cols, const Stride& stride) + static inline typename StridedMapType >::type Map(Scalar* data, Index rows, Index cols, const Stride& stride) { return typename StridedMapType >::type(data, rows, cols, stride); } template - inline static typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, const Stride& stride) + static inline typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, const Stride& stride) { return typename StridedConstAlignedMapType >::type(data, stride); } template - inline static typename StridedAlignedMapType >::type MapAligned(Scalar* data, const Stride& stride) + static inline typename StridedAlignedMapType >::type MapAligned(Scalar* data, const Stride& stride) { return typename StridedAlignedMapType >::type(data, stride); } template - inline static typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, Index size, const Stride& stride) + static inline typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, Index size, const Stride& stride) { return typename StridedConstAlignedMapType >::type(data, size, stride); } template - inline static typename StridedAlignedMapType >::type MapAligned(Scalar* data, Index size, const Stride& stride) + static inline typename StridedAlignedMapType >::type MapAligned(Scalar* data, Index size, const Stride& stride) { return typename StridedAlignedMapType >::type(data, size, stride); } template - inline static typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, Index rows, Index cols, const Stride& stride) + static inline typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, Index rows, Index cols, const Stride& stride) { return typename StridedConstAlignedMapType >::type(data, rows, cols, stride); } template - inline static typename StridedAlignedMapType >::type MapAligned(Scalar* data, Index rows, Index cols, const Stride& stride) + static inline typename StridedAlignedMapType >::type MapAligned(Scalar* data, Index rows, Index cols, const Stride& stride) { return typename StridedAlignedMapType >::type(data, rows, cols, stride); } //@} @@ -594,6 +617,9 @@ class PlainObjectBase : public internal::dense_xpr_base::type template EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename internal::enable_if::type* = 0) { + EIGEN_STATIC_ASSERT(bool(NumTraits::IsInteger) && + bool(NumTraits::IsInteger), + FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED) eigen_assert(rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)); internal::check_rows_cols_for_overflow(rows, cols); @@ -623,7 +649,7 @@ class PlainObjectBase : public internal::dense_xpr_base::type public: #ifndef EIGEN_PARSED_BY_DOXYGEN - EIGEN_STRONG_INLINE static void _check_template_params() + static EIGEN_STRONG_INLINE void 
_check_template_params() { EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, (Options&RowMajor)==RowMajor) && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, (Options&RowMajor)==0) @@ -751,4 +777,6 @@ struct matrix_swap_impl } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_DENSESTORAGEBASE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Product.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Product.h index e2035b242..53eb0fbae 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Product.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Product.h @@ -1,8 +1,7 @@ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // -// Copyright (C) 2006-2008 Benoit Jacob -// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2008-2011 Gael Guennebaud // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public @@ -26,600 +25,89 @@ #ifndef EIGEN_PRODUCT_H #define EIGEN_PRODUCT_H -/** \class GeneralProduct +template class Product; +template class ProductImpl; + +/** \class Product * \ingroup Core_Module * - * \brief Expression of the product of two general matrices or vectors + * \brief Expression of the product of two arbitrary matrices or vectors * - * \param LhsNested the type used to store the left-hand side - * \param RhsNested the type used to store the right-hand side - * \param ProductMode the type of the product + * \param Lhs the type of the left-hand side expression + * \param Rhs the type of the right-hand side expression * - * This class represents an expression of the product of two general matrices. - * We call a general matrix, a dense matrix with full storage. For instance, - * This excludes triangular, selfadjoint, and sparse matrices. - * It is the return type of the operator* between general matrices. Its template - * arguments are determined automatically by ProductReturnType. Therefore, - * GeneralProduct should never be used direclty. To determine the result type of a - * function which involves a matrix product, use ProductReturnType::Type. + * This class represents an expression of the product of two arbitrary matrices. * - * \sa ProductReturnType, MatrixBase::operator*(const MatrixBase&) */ -template::value> -class GeneralProduct; - -enum { - Large = 2, - Small = 3 -}; namespace internal { - -template struct product_type_selector; - -template struct product_size_category +template +struct traits > { - enum { is_large = MaxSize == Dynamic || - Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD, - value = is_large ? Large - : Size == 1 ? 1 - : Small + typedef MatrixXpr XprKind; + typedef typename remove_all::type LhsCleaned; + typedef typename remove_all::type RhsCleaned; + typedef typename scalar_product_traits::Scalar, typename traits::Scalar>::ReturnType Scalar; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + enum { + RowsAtCompileTime = LhsCleaned::RowsAtCompileTime, + ColsAtCompileTime = RhsCleaned::ColsAtCompileTime, + MaxRowsAtCompileTime = LhsCleaned::MaxRowsAtCompileTime, + MaxColsAtCompileTime = RhsCleaned::MaxColsAtCompileTime, + Flags = (MaxRowsAtCompileTime==1 ? 
RowMajorBit : 0), // TODO should be no storage order + CoeffReadCost = 0 // TODO CoeffReadCost should not be part of the expression traits }; }; - -template struct product_type -{ - typedef typename remove_all::type _Lhs; - typedef typename remove_all::type _Rhs; - enum { - MaxRows = _Lhs::MaxRowsAtCompileTime, - Rows = _Lhs::RowsAtCompileTime, - MaxCols = _Rhs::MaxColsAtCompileTime, - Cols = _Rhs::ColsAtCompileTime, - MaxDepth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::MaxColsAtCompileTime, - _Rhs::MaxRowsAtCompileTime), - Depth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::ColsAtCompileTime, - _Rhs::RowsAtCompileTime), - LargeThreshold = EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD - }; - - // the splitting into different lines of code here, introducing the _select enums and the typedef below, - // is to work around an internal compiler error with gcc 4.1 and 4.2. -private: - enum { - rows_select = product_size_category::value, - cols_select = product_size_category::value, - depth_select = product_size_category::value - }; - typedef product_type_selector selector; - -public: - enum { - value = selector::ret - }; -#ifdef EIGEN_DEBUG_PRODUCT - static void debug() - { - EIGEN_DEBUG_VAR(Rows); - EIGEN_DEBUG_VAR(Cols); - EIGEN_DEBUG_VAR(Depth); - EIGEN_DEBUG_VAR(rows_select); - EIGEN_DEBUG_VAR(cols_select); - EIGEN_DEBUG_VAR(depth_select); - EIGEN_DEBUG_VAR(value); - } -#endif -}; - - -/* The following allows to select the kind of product at compile time - * based on the three dimensions of the product. - * This is a compile time mapping from {1,Small,Large}^3 -> {product types} */ -// FIXME I'm not sure the current mapping is the ideal one. -template struct product_type_selector { enum { ret = OuterProduct }; }; -template struct product_type_selector<1, 1, Depth> { enum { ret = InnerProduct }; }; -template<> struct product_type_selector<1, 1, 1> { enum { ret = InnerProduct }; }; -template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; -template<> struct product_type_selector<1, Small,Small> { enum { ret = CoeffBasedProductMode }; }; -template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; -template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; -template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; -template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; -template<> struct product_type_selector<1, Large,Small> { enum { ret = CoeffBasedProductMode }; }; -template<> struct product_type_selector<1, Large,Large> { enum { ret = GemvProduct }; }; -template<> struct product_type_selector<1, Small,Large> { enum { ret = CoeffBasedProductMode }; }; -template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; -template<> struct product_type_selector { enum { ret = GemvProduct }; }; -template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; -template<> struct product_type_selector { enum { ret = GemmProduct }; }; -template<> struct product_type_selector { enum { ret = GemmProduct }; }; -template<> struct product_type_selector { enum { ret = GemmProduct }; }; -template<> struct product_type_selector { enum { ret = GemmProduct }; }; -template<> struct product_type_selector { enum { ret = GemmProduct }; }; -template<> struct product_type_selector { enum { ret = GemmProduct }; }; -template<> struct product_type_selector { enum { ret = GemmProduct }; }; - } // end namespace internal -/** \class ProductReturnType - * \ingroup Core_Module - 
* - * \brief Helper class to get the correct and optimized returned type of operator* - * - * \param Lhs the type of the left-hand side - * \param Rhs the type of the right-hand side - * \param ProductMode the type of the product (determined automatically by internal::product_mode) - * - * This class defines the typename Type representing the optimized product expression - * between two matrix expressions. In practice, using ProductReturnType::Type - * is the recommended way to define the result type of a function returning an expression - * which involve a matrix product. The class Product should never be - * used directly. - * - * \sa class Product, MatrixBase::operator*(const MatrixBase&) - */ -template -struct ProductReturnType -{ - // TODO use the nested type to reduce instanciations ???? -// typedef typename internal::nested::type LhsNested; -// typedef typename internal::nested::type RhsNested; - - typedef GeneralProduct Type; -}; template -struct ProductReturnType -{ - typedef typename internal::nested::type >::type LhsNested; - typedef typename internal::nested::type >::type RhsNested; - typedef CoeffBasedProduct Type; -}; - -template -struct ProductReturnType -{ - typedef typename internal::nested::type >::type LhsNested; - typedef typename internal::nested::type >::type RhsNested; - typedef CoeffBasedProduct Type; -}; - -// this is a workaround for sun CC -template -struct LazyProductReturnType : public ProductReturnType -{}; - -/*********************************************************************** -* Implementation of Inner Vector Vector Product -***********************************************************************/ - -// FIXME : maybe the "inner product" could return a Scalar -// instead of a 1x1 matrix ?? -// Pro: more natural for the user -// Cons: this could be a problem if in a meta unrolled algorithm a matrix-matrix -// product ends up to a row-vector times col-vector product... 
To tackle this use -// case, we could have a specialization for Block with: operator=(Scalar x); - -namespace internal { - -template -struct traits > - : traits::ReturnType,1,1> > -{}; - -} - -template -class GeneralProduct - : internal::no_assignment_operator, - public Matrix::ReturnType,1,1> -{ - typedef Matrix::ReturnType,1,1> Base; - public: - GeneralProduct(const Lhs& lhs, const Rhs& rhs) - { - EIGEN_STATIC_ASSERT((internal::is_same::value), - YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) - - Base::coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum(); - } - - /** Convertion to scalar */ - operator const typename Base::Scalar() const { - return Base::coeff(0,0); - } -}; - -/*********************************************************************** -* Implementation of Outer Vector Vector Product -***********************************************************************/ - -namespace internal { -template struct outer_product_selector; - -template -struct traits > - : traits, Lhs, Rhs> > -{}; - -} - -template -class GeneralProduct - : public ProductBase, Lhs, Rhs> +class Product : public ProductImpl::StorageKind, + typename internal::traits::StorageKind>::ret> { public: - EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct) - - GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) - { - EIGEN_STATIC_ASSERT((internal::is_same::value), - YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) - } - - template void scaleAndAddTo(Dest& dest, Scalar alpha) const - { - internal::outer_product_selector<(int(Dest::Flags)&RowMajorBit) ? RowMajor : ColMajor>::run(*this, dest, alpha); - } -}; - -namespace internal { - -template<> struct outer_product_selector { - template - static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) { - typedef typename Dest::Index Index; - // FIXME make sure lhs is sequentially stored - // FIXME not very good if rhs is real and lhs complex while alpha is real too - const Index cols = dest.cols(); - for (Index j=0; j struct outer_product_selector { - template - static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) { - typedef typename Dest::Index Index; - // FIXME make sure rhs is sequentially stored - // FIXME not very good if lhs is real and rhs complex while alpha is real too - const Index rows = dest.rows(); - for (Index i=0; i call fast BLAS-like colmajor routine - * 2 - the matrix is row-major, BLAS compatible and N is large => call fast BLAS-like rowmajor routine - * 3 - all other cases are handled using a simple loop along the outer-storage direction. - * Therefore we need a lower level meta selector. - * Furthermore, if the matrix is the rhs, then the product has to be transposed. - */ -namespace internal { - -template -struct traits > - : traits, Lhs, Rhs> > -{}; - -template -struct gemv_selector; - -} // end namespace internal - -template -class GeneralProduct - : public ProductBase, Lhs, Rhs> -{ - public: - EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct) - - typedef typename Lhs::Scalar LhsScalar; - typedef typename Rhs::Scalar RhsScalar; - - GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) - { -// EIGEN_STATIC_ASSERT((internal::is_same::value), -// YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) - } - - enum { Side = Lhs::IsVectorAtCompileTime ? 
OnTheLeft : OnTheRight }; - typedef typename internal::conditional::type MatrixType; - - template void scaleAndAddTo(Dest& dst, Scalar alpha) const - { - eigen_assert(m_lhs.rows() == dst.rows() && m_rhs.cols() == dst.cols()); - internal::gemv_selector::HasUsableDirectAccess)>::run(*this, dst, alpha); - } -}; - -namespace internal { - -// The vector is on the left => transposition -template -struct gemv_selector -{ - template - static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) - { - Transpose destT(dest); - enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor }; - gemv_selector - ::run(GeneralProduct,Transpose, GemvProduct> - (prod.rhs().transpose(), prod.lhs().transpose()), destT, alpha); - } -}; - -template struct gemv_static_vector_if; - -template -struct gemv_static_vector_if -{ - EIGEN_STRONG_INLINE Scalar* data() { eigen_internal_assert(false && "should never be called"); return 0; } -}; - -template -struct gemv_static_vector_if -{ - EIGEN_STRONG_INLINE Scalar* data() { return 0; } -}; - -template -struct gemv_static_vector_if -{ - #if EIGEN_ALIGN_STATICALLY - internal::plain_array m_data; - EIGEN_STRONG_INLINE Scalar* data() { return m_data.array; } - #else - // Some architectures cannot align on the stack, - // => let's manually enforce alignment by allocating more data and return the address of the first aligned element. - enum { - ForceAlignment = internal::packet_traits::Vectorizable, - PacketSize = internal::packet_traits::size - }; - internal::plain_array m_data; - EIGEN_STRONG_INLINE Scalar* data() { - return ForceAlignment - ? reinterpret_cast((reinterpret_cast(m_data.array) & ~(size_t(15))) + 16) - : m_data.array; - } - #endif -}; - -template<> struct gemv_selector -{ - template - static inline void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) - { - typedef typename ProductType::Index Index; - typedef typename ProductType::LhsScalar LhsScalar; - typedef typename ProductType::RhsScalar RhsScalar; - typedef typename ProductType::Scalar ResScalar; - typedef typename ProductType::RealScalar RealScalar; - typedef typename ProductType::ActualLhsType ActualLhsType; - typedef typename ProductType::ActualRhsType ActualRhsType; - typedef typename ProductType::LhsBlasTraits LhsBlasTraits; - typedef typename ProductType::RhsBlasTraits RhsBlasTraits; - typedef Map, Aligned> MappedDest; - - const ActualLhsType actualLhs = LhsBlasTraits::extract(prod.lhs()); - const ActualRhsType actualRhs = RhsBlasTraits::extract(prod.rhs()); - - ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs()) - * RhsBlasTraits::extractScalarFactor(prod.rhs()); - - enum { - // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 - // on, the other hand it is good for the cache to pack the vector anyways... - EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1, - ComplexByReal = (NumTraits::IsComplex) && (!NumTraits::IsComplex), - MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal - }; - - gemv_static_vector_if static_dest; - - // this is written like this (i.e., with a ?:) to workaround an ICE with ICC 12 - bool alphaIsCompatible = (!ComplexByReal) ? 
true : (imag(actualAlpha)==RealScalar(0)); - bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible; - RhsScalar compatibleAlpha = get_factor::run(actualAlpha); + typedef typename ProductImpl< + Lhs, Rhs, + typename internal::promote_storage_type::ret>::Base Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(Product) - ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(), - evalToDest ? dest.data() : static_dest.data()); - - if(!evalToDest) + typedef typename Lhs::Nested LhsNested; + typedef typename Rhs::Nested RhsNested; + typedef typename internal::remove_all::type LhsNestedCleaned; + typedef typename internal::remove_all::type RhsNestedCleaned; + + Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) { - #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN - int size = dest.size(); - EIGEN_DENSE_STORAGE_CTOR_PLUGIN - #endif - if(!alphaIsCompatible) - { - MappedDest(actualDestPtr, dest.size()).setZero(); - compatibleAlpha = RhsScalar(1); - } - else - MappedDest(actualDestPtr, dest.size()) = dest; + eigen_assert(lhs.cols() == rhs.rows() + && "invalid matrix product" + && "if you wanted a coeff-wise or a dot product use the respective explicit functions"); } - general_matrix_vector_product - ::run( - actualLhs.rows(), actualLhs.cols(), - &actualLhs.coeffRef(0,0), actualLhs.outerStride(), - actualRhs.data(), actualRhs.innerStride(), - actualDestPtr, 1, - compatibleAlpha); + inline Index rows() const { return m_lhs.rows(); } + inline Index cols() const { return m_rhs.cols(); } - if (!evalToDest) - { - if(!alphaIsCompatible) - dest += actualAlpha * MappedDest(actualDestPtr, dest.size()); - else - dest = MappedDest(actualDestPtr, dest.size()); - } - } + const LhsNestedCleaned& lhs() const { return m_lhs; } + const RhsNestedCleaned& rhs() const { return m_rhs; } + + protected: + + const LhsNested m_lhs; + const RhsNested m_rhs; }; -template<> struct gemv_selector +template +class ProductImpl : public internal::dense_xpr_base >::type { - template - static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) - { - typedef typename ProductType::LhsScalar LhsScalar; - typedef typename ProductType::RhsScalar RhsScalar; - typedef typename ProductType::Scalar ResScalar; - typedef typename ProductType::Index Index; - typedef typename ProductType::ActualLhsType ActualLhsType; - typedef typename ProductType::ActualRhsType ActualRhsType; - typedef typename ProductType::_ActualRhsType _ActualRhsType; - typedef typename ProductType::LhsBlasTraits LhsBlasTraits; - typedef typename ProductType::RhsBlasTraits RhsBlasTraits; + typedef Product Derived; + public: - typename add_const::type actualLhs = LhsBlasTraits::extract(prod.lhs()); - typename add_const::type actualRhs = RhsBlasTraits::extract(prod.rhs()); - - ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs()) - * RhsBlasTraits::extractScalarFactor(prod.rhs()); - - enum { - // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 - // on, the other hand it is good for the cache to pack the vector anyways... - DirectlyUseRhs = _ActualRhsType::InnerStrideAtCompileTime==1 - }; - - gemv_static_vector_if static_rhs; - - ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(), - DirectlyUseRhs ? 
const_cast(actualRhs.data()) : static_rhs.data()); - - if(!DirectlyUseRhs) - { - #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN - int size = actualRhs.size(); - EIGEN_DENSE_STORAGE_CTOR_PLUGIN - #endif - Map(actualRhsPtr, actualRhs.size()) = actualRhs; - } - - general_matrix_vector_product - ::run( - actualLhs.rows(), actualLhs.cols(), - &actualLhs.coeffRef(0,0), actualLhs.outerStride(), - actualRhsPtr, 1, - &dest.coeffRef(0,0), dest.innerStride(), - actualAlpha); - } + typedef typename internal::dense_xpr_base >::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Derived) }; -template<> struct gemv_selector -{ - template - static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) - { - typedef typename Dest::Index Index; - // TODO makes sure dest is sequentially stored in memory, otherwise use a temp - const Index size = prod.rhs().rows(); - for(Index k=0; k struct gemv_selector -{ - template - static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) - { - typedef typename Dest::Index Index; - // TODO makes sure rhs is sequentially stored in memory, otherwise use a temp - const Index rows = prod.rows(); - for(Index i=0; i -template -inline const typename ProductReturnType::Type -MatrixBase::operator*(const MatrixBase &other) const -{ - // A note regarding the function declaration: In MSVC, this function will sometimes - // not be inlined since DenseStorage is an unwindable object for dynamic - // matrices and product types are holding a member to store the result. - // Thus it does not help tagging this function with EIGEN_STRONG_INLINE. - enum { - ProductIsValid = Derived::ColsAtCompileTime==Dynamic - || OtherDerived::RowsAtCompileTime==Dynamic - || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime), - AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime, - SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived) - }; - // note to the lost user: - // * for a dot product use: v1.dot(v2) - // * for a coeff-wise product use: v1.cwiseProduct(v2) - EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), - INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) - EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), - INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) - EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) -#ifdef EIGEN_DEBUG_PRODUCT - internal::product_type::debug(); -#endif - return typename ProductReturnType::Type(derived(), other.derived()); -} - -/** \returns an expression of the matrix product of \c *this and \a other without implicit evaluation. - * - * The returned product will behave like any other expressions: the coefficients of the product will be - * computed once at a time as requested. This might be useful in some extremely rare cases when only - * a small and no coherent fraction of the result's coefficients have to be computed. - * - * \warning This version of the matrix product can be much much slower. So use it only if you know - * what you are doing and that you measured a true speed improvement. 
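// Editor's sketch (assumes Eigen 3.x): a typical use of the lazyProduct()
// documented above is to evaluate straight into the destination when the
// programmer already knows there is no aliasing; for large sizes the default
// operator* path (the GEMM kernel) is normally faster, as the warning notes.
#include <Eigen/Dense>

void lazyProductUse()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 100);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(100, 100);
  Eigen::MatrixXd C(100, 100);

  C = A * B;                      // default: evaluated through the cache-friendly GEMM path
  C.noalias() = A.lazyProduct(B); // coefficient-based expression written directly into C
}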
- * - * \sa operator*(const MatrixBase&) - */ -template -template -const typename LazyProductReturnType::Type -MatrixBase::lazyProduct(const MatrixBase &other) const -{ - enum { - ProductIsValid = Derived::ColsAtCompileTime==Dynamic - || OtherDerived::RowsAtCompileTime==Dynamic - || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime), - AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime, - SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived) - }; - // note to the lost user: - // * for a dot product use: v1.dot(v2) - // * for a coeff-wise product use: v1.cwiseProduct(v2) - EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), - INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) - EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), - INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) - EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) - - return typename LazyProductReturnType::Type(derived(), other.derived()); -} - #endif // EIGEN_PRODUCT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/ProductBase.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/ProductBase.h index 233ed6467..6cf02a649 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/ProductBase.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/ProductBase.h @@ -25,6 +25,8 @@ #ifndef EIGEN_PRODUCTBASE_H #define EIGEN_PRODUCTBASE_H +namespace Eigen { + /** \class ProductBase * \ingroup Core_Module * @@ -115,10 +117,10 @@ class ProductBase : public MatrixBase inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst,Scalar(1)); } template - inline void addTo(Dest& dst) const { scaleAndAddTo(dst,1); } + inline void addTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(1)); } template - inline void subTo(Dest& dst) const { scaleAndAddTo(dst,-1); } + inline void subTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(-1)); } template inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { derived().scaleAndAddTo(dst,alpha); } @@ -152,7 +154,8 @@ class ProductBase : public MatrixBase #else EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) eigen_assert(this->rows() == 1 && this->cols() == 1); - return derived().coeff(row,col); + Matrix result = *this; + return result.coeff(row,col); #endif } @@ -160,7 +163,8 @@ class ProductBase : public MatrixBase { EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) eigen_assert(this->rows() == 1 && this->cols() == 1); - return derived().coeff(i); + Matrix result = *this; + return result.coeff(i); } const Scalar& coeffRef(Index row, Index col) const @@ -179,8 +183,8 @@ class ProductBase : public MatrixBase protected: - const LhsNested m_lhs; - const RhsNested m_rhs; + LhsNested m_lhs; + RhsNested m_rhs; mutable PlainObject m_result; }; @@ -284,5 +288,6 @@ Derived& MatrixBase::lazyAssign(const ProductBase struct scalar_random_op { @@ -160,4 +162,6 @@ PlainObjectBase::setRandom(Index rows, Index cols) return setRandom(); } +} // end namespace Eigen + #endif // EIGEN_RANDOM_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Redux.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Redux.h index f9f5a95d5..d66ff00c1 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Redux.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Redux.h @@ -26,6 +26,8 @@ #ifndef EIGEN_REDUX_H #define EIGEN_REDUX_H +namespace Eigen { + namespace internal { // TODO @@ -95,7 +97,7 @@ struct redux_novec_unroller typedef typename Derived::Scalar Scalar; - EIGEN_STRONG_INLINE static 
Scalar run(const Derived &mat, const Func& func) + static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func) { return func(redux_novec_unroller::run(mat,func), redux_novec_unroller::run(mat,func)); @@ -112,7 +114,7 @@ struct redux_novec_unroller typedef typename Derived::Scalar Scalar; - EIGEN_STRONG_INLINE static Scalar run(const Derived &mat, const Func&) + static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func&) { return mat.coeffByOuterInner(outer, inner); } @@ -125,7 +127,7 @@ template struct redux_novec_unroller { typedef typename Derived::Scalar Scalar; - EIGEN_STRONG_INLINE static Scalar run(const Derived&, const Func&) { return Scalar(); } + static EIGEN_STRONG_INLINE Scalar run(const Derived&, const Func&) { return Scalar(); } }; /*** vectorization ***/ @@ -141,7 +143,7 @@ struct redux_vec_unroller typedef typename Derived::Scalar Scalar; typedef typename packet_traits::type PacketScalar; - EIGEN_STRONG_INLINE static PacketScalar run(const Derived &mat, const Func& func) + static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func& func) { return func.packetOp( redux_vec_unroller::run(mat,func), @@ -162,7 +164,7 @@ struct redux_vec_unroller typedef typename Derived::Scalar Scalar; typedef typename packet_traits::type PacketScalar; - EIGEN_STRONG_INLINE static PacketScalar run(const Derived &mat, const Func&) + static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func&) { return mat.template packetByOuterInner(outer, inner); } @@ -214,20 +216,33 @@ struct redux_impl const Index size = mat.size(); eigen_assert(size && "you are using an empty matrix"); const Index packetSize = packet_traits::size; - const Index alignedStart = first_aligned(mat); + const Index alignedStart = internal::first_aligned(mat); enum { alignment = bool(Derived::Flags & DirectAccessBit) || bool(Derived::Flags & AlignedBit) ? 
Aligned : Unaligned }; - const Index alignedSize = ((size-alignedStart)/packetSize)*packetSize; - const Index alignedEnd = alignedStart + alignedSize; + const Index alignedSize2 = ((size-alignedStart)/(2*packetSize))*(2*packetSize); + const Index alignedSize = ((size-alignedStart)/(packetSize))*(packetSize); + const Index alignedEnd2 = alignedStart + alignedSize2; + const Index alignedEnd = alignedStart + alignedSize; Scalar res; if(alignedSize) { - PacketScalar packet_res = mat.template packet(alignedStart); - for(Index index = alignedStart + packetSize; index < alignedEnd; index += packetSize) - packet_res = func.packetOp(packet_res, mat.template packet(index)); - res = func.predux(packet_res); + PacketScalar packet_res0 = mat.template packet(alignedStart); + if(alignedSize>packetSize) // we have at least two packets to partly unroll the loop + { + PacketScalar packet_res1 = mat.template packet(alignedStart+packetSize); + for(Index index = alignedStart + 2*packetSize; index < alignedEnd2; index += 2*packetSize) + { + packet_res0 = func.packetOp(packet_res0, mat.template packet(index)); + packet_res1 = func.packetOp(packet_res1, mat.template packet(index+packetSize)); + } + + packet_res0 = func.packetOp(packet_res0,packet_res1); + if(alignedEnd>alignedEnd2) + packet_res0 = func.packetOp(packet_res0, mat.template packet(alignedEnd2)); + } + res = func.predux(packet_res0); for(Index index = 0; index < alignedStart; ++index) res = func(res,mat.coeff(index)); @@ -296,7 +311,7 @@ struct redux_impl Size = Derived::SizeAtCompileTime, VectorizedSize = (Size / PacketSize) * PacketSize }; - EIGEN_STRONG_INLINE static Scalar run(const Derived& mat, const Func& func) + static EIGEN_STRONG_INLINE Scalar run(const Derived& mat, const Func& func) { eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix"); Scalar res = func.predux(redux_vec_unroller::run(mat,func)); @@ -401,4 +416,6 @@ MatrixBase::trace() const return derived().diagonal().sum(); } +} // end namespace Eigen + #endif // EIGEN_REDUX_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Replicate.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Replicate.h index d2f9712db..79e3578df 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Replicate.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Replicate.h @@ -25,6 +25,8 @@ #ifndef EIGEN_REPLICATE_H #define EIGEN_REPLICATE_H +namespace Eigen { + /** * \class Replicate * \ingroup Core_Module @@ -48,7 +50,10 @@ struct traits > typedef typename MatrixType::Scalar Scalar; typedef typename traits::StorageKind StorageKind; typedef typename traits::XprKind XprKind; - typedef typename nested::type MatrixTypeNested; + enum { + Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? 
Dynamic : RowFactor*ColFactor + }; + typedef typename nested::type MatrixTypeNested; typedef typename remove_reference::type _MatrixTypeNested; enum { RowsAtCompileTime = RowFactor==Dynamic || int(MatrixType::RowsAtCompileTime)==Dynamic @@ -72,6 +77,8 @@ struct traits > template class Replicate : public internal::dense_xpr_base< Replicate >::type { + typedef typename internal::traits::MatrixTypeNested MatrixTypeNested; + typedef typename internal::traits::_MatrixTypeNested _MatrixTypeNested; public: typedef typename internal::dense_xpr_base::type Base; @@ -87,7 +94,7 @@ template class Replicate } template - inline Replicate(const OriginalMatrixType& matrix, int rowFactor, int colFactor) + inline Replicate(const OriginalMatrixType& matrix, Index rowFactor, Index colFactor) : m_matrix(matrix), m_rowFactor(rowFactor), m_colFactor(colFactor) { EIGEN_STATIC_ASSERT((internal::is_same::type,OriginalMatrixType>::value), @@ -122,9 +129,13 @@ template class Replicate return m_matrix.template packet(actual_row, actual_col); } + const _MatrixTypeNested& nestedExpression() const + { + return m_matrix; + } protected: - const typename MatrixType::Nested m_matrix; + MatrixTypeNested m_matrix; const internal::variable_if_dynamic m_rowFactor; const internal::variable_if_dynamic m_colFactor; }; @@ -176,4 +187,6 @@ VectorwiseOp::replicate(Index factor) const (_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1); } +} // end namespace Eigen + #endif // EIGEN_REPLICATE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/ReturnByValue.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/ReturnByValue.h index 24c5a4e21..24b6a3f6a 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/ReturnByValue.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/ReturnByValue.h @@ -26,6 +26,8 @@ #ifndef EIGEN_RETURNBYVALUE_H #define EIGEN_RETURNBYVALUE_H +namespace Eigen { + /** \class ReturnByValue * \ingroup Core_Module * @@ -96,4 +98,6 @@ Derived& DenseBase::operator=(const ReturnByValue& other) return derived(); } +} // end namespace Eigen + #endif // EIGEN_RETURNBYVALUE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Reverse.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Reverse.h index 600744ae7..9e4e8a2bc 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Reverse.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Reverse.h @@ -27,6 +27,8 @@ #ifndef EIGEN_REVERSE_H #define EIGEN_REVERSE_H +namespace Eigen { + /** \class Reverse * \ingroup Core_Module * @@ -183,8 +185,14 @@ template class Reverse m_matrix.const_cast_derived().template writePacket(m_matrix.size() - index - PacketSize, internal::preverse(x)); } + const typename internal::remove_all::type& + nestedExpression() const + { + return m_matrix; + } + protected: - const typename MatrixType::Nested m_matrix; + typename MatrixType::Nested m_matrix; }; /** \returns an expression of the reverse of *this. 
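A small sketch of the Replicate and Reverse expressions touched above (values are illustrative):

#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

int main()
{
  Vector3d v(1, 2, 3);

  // Tile the vector: 2 copies vertically and 3 horizontally gives a 6x3 matrix.
  MatrixXd tiled = v.replicate(2, 3);

  // Reverse the coefficient order: (3, 2, 1).
  Vector3d r = v.reverse();

  std::cout << tiled.rows() << "x" << tiled.cols() << "  " << r.transpose() << "\n";
  return 0;
}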
@@ -226,5 +234,6 @@ inline void DenseBase::reverseInPlace() derived() = derived().reverse().eval(); } +} // end namespace Eigen #endif // EIGEN_REVERSE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Select.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Select.h index d0cd66a26..92508a168 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Select.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Select.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SELECT_H #define EIGEN_SELECT_H +namespace Eigen { + /** \class Select * \ingroup Core_Module * @@ -101,10 +103,25 @@ class Select : internal::no_assignment_operator, return m_else.coeff(i); } + const ConditionMatrixType& conditionMatrix() const + { + return m_condition; + } + + const ThenMatrixType& thenMatrix() const + { + return m_then; + } + + const ElseMatrixType& elseMatrix() const + { + return m_else; + } + protected: - const typename ConditionMatrixType::Nested m_condition; - const typename ThenMatrixType::Nested m_then; - const typename ElseMatrixType::Nested m_else; + typename ConditionMatrixType::Nested m_condition; + typename ThenMatrixType::Nested m_then; + typename ElseMatrixType::Nested m_else; }; @@ -155,4 +172,6 @@ DenseBase::select(typename ElseDerived::Scalar thenScalar, derived(), ElseDerived::Constant(rows(),cols(),thenScalar), elseMatrix.derived()); } +} // end namespace Eigen + #endif // EIGEN_SELECT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/SelfAdjointView.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/SelfAdjointView.h index 4bb68755e..086f05c49 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/SelfAdjointView.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/SelfAdjointView.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SELFADJOINTMATRIX_H #define EIGEN_SELFADJOINTMATRIX_H +namespace Eigen { + /** \class SelfAdjointView * \ingroup Core_Module * @@ -82,7 +84,7 @@ template class SelfAdjointView }; typedef typename MatrixType::PlainObject PlainObject; - inline SelfAdjointView(const MatrixType& matrix) : m_matrix(matrix) + inline SelfAdjointView(MatrixType& matrix) : m_matrix(matrix) {} inline Index rows() const { return m_matrix.rows(); } @@ -199,7 +201,7 @@ template class SelfAdjointView #endif protected: - const MatrixTypeNested m_matrix; + MatrixTypeNested m_matrix; }; @@ -222,7 +224,7 @@ struct triangular_assignment_selector::run(dst, src); @@ -236,7 +238,7 @@ struct triangular_assignment_selector struct triangular_assignment_selector { - inline static void run(Derived1 &, const Derived2 &) {} + static inline void run(Derived1 &, const Derived2 &) {} }; template @@ -247,7 +249,7 @@ struct triangular_assignment_selector::run(dst, src); @@ -261,14 +263,14 @@ struct triangular_assignment_selector struct triangular_assignment_selector { - inline static void run(Derived1 &, const Derived2 &) {} + static inline void run(Derived1 &, const Derived2 &) {} }; template struct triangular_assignment_selector { typedef typename Derived1::Index Index; - inline static void run(Derived1 &dst, const Derived2 &src) + static inline void run(Derived1 &dst, const Derived2 &src) { for(Index j = 0; j < dst.cols(); ++j) { @@ -285,7 +287,7 @@ struct triangular_assignment_selector struct triangular_assignment_selector { - inline static void run(Derived1 &dst, const Derived2 &src) + static inline void run(Derived1 &dst, const Derived2 &src) { typedef typename Derived1::Index Index; for(Index i = 0; i < dst.rows(); ++i) @@ -322,4 +324,6 @@ MatrixBase::selfadjointView() return derived(); } +} // end namespace Eigen + #endif // EIGEN_SELFADJOINTMATRIX_H diff --git 
a/gtsam/3rdparty/Eigen/Eigen/src/Core/SelfCwiseBinaryOp.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/SelfCwiseBinaryOp.h index 4e9ca8874..99389d7e2 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/SelfCwiseBinaryOp.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/SelfCwiseBinaryOp.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SELFCWISEBINARYOP_H #define EIGEN_SELFCWISEBINARYOP_H +namespace Eigen { + /** \class SelfCwiseBinaryOp * \ingroup Core_Module * @@ -163,6 +165,16 @@ template class SelfCwiseBinaryOp return Base::operator=(rhs); } + Lhs& expression() const + { + return m_matrix; + } + + const BinaryOp& functor() const + { + return m_functor; + } + protected: Lhs& m_matrix; const BinaryOp& m_functor; @@ -192,4 +204,6 @@ inline Derived& DenseBase::operator/=(const Scalar& other) return derived(); } +} // end namespace Eigen + #endif // EIGEN_SELFCWISEBINARYOP_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/SolveTriangular.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/SolveTriangular.h index a23014a34..ef09226a4 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/SolveTriangular.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/SolveTriangular.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SOLVETRIANGULAR_H #define EIGEN_SOLVETRIANGULAR_H +namespace Eigen { + namespace internal { // Forward declarations: @@ -98,12 +100,22 @@ struct triangular_solver_selector typedef typename Rhs::Index Index; typedef blas_traits LhsProductTraits; typedef typename LhsProductTraits::DirectLinearAccessType ActualLhsType; + static void run(const Lhs& lhs, Rhs& rhs) { - const ActualLhsType actualLhs = LhsProductTraits::extract(lhs); + typename internal::add_const_on_value_type::type actualLhs = LhsProductTraits::extract(lhs); + + const Index size = lhs.rows(); + const Index othersize = Side==OnTheLeft? rhs.cols() : rhs.rows(); + + typedef internal::gemm_blocking_space<(Rhs::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar, + Rhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxRowsAtCompileTime,4> BlockingType; + + BlockingType blocking(rhs.rows(), rhs.cols(), size); + triangular_solve_matrix - ::run(lhs.rows(), Side==OnTheLeft? 
rhs.cols() : rhs.rows(), &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &rhs.coeffRef(0,0), rhs.outerStride()); + ::run(size, othersize, &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &rhs.coeffRef(0,0), rhs.outerStride(), blocking); } }; @@ -177,10 +189,8 @@ template void TriangularView::solveInPlace(const MatrixBase& _other) const { OtherDerived& other = _other.const_cast_derived(); - eigen_assert(cols() == rows()); - eigen_assert( (Side==OnTheLeft && cols() == other.rows()) || (Side==OnTheRight && cols() == other.cols()) ); - eigen_assert(!(Mode & ZeroDiag)); - eigen_assert((Mode & (Upper|Lower)) != 0); + eigen_assert( cols() == rows() && ((Side==OnTheLeft && cols() == other.rows()) || (Side==OnTheRight && cols() == other.cols())) ); + eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower))); enum { copy = internal::traits::Flags & RowMajorBit && OtherDerived::IsVectorAtCompileTime }; typedef typename internal::conditional struct triangular_solv protected: const TriangularType& m_triangularMatrix; - const typename Rhs::Nested m_rhs; + typename Rhs::Nested m_rhs; }; } // namespace internal +} // end namespace Eigen + #endif // EIGEN_SOLVETRIANGULAR_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/StableNorm.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/StableNorm.h index f667272e4..16514c86a 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/StableNorm.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/StableNorm.h @@ -25,6 +25,8 @@ #ifndef EIGEN_STABLENORM_H #define EIGEN_STABLENORM_H +namespace Eigen { + namespace internal { template inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale) @@ -58,9 +60,9 @@ MatrixBase::stableNorm() const { using std::min; const Index blockSize = 4096; - RealScalar scale = 0; - RealScalar invScale = 1; - RealScalar ssq = 0; // sum of square + RealScalar scale(0); + RealScalar invScale(1); + RealScalar ssq(0); // sum of square enum { Alignment = (int(Flags)&DirectAccessBit) || (int(Flags)&AlignedBit) ? 
1 : 0 }; @@ -187,4 +189,6 @@ MatrixBase::hypotNorm() const return this->cwiseAbs().redux(internal::scalar_hypot_op()); } +} // end namespace Eigen + #endif // EIGEN_STABLENORM_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Stride.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Stride.h index 0430f1116..73c54e6bf 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Stride.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Stride.h @@ -25,6 +25,8 @@ #ifndef EIGEN_STRIDE_H #define EIGEN_STRIDE_H +namespace Eigen { + /** \class Stride * \ingroup Core_Module * @@ -116,4 +118,6 @@ class OuterStride : public Stride OuterStride(Index v) : Base(v,0) {} }; +} // end namespace Eigen + #endif // EIGEN_STRIDE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Swap.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Swap.h index 5fb032866..deb1d2831 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Swap.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Swap.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SWAP_H #define EIGEN_SWAP_H +namespace Eigen { + /** \class SwapWrapper * \ingroup Core_Module * @@ -52,6 +54,15 @@ template class SwapWrapper inline Index cols() const { return m_expression.cols(); } inline Index outerStride() const { return m_expression.outerStride(); } inline Index innerStride() const { return m_expression.innerStride(); } + + typedef typename internal::conditional< + internal::is_lvalue::value, + Scalar, + const Scalar + >::type ScalarWithConstIfNotLvalue; + + inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); } + inline const Scalar* data() const { return m_expression.data(); } inline Scalar& coeffRef(Index row, Index col) { @@ -119,8 +130,12 @@ template class SwapWrapper _other.template writePacket(index, tmp); } + ExpressionType& expression() const { return m_expression; } + protected: ExpressionType& m_expression; }; +} // end namespace Eigen + #endif // EIGEN_SWAP_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpose.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpose.h index 3f7c7df6e..c62f74764 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpose.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpose.h @@ -26,6 +26,8 @@ #ifndef EIGEN_TRANSPOSE_H #define EIGEN_TRANSPOSE_H +namespace Eigen { + /** \class Transpose * \ingroup Core_Module * @@ -91,7 +93,7 @@ template class Transpose nestedExpression() { return m_matrix.const_cast_derived(); } protected: - const typename MatrixType::Nested m_matrix; + typename MatrixType::Nested m_matrix; }; namespace internal { @@ -152,12 +154,12 @@ template class TransposeImpl return derived().nestedExpression().coeffRef(index); } - inline const CoeffReturnType coeff(Index row, Index col) const + inline CoeffReturnType coeff(Index row, Index col) const { return derived().nestedExpression().coeff(col, row); } - inline const CoeffReturnType coeff(Index index) const + inline CoeffReturnType coeff(Index index) const { return derived().nestedExpression().coeff(index); } @@ -422,4 +424,6 @@ void DenseBase::checkTransposeAliasing(const OtherDerived& other) const } #endif +} // end namespace Eigen + #endif // EIGEN_TRANSPOSE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpositions.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpositions.h index 88fdfb222..fa37822f8 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpositions.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpositions.h @@ -25,6 +25,8 @@ #ifndef EIGEN_TRANSPOSITIONS_H #define EIGEN_TRANSPOSITIONS_H +namespace Eigen { + /** \class Transpositions * \ingroup Core_Module * @@ -404,7 +406,7 @@ struct 
transposition_matrix_product_retval protected: const TranspositionType& m_transpositions; - const typename MatrixType::Nested m_matrix; + typename MatrixType::Nested m_matrix; }; } // end namespace internal @@ -444,4 +446,6 @@ class Transpose > const TranspositionType& m_transpositions; }; +} // end namespace Eigen + #endif // EIGEN_TRANSPOSITIONS_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/TriangularMatrix.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/TriangularMatrix.h index 033e81036..5e97e4052 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/TriangularMatrix.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/TriangularMatrix.h @@ -26,6 +26,8 @@ #ifndef EIGEN_TRIANGULARMATRIX_H #define EIGEN_TRIANGULARMATRIX_H +namespace Eigen { + namespace internal { template struct triangular_solve_retval; @@ -273,11 +275,8 @@ template class TriangularView inline const TriangularView conjugate() const { return m_matrix.conjugate(); } - /** \sa MatrixBase::adjoint() */ - inline TriangularView adjoint() - { return m_matrix.adjoint(); } /** \sa MatrixBase::adjoint() const */ - inline const TriangularView adjoint() const + inline const TriangularView adjoint() const { return m_matrix.adjoint(); } /** \sa MatrixBase::transpose() */ @@ -288,11 +287,13 @@ template class TriangularView } /** \sa MatrixBase::transpose() const */ inline const TriangularView,TransposeMode> transpose() const - { return m_matrix.transpose(); } + { + return m_matrix.transpose(); + } /** Efficient triangular matrix times vector/matrix product */ template - TriangularProduct + TriangularProduct operator*(const MatrixBase& rhs) const { return TriangularProduct @@ -375,7 +376,8 @@ template class TriangularView template void swap(MatrixBase const & other) { - TriangularView,Mode>(const_cast(m_matrix)).lazyAssign(other.derived()); + SwapWrapper swaper(const_cast(m_matrix)); + TriangularView,Mode>(swaper).lazyAssign(other.derived()); } Scalar determinant() const @@ -433,7 +435,7 @@ template class TriangularView template EIGEN_STRONG_INLINE TriangularView& assignProduct(const ProductBase& prod, const Scalar& alpha); - const MatrixTypeNested m_matrix; + MatrixTypeNested m_matrix; }; /*************************************************************************** @@ -452,7 +454,7 @@ struct triangular_assignment_selector typedef typename Derived1::Scalar Scalar; - inline static void run(Derived1 &dst, const Derived2 &src) + static inline void run(Derived1 &dst, const Derived2 &src) { triangular_assignment_selector::run(dst, src); @@ -480,7 +482,7 @@ struct triangular_assignment_selector template struct triangular_assignment_selector { - inline static void run(Derived1 &, const Derived2 &) {} + static inline void run(Derived1 &, const Derived2 &) {} }; template @@ -488,7 +490,7 @@ struct triangular_assignment_selector struct triangular_assignment_selector { typedef typename Derived1::Index Index; - inline static void run(Derived1 &dst, const Derived2 &src) + static inline void run(Derived1 &dst, const Derived2 &src) { for(Index j = 0; j < dst.cols(); ++j) { @@ -524,7 +526,7 @@ template struct triangular_assignment_selector { typedef typename Derived1::Index Index; - inline static void run(Derived1 &dst, const Derived2 &src) + static inline void run(Derived1 &dst, const Derived2 &src) { for(Index j = 0; j < dst.cols(); ++j) { @@ -542,7 +544,7 @@ template struct triangular_assignment_selector { typedef typename Derived1::Index Index; - inline static void run(Derived1 &dst, const Derived2 &src) + static inline void run(Derived1 &dst, const Derived2 &src) { 
for(Index j = 0; j < dst.cols(); ++j) { @@ -560,7 +562,7 @@ template struct triangular_assignment_selector { typedef typename Derived1::Index Index; - inline static void run(Derived1 &dst, const Derived2 &src) + static inline void run(Derived1 &dst, const Derived2 &src) { for(Index j = 0; j < dst.cols(); ++j) { @@ -580,7 +582,7 @@ template struct triangular_assignment_selector { typedef typename Derived1::Index Index; - inline static void run(Derived1 &dst, const Derived2 &src) + static inline void run(Derived1 &dst, const Derived2 &src) { for(Index j = 0; j < dst.cols(); ++j) { @@ -835,4 +837,6 @@ bool MatrixBase::isLowerTriangular(RealScalar prec) const return true; } +} // end namespace Eigen + #endif // EIGEN_TRIANGULARMATRIX_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/VectorBlock.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/VectorBlock.h index 858e4c786..66c9fd21a 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/VectorBlock.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/VectorBlock.h @@ -26,6 +26,8 @@ #ifndef EIGEN_VECTORBLOCK_H #define EIGEN_VECTORBLOCK_H +namespace Eigen { + /** \class VectorBlock * \ingroup Core_Module * @@ -292,5 +294,6 @@ DenseBase::tail() const return typename ConstFixedSegmentReturnType::Type(derived(), size() - Size); } +} // end namespace Eigen #endif // EIGEN_VECTORBLOCK_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/VectorwiseOp.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/VectorwiseOp.h index 20f688157..7b5be7cd5 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/VectorwiseOp.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/VectorwiseOp.h @@ -26,6 +26,8 @@ #ifndef EIGEN_PARTIAL_REDUX_H #define EIGEN_PARTIAL_REDUX_H +namespace Eigen { + /** \class PartialReduxExpr * \ingroup Core_Module * @@ -110,7 +112,7 @@ class PartialReduxExpr : internal::no_assignment_operator, } protected: - const MatrixTypeNested m_matrix; + MatrixTypeNested m_matrix; const MemberOp m_functor; }; @@ -237,7 +239,10 @@ template class VectorwiseOp typename ExtendedType::Type extendedTo(const DenseBase& other) const { - EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived); + EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(Direction==Vertical, OtherDerived::MaxColsAtCompileTime==1), + YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED) + EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(Direction==Horizontal, OtherDerived::MaxRowsAtCompileTime==1), + YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED) return typename ExtendedType::Type (other.derived(), Direction==Vertical ? 
1 : m_matrix.rows(), @@ -418,10 +423,9 @@ template class VectorwiseOp ExpressionType& operator=(const DenseBase& other) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) //eigen_assert((m_matrix.isNull()) == (other.isNull())); FIXME - for(Index j=0; j(m_matrix); + return const_cast(m_matrix = extendedTo(other.derived())); } /** Adds the vector \a other to each subvector of \c *this */ @@ -429,9 +433,8 @@ template class VectorwiseOp ExpressionType& operator+=(const DenseBase& other) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) - for(Index j=0; j(m_matrix); + EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) + return const_cast(m_matrix += extendedTo(other.derived())); } /** Substracts the vector \a other to each subvector of \c *this */ @@ -439,8 +442,29 @@ template class VectorwiseOp ExpressionType& operator-=(const DenseBase& other) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) - for(Index j=0; j(m_matrix -= extendedTo(other.derived())); + } + + /** Multiples each subvector of \c *this by the vector \a other */ + template + ExpressionType& operator*=(const DenseBase& other) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType) + EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) + m_matrix *= extendedTo(other.derived()); + return const_cast(m_matrix); + } + + /** Divides each subvector of \c *this by the vector \a other */ + template + ExpressionType& operator/=(const DenseBase& other) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType) + EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) + m_matrix /= extendedTo(other.derived()); return const_cast(m_matrix); } @@ -451,7 +475,8 @@ template class VectorwiseOp const typename ExtendedType::Type> operator+(const DenseBase& other) const { - EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived); + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) return m_matrix + extendedTo(other.derived()); } @@ -462,10 +487,39 @@ template class VectorwiseOp const typename ExtendedType::Type> operator-(const DenseBase& other) const { - EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived); + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) return m_matrix - extendedTo(other.derived()); } + /** Returns the expression where each subvector is the product of the vector \a other + * by the corresponding subvector of \c *this */ + template EIGEN_STRONG_INLINE + CwiseBinaryOp, + const ExpressionTypeNestedCleaned, + const typename ExtendedType::Type> + operator*(const DenseBase& other) const + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType) + EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) + return m_matrix * extendedTo(other.derived()); + } + + /** Returns the expression where each subvector is the quotient of the corresponding + * subvector of \c *this by the vector \a other */ + template + CwiseBinaryOp, + const ExpressionTypeNestedCleaned, + const typename ExtendedType::Type> + operator/(const DenseBase& other) const + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType) + EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) + return m_matrix / extendedTo(other.derived()); + } + /////////// Geometry module /////////// #if 
EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS @@ -509,7 +563,7 @@ template class VectorwiseOp * Example: \include MatrixBase_colwise.cpp * Output: \verbinclude MatrixBase_colwise.out * - * \sa rowwise(), class VectorwiseOp + * \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting */ template inline const typename DenseBase::ConstColwiseReturnType @@ -520,7 +574,7 @@ DenseBase::colwise() const /** \returns a writable VectorwiseOp wrapper of *this providing additional partial reduction operations * - * \sa rowwise(), class VectorwiseOp + * \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting */ template inline typename DenseBase::ColwiseReturnType @@ -534,7 +588,7 @@ DenseBase::colwise() * Example: \include MatrixBase_rowwise.cpp * Output: \verbinclude MatrixBase_rowwise.out * - * \sa colwise(), class VectorwiseOp + * \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting */ template inline const typename DenseBase::ConstRowwiseReturnType @@ -545,7 +599,7 @@ DenseBase::rowwise() const /** \returns a writable VectorwiseOp wrapper of *this providing additional partial reduction operations * - * \sa colwise(), class VectorwiseOp + * \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting */ template inline typename DenseBase::RowwiseReturnType @@ -554,4 +608,6 @@ DenseBase::rowwise() return derived(); } +} // end namespace Eigen + #endif // EIGEN_PARTIAL_REDUX_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Visitor.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Visitor.h index 378ebcba1..fd04fd978 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Visitor.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Visitor.h @@ -25,6 +25,8 @@ #ifndef EIGEN_VISITOR_H #define EIGEN_VISITOR_H +namespace Eigen { + namespace internal { template @@ -35,7 +37,7 @@ struct visitor_impl row = (UnrollCount-1) % Derived::RowsAtCompileTime }; - inline static void run(const Derived &mat, Visitor& visitor) + static inline void run(const Derived &mat, Visitor& visitor) { visitor_impl::run(mat, visitor); visitor(mat.coeff(row, col), row, col); @@ -45,7 +47,7 @@ struct visitor_impl template struct visitor_impl { - inline static void run(const Derived &mat, Visitor& visitor) + static inline void run(const Derived &mat, Visitor& visitor) { return visitor.init(mat.coeff(0, 0), 0, 0); } @@ -55,7 +57,7 @@ template struct visitor_impl { typedef typename Derived::Index Index; - inline static void run(const Derived& mat, Visitor& visitor) + static inline void run(const Derived& mat, Visitor& visitor) { visitor.init(mat.coeff(0,0), 0, 0); for(Index i = 1; i < mat.rows(); ++i) @@ -245,4 +247,6 @@ DenseBase::maxCoeff(IndexType* index) const return maxVisitor.res; } +} // end namespace Eigen + #endif // EIGEN_VISITOR_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/AltiVec/Complex.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/AltiVec/Complex.h index f8adf1b63..b2d866b71 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/AltiVec/Complex.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/AltiVec/Complex.h @@ -25,6 +25,8 @@ #ifndef EIGEN_COMPLEX_ALTIVEC_H #define EIGEN_COMPLEX_ALTIVEC_H +namespace Eigen { + namespace internal { static Packet4ui p4ui_CONJ_XOR = vec_mergeh((Packet4ui)p4i_ZERO, (Packet4ui)p4f_ZERO_);//{ 0x00000000, 0x80000000, 0x00000000, 0x80000000 }; @@ -168,7 +170,7 @@ template<> EIGEN_STRONG_INLINE std::complex predux_mul(const P template struct palign_impl { - EIGEN_STRONG_INLINE static void run(Packet2cf& first, const 
Packet2cf& second) + static EIGEN_STRONG_INLINE void run(Packet2cf& first, const Packet2cf& second) { if (Offset==1) { @@ -225,4 +227,6 @@ template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip(const Packet2cf& x } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_COMPLEX_ALTIVEC_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h index dc34ebbd6..5b62b4c31 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h @@ -25,6 +25,8 @@ #ifndef EIGEN_PACKET_MATH_ALTIVEC_H #define EIGEN_PACKET_MATH_ALTIVEC_H +namespace Eigen { + namespace internal { #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD @@ -487,7 +489,7 @@ template<> EIGEN_STRONG_INLINE int predux_max(const Packet4i& a) template struct palign_impl { - EIGEN_STRONG_INLINE static void run(Packet4f& first, const Packet4f& second) + static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second) { if (Offset!=0) first = vec_sld(first, second, Offset*4); @@ -497,7 +499,7 @@ struct palign_impl template struct palign_impl { - EIGEN_STRONG_INLINE static void run(Packet4i& first, const Packet4i& second) + static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second) { if (Offset!=0) first = vec_sld(first, second, Offset*4); @@ -506,4 +508,6 @@ struct palign_impl } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_PACKET_MATH_ALTIVEC_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/NEON/Complex.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/NEON/Complex.h index 212887184..72abb6f4a 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/NEON/Complex.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/NEON/Complex.h @@ -25,6 +25,8 @@ #ifndef EIGEN_COMPLEX_NEON_H #define EIGEN_COMPLEX_NEON_H +namespace Eigen { + namespace internal { static uint32x4_t p4ui_CONJ_XOR = EIGEN_INIT_NEON_PACKET4(0x00000000, 0x80000000, 0x00000000, 0x80000000); @@ -267,4 +269,6 @@ template<> EIGEN_STRONG_INLINE Packet2cf pdiv(const Packet2cf& a, con } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_COMPLEX_NEON_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/NEON/PacketMath.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/NEON/PacketMath.h index 6c7cd1590..1eb082a5b 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/NEON/PacketMath.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/NEON/PacketMath.h @@ -27,6 +27,8 @@ #ifndef EIGEN_PACKET_MATH_NEON_H #define EIGEN_PACKET_MATH_NEON_H +namespace Eigen { + namespace internal { #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD @@ -158,7 +160,8 @@ template<> EIGEN_STRONG_INLINE Packet4i pdiv(const Packet4i& /*a*/, co } // for some weird raisons, it has to be overloaded for packet of integers -template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); } +template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vmlaq_f32(c,a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return vmlaq_s32(c,a,b); } template<> EIGEN_STRONG_INLINE Packet4f pmin(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); } template<> EIGEN_STRONG_INLINE Packet4i pmin(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); } @@ -431,4 +434,6 @@ PALIGN_NEON(3,Packet4i,vextq_s32) } // end namespace 
internal +} // end namespace Eigen + #endif // EIGEN_PACKET_MATH_NEON_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/SSE/Complex.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/SSE/Complex.h index c352bb3e6..1615886ac 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/SSE/Complex.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/SSE/Complex.h @@ -25,6 +25,8 @@ #ifndef EIGEN_COMPLEX_SSE_H #define EIGEN_COMPLEX_SSE_H +namespace Eigen { + namespace internal { //---------- float ---------- @@ -102,7 +104,7 @@ template<> EIGEN_STRONG_INLINE Packet2cf pset1(const std::complex(&from)); #else res.v = _mm_loadl_pi(res.v, (const __m64*)&from); #endif @@ -151,7 +153,7 @@ template<> EIGEN_STRONG_INLINE std::complex predux_mul(const P template struct palign_impl { - EIGEN_STRONG_INLINE static void run(Packet2cf& first, const Packet2cf& second) + static EIGEN_STRONG_INLINE void run(Packet2cf& first, const Packet2cf& second) { if (Offset==1) { @@ -350,7 +352,7 @@ template<> EIGEN_STRONG_INLINE std::complex predux_mul(const template struct palign_impl { - EIGEN_STRONG_INLINE static void run(Packet1cd& /*first*/, const Packet1cd& /*second*/) + static EIGEN_STRONG_INLINE void run(Packet1cd& /*first*/, const Packet1cd& /*second*/) { // FIXME is it sure we never have to align a Packet1cd? // Even though a std::complex has 16 bytes, it is not necessarily aligned on a 16 bytes boundary... @@ -444,4 +446,6 @@ EIGEN_STRONG_INLINE Packet1cd pcplxflip/**/(const Packet1cd& x) } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_COMPLEX_SSE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/SSE/MathFunctions.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/SSE/MathFunctions.h index 9d56d8218..de2f06d5f 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/SSE/MathFunctions.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/SSE/MathFunctions.h @@ -30,6 +30,8 @@ #ifndef EIGEN_MATH_FUNCTIONS_SSE_H #define EIGEN_MATH_FUNCTIONS_SSE_H +namespace Eigen { + namespace internal { template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED @@ -121,7 +123,7 @@ Packet4f pexp(const Packet4f& _x) _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f); - _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647949f); + _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647950f); _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f); _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f); @@ -168,7 +170,7 @@ Packet4f pexp(const Packet4f& _x) y = pmadd(y, z, x); y = padd(y, p4f_1); - /* build 2^n */ + // build 2^n emm0 = _mm_cvttps_epi32(fx); emm0 = _mm_add_epi32(emm0, p4i_0x7f); emm0 = _mm_slli_epi32(emm0, 23); @@ -392,4 +394,6 @@ Packet4f psqrt(const Packet4f& _x) } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_MATH_FUNCTIONS_SSE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/SSE/PacketMath.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/SSE/PacketMath.h index 908e27368..8faeeefc9 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/SSE/PacketMath.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/arch/SSE/PacketMath.h @@ -25,6 +25,8 @@ #ifndef EIGEN_PACKET_MATH_SSE_H #define EIGEN_PACKET_MATH_SSE_H +namespace Eigen { + namespace internal { #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD @@ -110,9 +112,18 @@ template<> struct unpacket_traits { typedef float type; enum {size=4} template<> struct unpacket_traits { typedef double type; enum {size=2}; }; template<> struct unpacket_traits { typedef int type; enum {size=4}; }; +#if defined(_MSC_VER) && (_MSC_VER==1500) +// 
Workaround MSVC 9 internal compiler error. +// TODO: It has been detected with win64 builds (amd64), so let's check whether it also happens in 32bits+SSE mode +// TODO: let's check whether there does not exist a better fix, like adding a pset0() function. (it crashed on pset1(0)). +template<> EIGEN_STRONG_INLINE Packet4f pset1(const float& from) { return _mm_set_ps(from,from,from,from); } +template<> EIGEN_STRONG_INLINE Packet2d pset1(const double& from) { return _mm_set_pd(from,from); } +template<> EIGEN_STRONG_INLINE Packet4i pset1(const int& from) { return _mm_set_epi32(from,from,from,from); } +#else template<> EIGEN_STRONG_INLINE Packet4f pset1(const float& from) { return _mm_set1_ps(from); } template<> EIGEN_STRONG_INLINE Packet2d pset1(const double& from) { return _mm_set1_pd(from); } template<> EIGEN_STRONG_INLINE Packet4i pset1(const int& from) { return _mm_set1_epi32(from); } +#endif template<> EIGEN_STRONG_INLINE Packet4f plset(const float& a) { return _mm_add_ps(pset1(a), _mm_set_ps(3,2,1,0)); } template<> EIGEN_STRONG_INLINE Packet2d plset(const double& a) { return _mm_add_pd(pset1(a),_mm_set_pd(1,0)); } @@ -282,7 +293,7 @@ template<> EIGEN_STRONG_INLINE Packet4i ploadu(const int* from) template<> EIGEN_STRONG_INLINE Packet4f ploaddup(const float* from) { - return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd((const double*)from)), 0, 0, 1, 1); + return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast(from))), 0, 0, 1, 1); } template<> EIGEN_STRONG_INLINE Packet2d ploaddup(const double* from) { return pset1(from[0]); } @@ -302,8 +313,8 @@ template<> EIGEN_STRONG_INLINE void pstoreu(double* to, const Packet2d& _mm_storel_pd((to), from); _mm_storeh_pd((to+1), from); } -template<> EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, _mm_castps_pd(from)); } -template<> EIGEN_STRONG_INLINE void pstoreu(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, _mm_castsi128_pd(from)); } +template<> EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast(to), _mm_castps_pd(from)); } +template<> EIGEN_STRONG_INLINE void pstoreu(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast(to), _mm_castsi128_pd(from)); } // some compilers might be tempted to perform multiple moves instead of using a vector path. 
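To make the pset1/ploadu/pstoreu changes above concrete, a minimal sketch of how these internal packet helpers fit together (internal API, shown for illustration only; it degrades to the scalar fallbacks when SSE is disabled):

#include <Eigen/Core>

void packet_sketch()
{
  using namespace Eigen::internal;
  typedef packet_traits<float>::type Packet;   // Packet4f under SSE, float otherwise

  float in[4]  = { 1.f, 2.f, 3.f, 4.f };
  float out[4] = { 0.f, 0.f, 0.f, 0.f };

  Packet a = ploadu<Packet>(in);     // unaligned load
  Packet b = pset1<Packet>(2.0f);    // broadcast a scalar into every lane
  pstoreu(out, padd(a, b));          // under SSE: out = {3, 4, 5, 6}
}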
template<> EIGEN_STRONG_INLINE void pstore1(float* to, const float& a) @@ -541,7 +552,7 @@ template<> EIGEN_STRONG_INLINE int predux_max(const Packet4i& a) template struct palign_impl { - EIGEN_STRONG_INLINE static void run(Packet4f& first, const Packet4f& second) + static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second) { if (Offset!=0) first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4)); @@ -551,7 +562,7 @@ struct palign_impl template struct palign_impl { - EIGEN_STRONG_INLINE static void run(Packet4i& first, const Packet4i& second) + static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second) { if (Offset!=0) first = _mm_alignr_epi8(second,first, Offset*4); @@ -561,7 +572,7 @@ struct palign_impl template struct palign_impl { - EIGEN_STRONG_INLINE static void run(Packet2d& first, const Packet2d& second) + static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second) { if (Offset==1) first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8)); @@ -572,7 +583,7 @@ struct palign_impl template struct palign_impl { - EIGEN_STRONG_INLINE static void run(Packet4f& first, const Packet4f& second) + static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second) { if (Offset==1) { @@ -595,7 +606,7 @@ struct palign_impl template struct palign_impl { - EIGEN_STRONG_INLINE static void run(Packet4i& first, const Packet4i& second) + static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second) { if (Offset==1) { @@ -618,7 +629,7 @@ struct palign_impl template struct palign_impl { - EIGEN_STRONG_INLINE static void run(Packet2d& first, const Packet2d& second) + static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second) { if (Offset==1) { @@ -631,4 +642,6 @@ struct palign_impl } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_PACKET_MATH_SSE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/CoeffBasedProduct.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/CoeffBasedProduct.h index dc20f7e1e..8f53c43ad 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/CoeffBasedProduct.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/CoeffBasedProduct.h @@ -26,6 +26,8 @@ #ifndef EIGEN_COEFFBASED_PRODUCT_H #define EIGEN_COEFFBASED_PRODUCT_H +namespace Eigen { + namespace internal { /********************************************************************************* @@ -224,8 +226,8 @@ class CoeffBasedProduct { return reinterpret_cast(*this).diagonal(index); } protected: - const LhsNested m_lhs; - const RhsNested m_rhs; + typename internal::add_const_on_value_type::type m_lhs; + typename internal::add_const_on_value_type::type m_rhs; mutable PlainObject m_result; }; @@ -252,7 +254,7 @@ template struct product_coeff_impl { typedef typename Lhs::Index Index; - EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) + static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) { product_coeff_impl::run(row, col, lhs, rhs, res); res += lhs.coeff(row, UnrollingIndex) * rhs.coeff(UnrollingIndex, col); @@ -263,7 +265,7 @@ template struct product_coeff_impl { typedef typename Lhs::Index Index; - EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) + static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) { res = 
lhs.coeff(row, 0) * rhs.coeff(0, col); } @@ -273,7 +275,7 @@ template struct product_coeff_impl { typedef typename Lhs::Index Index; - EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar& res) + static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar& res) { eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix"); res = lhs.coeff(row, 0) * rhs.coeff(0, col); @@ -291,7 +293,7 @@ struct product_coeff_vectorized_unroller { typedef typename Lhs::Index Index; enum { PacketSize = packet_traits::size }; - EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres) + static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres) { product_coeff_vectorized_unroller::run(row, col, lhs, rhs, pres); pres = padd(pres, pmul( lhs.template packet(row, UnrollingIndex) , rhs.template packet(UnrollingIndex, col) )); @@ -302,7 +304,7 @@ template struct product_coeff_vectorized_unroller<0, Lhs, Rhs, Packet> { typedef typename Lhs::Index Index; - EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres) + static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres) { pres = pmul(lhs.template packet(row, 0) , rhs.template packet(0, col)); } @@ -314,7 +316,7 @@ struct product_coeff_impl::size }; - EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) + static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) { Packet pres; product_coeff_vectorized_unroller::run(row, col, lhs, rhs, pres); @@ -327,7 +329,7 @@ template struct product_coeff_vectorized_dyn_selector { typedef typename Lhs::Index Index; - EIGEN_STRONG_INLINE static void run(Index /*row*/, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) + static EIGEN_STRONG_INLINE void run(Index /*row*/, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) { res = lhs.transpose().cwiseProduct(rhs.col(col)).sum(); } @@ -349,7 +351,7 @@ template struct product_coeff_vectorized_dyn_selector { typedef typename Lhs::Index Index; - EIGEN_STRONG_INLINE static void run(Index row, Index /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) + static EIGEN_STRONG_INLINE void run(Index row, Index /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) { res = lhs.row(row).transpose().cwiseProduct(rhs).sum(); } @@ -359,7 +361,7 @@ template struct product_coeff_vectorized_dyn_selector { typedef typename Lhs::Index Index; - EIGEN_STRONG_INLINE static void run(Index /*row*/, Index /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) + static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) { res = lhs.transpose().cwiseProduct(rhs).sum(); } @@ -369,7 +371,7 @@ template struct product_coeff_impl { typedef typename Lhs::Index Index; - EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) + static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) { product_coeff_vectorized_dyn_selector::run(row, col, lhs, rhs, res); } @@ -383,7 +385,7 @@ template { typedef typename 
Lhs::Index Index; - EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) + static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) { product_packet_impl::run(row, col, lhs, rhs, res); res = pmadd(pset1(lhs.coeff(row, UnrollingIndex)), rhs.template packet(UnrollingIndex, col), res); @@ -394,7 +396,7 @@ template { typedef typename Lhs::Index Index; - EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) + static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) { product_packet_impl::run(row, col, lhs, rhs, res); res = pmadd(lhs.template packet(row, UnrollingIndex), pset1(rhs.coeff(UnrollingIndex, col)), res); @@ -405,7 +407,7 @@ template struct product_packet_impl { typedef typename Lhs::Index Index; - EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) + static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) { res = pmul(pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); } @@ -415,7 +417,7 @@ template struct product_packet_impl { typedef typename Lhs::Index Index; - EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) + static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) { res = pmul(lhs.template packet(row, 0), pset1(rhs.coeff(0, col))); } @@ -425,7 +427,7 @@ template struct product_packet_impl { typedef typename Lhs::Index Index; - EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res) + static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res) { eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix"); res = pmul(pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); @@ -438,7 +440,7 @@ template struct product_packet_impl { typedef typename Lhs::Index Index; - EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res) + static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res) { eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix"); res = pmul(lhs.template packet(row, 0), pset1(rhs.coeff(0, col))); @@ -449,4 +451,6 @@ struct product_packet_impl } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_COEFFBASED_PRODUCT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h index cd1c37c78..d631fa28e 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h @@ -25,12 +25,16 @@ #ifndef EIGEN_GENERAL_BLOCK_PANEL_H #define EIGEN_GENERAL_BLOCK_PANEL_H +namespace Eigen { + namespace internal { template class gebp_traits; -inline std::ptrdiff_t manage_caching_sizes_second_if_negative(std::ptrdiff_t a, std::ptrdiff_t b) + +/** \internal \returns b if a<=0, and returns a otherwise. */ +inline std::ptrdiff_t manage_caching_sizes_helper(std::ptrdiff_t a, std::ptrdiff_t b) { return a<=0 ? 
b : a; } @@ -38,9 +42,14 @@ inline std::ptrdiff_t manage_caching_sizes_second_if_negative(std::ptrdiff_t a, /** \internal */ inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1=0, std::ptrdiff_t* l2=0) { - static std::ptrdiff_t m_l1CacheSize = manage_caching_sizes_second_if_negative(queryL1CacheSize(),8 * 1024); - static std::ptrdiff_t m_l2CacheSize = manage_caching_sizes_second_if_negative(queryTopLevelCacheSize(),1*1024*1024); - + static std::ptrdiff_t m_l1CacheSize = 0; + static std::ptrdiff_t m_l2CacheSize = 0; + if(m_l2CacheSize==0) + { + m_l1CacheSize = manage_caching_sizes_helper(queryL1CacheSize(),8 * 1024); + m_l2CacheSize = manage_caching_sizes_helper(queryTopLevelCacheSize(),1*1024*1024); + } + if(action==SetAction) { // set the cpu cache size and cache all block sizes from a global cache size in byte @@ -533,7 +542,7 @@ struct gebp_kernel ResPacketSize = Traits::ResPacketSize }; - EIGEN_FLATTEN_ATTRIB + EIGEN_DONT_INLINE EIGEN_FLATTEN_ATTRIB void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index rows, Index depth, Index cols, ResScalar alpha, Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, RhsScalar* unpackedB = 0) { @@ -595,64 +604,64 @@ struct gebp_kernel if(nr==2) { LhsPacket A0, A1; - RhsPacket B0; + RhsPacket B_0; RhsPacket T0; EIGEN_ASM_COMMENT("mybegin2"); traits.loadLhs(&blA[0*LhsProgress], A0); traits.loadLhs(&blA[1*LhsProgress], A1); - traits.loadRhs(&blB[0*RhsProgress], B0); - traits.madd(A0,B0,C0,T0); - traits.madd(A1,B0,C4,B0); - traits.loadRhs(&blB[1*RhsProgress], B0); - traits.madd(A0,B0,C1,T0); - traits.madd(A1,B0,C5,B0); + traits.loadRhs(&blB[0*RhsProgress], B_0); + traits.madd(A0,B_0,C0,T0); + traits.madd(A1,B_0,C4,B_0); + traits.loadRhs(&blB[1*RhsProgress], B_0); + traits.madd(A0,B_0,C1,T0); + traits.madd(A1,B_0,C5,B_0); traits.loadLhs(&blA[2*LhsProgress], A0); traits.loadLhs(&blA[3*LhsProgress], A1); - traits.loadRhs(&blB[2*RhsProgress], B0); - traits.madd(A0,B0,C0,T0); - traits.madd(A1,B0,C4,B0); - traits.loadRhs(&blB[3*RhsProgress], B0); - traits.madd(A0,B0,C1,T0); - traits.madd(A1,B0,C5,B0); + traits.loadRhs(&blB[2*RhsProgress], B_0); + traits.madd(A0,B_0,C0,T0); + traits.madd(A1,B_0,C4,B_0); + traits.loadRhs(&blB[3*RhsProgress], B_0); + traits.madd(A0,B_0,C1,T0); + traits.madd(A1,B_0,C5,B_0); traits.loadLhs(&blA[4*LhsProgress], A0); traits.loadLhs(&blA[5*LhsProgress], A1); - traits.loadRhs(&blB[4*RhsProgress], B0); - traits.madd(A0,B0,C0,T0); - traits.madd(A1,B0,C4,B0); - traits.loadRhs(&blB[5*RhsProgress], B0); - traits.madd(A0,B0,C1,T0); - traits.madd(A1,B0,C5,B0); + traits.loadRhs(&blB[4*RhsProgress], B_0); + traits.madd(A0,B_0,C0,T0); + traits.madd(A1,B_0,C4,B_0); + traits.loadRhs(&blB[5*RhsProgress], B_0); + traits.madd(A0,B_0,C1,T0); + traits.madd(A1,B_0,C5,B_0); traits.loadLhs(&blA[6*LhsProgress], A0); traits.loadLhs(&blA[7*LhsProgress], A1); - traits.loadRhs(&blB[6*RhsProgress], B0); - traits.madd(A0,B0,C0,T0); - traits.madd(A1,B0,C4,B0); - traits.loadRhs(&blB[7*RhsProgress], B0); - traits.madd(A0,B0,C1,T0); - traits.madd(A1,B0,C5,B0); + traits.loadRhs(&blB[6*RhsProgress], B_0); + traits.madd(A0,B_0,C0,T0); + traits.madd(A1,B_0,C4,B_0); + traits.loadRhs(&blB[7*RhsProgress], B_0); + traits.madd(A0,B_0,C1,T0); + traits.madd(A1,B_0,C5,B_0); EIGEN_ASM_COMMENT("myend"); } else { EIGEN_ASM_COMMENT("mybegin4"); LhsPacket A0, A1; - RhsPacket B0, B1, B2, B3; + RhsPacket B_0, B1, B2, B3; RhsPacket T0; traits.loadLhs(&blA[0*LhsProgress], A0); 
traits.loadLhs(&blA[1*LhsProgress], A1); - traits.loadRhs(&blB[0*RhsProgress], B0); + traits.loadRhs(&blB[0*RhsProgress], B_0); traits.loadRhs(&blB[1*RhsProgress], B1); - traits.madd(A0,B0,C0,T0); + traits.madd(A0,B_0,C0,T0); traits.loadRhs(&blB[2*RhsProgress], B2); - traits.madd(A1,B0,C4,B0); + traits.madd(A1,B_0,C4,B_0); traits.loadRhs(&blB[3*RhsProgress], B3); - traits.loadRhs(&blB[4*RhsProgress], B0); + traits.loadRhs(&blB[4*RhsProgress], B_0); traits.madd(A0,B1,C1,T0); traits.madd(A1,B1,C5,B1); traits.loadRhs(&blB[5*RhsProgress], B1); @@ -664,9 +673,9 @@ EIGEN_ASM_COMMENT("mybegin4"); traits.madd(A1,B3,C7,B3); traits.loadLhs(&blA[3*LhsProgress], A1); traits.loadRhs(&blB[7*RhsProgress], B3); - traits.madd(A0,B0,C0,T0); - traits.madd(A1,B0,C4,B0); - traits.loadRhs(&blB[8*RhsProgress], B0); + traits.madd(A0,B_0,C0,T0); + traits.madd(A1,B_0,C4,B_0); + traits.loadRhs(&blB[8*RhsProgress], B_0); traits.madd(A0,B1,C1,T0); traits.madd(A1,B1,C5,B1); traits.loadRhs(&blB[9*RhsProgress], B1); @@ -679,9 +688,9 @@ EIGEN_ASM_COMMENT("mybegin4"); traits.loadLhs(&blA[5*LhsProgress], A1); traits.loadRhs(&blB[11*RhsProgress], B3); - traits.madd(A0,B0,C0,T0); - traits.madd(A1,B0,C4,B0); - traits.loadRhs(&blB[12*RhsProgress], B0); + traits.madd(A0,B_0,C0,T0); + traits.madd(A1,B_0,C4,B_0); + traits.loadRhs(&blB[12*RhsProgress], B_0); traits.madd(A0,B1,C1,T0); traits.madd(A1,B1,C5,B1); traits.loadRhs(&blB[13*RhsProgress], B1); @@ -693,8 +702,8 @@ EIGEN_ASM_COMMENT("mybegin4"); traits.madd(A1,B3,C7,B3); traits.loadLhs(&blA[7*LhsProgress], A1); traits.loadRhs(&blB[15*RhsProgress], B3); - traits.madd(A0,B0,C0,T0); - traits.madd(A1,B0,C4,B0); + traits.madd(A0,B_0,C0,T0); + traits.madd(A1,B_0,C4,B_0); traits.madd(A0,B1,C1,T0); traits.madd(A1,B1,C5,B1); traits.madd(A0,B2,C2,T0); @@ -712,32 +721,32 @@ EIGEN_ASM_COMMENT("mybegin4"); if(nr==2) { LhsPacket A0, A1; - RhsPacket B0; + RhsPacket B_0; RhsPacket T0; traits.loadLhs(&blA[0*LhsProgress], A0); traits.loadLhs(&blA[1*LhsProgress], A1); - traits.loadRhs(&blB[0*RhsProgress], B0); - traits.madd(A0,B0,C0,T0); - traits.madd(A1,B0,C4,B0); - traits.loadRhs(&blB[1*RhsProgress], B0); - traits.madd(A0,B0,C1,T0); - traits.madd(A1,B0,C5,B0); + traits.loadRhs(&blB[0*RhsProgress], B_0); + traits.madd(A0,B_0,C0,T0); + traits.madd(A1,B_0,C4,B_0); + traits.loadRhs(&blB[1*RhsProgress], B_0); + traits.madd(A0,B_0,C1,T0); + traits.madd(A1,B_0,C5,B_0); } else { LhsPacket A0, A1; - RhsPacket B0, B1, B2, B3; + RhsPacket B_0, B1, B2, B3; RhsPacket T0; traits.loadLhs(&blA[0*LhsProgress], A0); traits.loadLhs(&blA[1*LhsProgress], A1); - traits.loadRhs(&blB[0*RhsProgress], B0); + traits.loadRhs(&blB[0*RhsProgress], B_0); traits.loadRhs(&blB[1*RhsProgress], B1); - traits.madd(A0,B0,C0,T0); + traits.madd(A0,B_0,C0,T0); traits.loadRhs(&blB[2*RhsProgress], B2); - traits.madd(A1,B0,C4,B0); + traits.madd(A1,B_0,C4,B_0); traits.loadRhs(&blB[3*RhsProgress], B3); traits.madd(A0,B1,C1,T0); traits.madd(A1,B1,C5,B1); @@ -824,42 +833,42 @@ EIGEN_ASM_COMMENT("mybegin4"); if(nr==2) { LhsPacket A0; - RhsPacket B0, B1; + RhsPacket B_0, B1; traits.loadLhs(&blA[0*LhsProgress], A0); - traits.loadRhs(&blB[0*RhsProgress], B0); + traits.loadRhs(&blB[0*RhsProgress], B_0); traits.loadRhs(&blB[1*RhsProgress], B1); - traits.madd(A0,B0,C0,B0); - traits.loadRhs(&blB[2*RhsProgress], B0); + traits.madd(A0,B_0,C0,B_0); + traits.loadRhs(&blB[2*RhsProgress], B_0); traits.madd(A0,B1,C1,B1); traits.loadLhs(&blA[1*LhsProgress], A0); traits.loadRhs(&blB[3*RhsProgress], B1); - traits.madd(A0,B0,C0,B0); - 
traits.loadRhs(&blB[4*RhsProgress], B0); + traits.madd(A0,B_0,C0,B_0); + traits.loadRhs(&blB[4*RhsProgress], B_0); traits.madd(A0,B1,C1,B1); traits.loadLhs(&blA[2*LhsProgress], A0); traits.loadRhs(&blB[5*RhsProgress], B1); - traits.madd(A0,B0,C0,B0); - traits.loadRhs(&blB[6*RhsProgress], B0); + traits.madd(A0,B_0,C0,B_0); + traits.loadRhs(&blB[6*RhsProgress], B_0); traits.madd(A0,B1,C1,B1); traits.loadLhs(&blA[3*LhsProgress], A0); traits.loadRhs(&blB[7*RhsProgress], B1); - traits.madd(A0,B0,C0,B0); + traits.madd(A0,B_0,C0,B_0); traits.madd(A0,B1,C1,B1); } else { LhsPacket A0; - RhsPacket B0, B1, B2, B3; + RhsPacket B_0, B1, B2, B3; traits.loadLhs(&blA[0*LhsProgress], A0); - traits.loadRhs(&blB[0*RhsProgress], B0); + traits.loadRhs(&blB[0*RhsProgress], B_0); traits.loadRhs(&blB[1*RhsProgress], B1); - traits.madd(A0,B0,C0,B0); + traits.madd(A0,B_0,C0,B_0); traits.loadRhs(&blB[2*RhsProgress], B2); traits.loadRhs(&blB[3*RhsProgress], B3); - traits.loadRhs(&blB[4*RhsProgress], B0); + traits.loadRhs(&blB[4*RhsProgress], B_0); traits.madd(A0,B1,C1,B1); traits.loadRhs(&blB[5*RhsProgress], B1); traits.madd(A0,B2,C2,B2); @@ -867,8 +876,8 @@ EIGEN_ASM_COMMENT("mybegin4"); traits.madd(A0,B3,C3,B3); traits.loadLhs(&blA[1*LhsProgress], A0); traits.loadRhs(&blB[7*RhsProgress], B3); - traits.madd(A0,B0,C0,B0); - traits.loadRhs(&blB[8*RhsProgress], B0); + traits.madd(A0,B_0,C0,B_0); + traits.loadRhs(&blB[8*RhsProgress], B_0); traits.madd(A0,B1,C1,B1); traits.loadRhs(&blB[9*RhsProgress], B1); traits.madd(A0,B2,C2,B2); @@ -877,8 +886,8 @@ EIGEN_ASM_COMMENT("mybegin4"); traits.loadLhs(&blA[2*LhsProgress], A0); traits.loadRhs(&blB[11*RhsProgress], B3); - traits.madd(A0,B0,C0,B0); - traits.loadRhs(&blB[12*RhsProgress], B0); + traits.madd(A0,B_0,C0,B_0); + traits.loadRhs(&blB[12*RhsProgress], B_0); traits.madd(A0,B1,C1,B1); traits.loadRhs(&blB[13*RhsProgress], B1); traits.madd(A0,B2,C2,B2); @@ -887,7 +896,7 @@ EIGEN_ASM_COMMENT("mybegin4"); traits.loadLhs(&blA[3*LhsProgress], A0); traits.loadRhs(&blB[15*RhsProgress], B3); - traits.madd(A0,B0,C0,B0); + traits.madd(A0,B_0,C0,B_0); traits.madd(A0,B1,C1,B1); traits.madd(A0,B2,C2,B2); traits.madd(A0,B3,C3,B3); @@ -902,26 +911,26 @@ EIGEN_ASM_COMMENT("mybegin4"); if(nr==2) { LhsPacket A0; - RhsPacket B0, B1; + RhsPacket B_0, B1; traits.loadLhs(&blA[0*LhsProgress], A0); - traits.loadRhs(&blB[0*RhsProgress], B0); + traits.loadRhs(&blB[0*RhsProgress], B_0); traits.loadRhs(&blB[1*RhsProgress], B1); - traits.madd(A0,B0,C0,B0); + traits.madd(A0,B_0,C0,B_0); traits.madd(A0,B1,C1,B1); } else { LhsPacket A0; - RhsPacket B0, B1, B2, B3; + RhsPacket B_0, B1, B2, B3; traits.loadLhs(&blA[0*LhsProgress], A0); - traits.loadRhs(&blB[0*RhsProgress], B0); + traits.loadRhs(&blB[0*RhsProgress], B_0); traits.loadRhs(&blB[1*RhsProgress], B1); traits.loadRhs(&blB[2*RhsProgress], B2); traits.loadRhs(&blB[3*RhsProgress], B3); - traits.madd(A0,B0,C0,B0); + traits.madd(A0,B_0,C0,B_0); traits.madd(A0,B1,C1,B1); traits.madd(A0,B2,C2,B2); traits.madd(A0,B3,C3,B3); @@ -968,26 +977,26 @@ EIGEN_ASM_COMMENT("mybegin4"); if(nr==2) { LhsScalar A0; - RhsScalar B0, B1; + RhsScalar B_0, B1; A0 = blA[k]; - B0 = blB[0]; + B_0 = blB[0]; B1 = blB[1]; - MADD(cj,A0,B0,C0,B0); + MADD(cj,A0,B_0,C0,B_0); MADD(cj,A0,B1,C1,B1); } else { LhsScalar A0; - RhsScalar B0, B1, B2, B3; + RhsScalar B_0, B1, B2, B3; A0 = blA[k]; - B0 = blB[0]; + B_0 = blB[0]; B1 = blB[1]; B2 = blB[2]; B3 = blB[3]; - MADD(cj,A0,B0,C0,B0); + MADD(cj,A0,B_0,C0,B_0); MADD(cj,A0,B1,C1,B1); MADD(cj,A0,B2,C2,B2); MADD(cj,A0,B3,C3,B3); @@ -1024,14 
+1033,14 @@ EIGEN_ASM_COMMENT("mybegin4"); for(Index k=0; k struct gemm_pack_lhs { - void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, + EIGEN_DONT_INLINE void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, Index stride=0, Index offset=0) { -// enum { PacketSize = packet_traits::size }; + typedef typename packet_traits::type Packet; + enum { PacketSize = packet_traits::size }; + + EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS"); eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); + eigen_assert( (StorageOrder==RowMajor) || ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) ); conj_if::IsComplex && Conjugate> cj; const_blas_data_mapper lhs(_lhs,lhsStride); Index count = 0; @@ -1128,9 +1141,44 @@ struct gemm_pack_lhs for(Index i=0; i=1*PacketSize) A = ploadu(&lhs(i+0*PacketSize, k)); + if(Pack1>=2*PacketSize) B = ploadu(&lhs(i+1*PacketSize, k)); + if(Pack1>=3*PacketSize) C = ploadu(&lhs(i+2*PacketSize, k)); + if(Pack1>=4*PacketSize) D = ploadu(&lhs(i+3*PacketSize, k)); + if(Pack1>=1*PacketSize) { pstore(blockA+count, cj.pconj(A)); count+=PacketSize; } + if(Pack1>=2*PacketSize) { pstore(blockA+count, cj.pconj(B)); count+=PacketSize; } + if(Pack1>=3*PacketSize) { pstore(blockA+count, cj.pconj(C)); count+=PacketSize; } + if(Pack1>=4*PacketSize) { pstore(blockA+count, cj.pconj(D)); count+=PacketSize; } + } + } + else + { + for(Index k=0; k=Pack2) @@ -1164,9 +1212,10 @@ struct gemm_pack_rhs { typedef typename packet_traits::type Packet; enum { PacketSize = packet_traits::size }; - void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, + EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride=0, Index offset=0) { + EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR"); eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); conj_if::IsComplex && Conjugate> cj; Index packet_cols = (cols/nr) * nr; @@ -1211,9 +1260,10 @@ template { enum { PacketSize = packet_traits::size }; - void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, + EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride=0, Index offset=0) { + EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR"); eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); conj_if::IsComplex && Conjugate> cj; Index packet_cols = (cols/nr) * nr; @@ -1279,4 +1329,6 @@ inline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2) internal::manage_caching_sizes(SetAction, &l1, &l2); } +} // end namespace Eigen + #endif // EIGEN_GENERAL_BLOCK_PANEL_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h index ae94a2795..76fc32032 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h @@ -25,6 +25,8 @@ #ifndef EIGEN_GENERAL_MATRIX_MATRIX_H #define EIGEN_GENERAL_MATRIX_MATRIX_H +namespace Eigen { + namespace internal { template class level3_blocking; @@ -77,7 +79,7 @@ static void run(Index rows, Index cols, Index depth, typedef gebp_traits Traits; - Index kc = blocking.kc(); // cache block size along the 
K direction + Index kc = blocking.kc(); // cache block size along the K direction Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction //Index nc = blocking.nc(); // cache block size along the N direction @@ -247,7 +249,7 @@ struct gemm_functor BlockingType& m_blocking; }; -template class gemm_blocking_space; template @@ -280,8 +282,8 @@ class level3_blocking inline RhsScalar* blockW() { return m_blockW; } }; -template -class gemm_blocking_space +template +class gemm_blocking_space : public level3_blocking< typename conditional::type, typename conditional::type> @@ -322,8 +324,8 @@ class gemm_blocking_space -class gemm_blocking_space +template +class gemm_blocking_space : public level3_blocking< typename conditional::type, typename conditional::type> @@ -347,7 +349,7 @@ class gemm_blocking_spacem_nc = Transpose ? rows : cols; this->m_kc = depth; - computeProductBlockingSizes(this->m_kc, this->m_mc, this->m_nc); + computeProductBlockingSizes(this->m_kc, this->m_mc, this->m_nc); m_sizeA = this->m_mc * this->m_kc; m_sizeB = this->m_kc * this->m_nc; m_sizeW = this->m_kc*Traits::WorkSpaceFactor; @@ -412,8 +414,8 @@ class GeneralProduct { eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); - const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs); - const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs); + typename internal::add_const_on_value_type::type lhs = LhsBlasTraits::extract(m_lhs); + typename internal::add_const_on_value_type::type rhs = RhsBlasTraits::extract(m_rhs); Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) * RhsBlasTraits::extractScalarFactor(m_rhs); @@ -436,4 +438,6 @@ class GeneralProduct } }; +} // end namespace Eigen + #endif // EIGEN_GENERAL_MATRIX_MATRIX_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h index 5043b64fe..74331ee4f 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h @@ -25,6 +25,8 @@ #ifndef EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H #define EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H +namespace Eigen { + namespace internal { /********************************************************************** @@ -42,14 +44,14 @@ struct tribb_kernel; template + int ResStorageOrder, int UpLo, int Version = Specialized> struct general_matrix_matrix_triangular_product; // as usual if the result is row major => we transpose the product template -struct general_matrix_matrix_triangular_product -{ + typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int UpLo, int Version> +struct general_matrix_matrix_triangular_product +{ typedef typename scalar_product_traits::ReturnType ResScalar; static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* lhs, Index lhsStride, const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha) @@ -63,8 +65,8 @@ struct general_matrix_matrix_triangular_product -struct general_matrix_matrix_triangular_product + typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int UpLo, int Version> +struct general_matrix_matrix_triangular_product { typedef typename scalar_product_traits::ReturnType ResScalar; static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* _lhs, Index lhsStride, @@ -201,13 +203,13 @@ TriangularView& TriangularView::assignProduct( typedef 
internal::blas_traits LhsBlasTraits; typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhs; typedef typename internal::remove_all::type _ActualLhs; - const ActualLhs actualLhs = LhsBlasTraits::extract(prod.lhs()); + typename internal::add_const_on_value_type::type actualLhs = LhsBlasTraits::extract(prod.lhs()); typedef typename internal::remove_all::type Rhs; typedef internal::blas_traits RhsBlasTraits; typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhs; typedef typename internal::remove_all::type _ActualRhs; - const ActualRhs actualRhs = RhsBlasTraits::extract(prod.rhs()); + typename internal::add_const_on_value_type::type actualRhs = RhsBlasTraits::extract(prod.rhs()); typename ProductDerived::Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs().derived()) * RhsBlasTraits::extractScalarFactor(prod.rhs().derived()); @@ -222,4 +224,6 @@ TriangularView& TriangularView::assignProduct( return *this; } +} // end namespace Eigen + #endif // EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_MKL.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_MKL.h new file mode 100644 index 000000000..3deed068e --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_MKL.h @@ -0,0 +1,146 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * Level 3 BLAS SYRK/HERK implementation. 
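The recurring `internal::add_const_on_value_type` substitutions in the hunks above (GeneralProduct::scaleAndAddTo, TriangularView::assignProduct, and again in the selfadjoint products further down) all replace declarations of the form `const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs);`. The apparent intent is to const-qualify whatever `extract()` returns without changing its value category: a returned reference stays a reference instead of being copied into a local matrix. The sketch below shows the idea only; it is a simplified stand-in, not Eigen's actual definition, and the template arguments (which the flattened patch text drops) are reconstructed by hand.

    #include <type_traits>

    // Simplified sketch of the idea behind internal::add_const_on_value_type
    // (illustrative only, not Eigen's exact implementation): add const to the
    // referred-to object without turning a reference into a by-value copy.
    template<typename T> struct add_const_on_value_type      { typedef const T  type; };
    template<typename T> struct add_const_on_value_type<T&>  { typedef const T& type; };
    template<typename T> struct add_const_on_value_type<T*>  { typedef const T* type; };

    // A reference stays a reference (no deep copy of the extracted operand);
    // a by-value result simply becomes const.
    static_assert(std::is_same<add_const_on_value_type<int&>::type, const int&>::value, "");
    static_assert(std::is_same<add_const_on_value_type<int >::type, const int >::value, "");
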
+ ******************************************************************************** +*/ + +#ifndef EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_MKL_H +#define EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_MKL_H + +namespace Eigen { + +namespace internal { + +template +struct general_matrix_matrix_rankupdate : + general_matrix_matrix_triangular_product< + Index,Scalar,AStorageOrder,ConjugateA,Scalar,AStorageOrder,ConjugateA,ResStorageOrder,UpLo,BuiltIn> {}; + + +// try to go to BLAS specialization +#define EIGEN_MKL_RANKUPDATE_SPECIALIZE(Scalar) \ +template \ +struct general_matrix_matrix_triangular_product { \ + static EIGEN_STRONG_INLINE void run(Index size, Index depth,const Scalar* lhs, Index lhsStride, \ + const Scalar* rhs, Index rhsStride, Scalar* res, Index resStride, Scalar alpha) \ + { \ + if (lhs==rhs) { \ + general_matrix_matrix_rankupdate \ + ::run(size,depth,lhs,lhsStride,rhs,rhsStride,res,resStride,alpha); \ + } else { \ + general_matrix_matrix_triangular_product \ + ::run(size,depth,lhs,lhsStride,rhs,rhsStride,res,resStride,alpha); \ + } \ + } \ +}; + +EIGEN_MKL_RANKUPDATE_SPECIALIZE(double) +//EIGEN_MKL_RANKUPDATE_SPECIALIZE(dcomplex) +EIGEN_MKL_RANKUPDATE_SPECIALIZE(float) +//EIGEN_MKL_RANKUPDATE_SPECIALIZE(scomplex) + +// SYRK for float/double +#define EIGEN_MKL_RANKUPDATE_R(EIGTYPE, MKLTYPE, MKLFUNC) \ +template \ +struct general_matrix_matrix_rankupdate { \ + enum { \ + IsLower = (UpLo&Lower) == Lower, \ + LowUp = IsLower ? Lower : Upper, \ + conjA = ((AStorageOrder==ColMajor) && ConjugateA) ? 1 : 0 \ + }; \ + static EIGEN_STRONG_INLINE void run(Index size, Index depth,const EIGTYPE* lhs, Index lhsStride, \ + const EIGTYPE* rhs, Index rhsStride, EIGTYPE* res, Index resStride, EIGTYPE alpha) \ + { \ + /* typedef Matrix MatrixRhs;*/ \ +\ + MKL_INT lda=lhsStride, ldc=resStride, n=size, k=depth; \ + char uplo=(IsLower) ? 'L' : 'U', trans=(AStorageOrder==RowMajor) ? 'T':'N'; \ + MKLTYPE alpha_, beta_; \ +\ +/* Set alpha_ & beta_ */ \ + assign_scalar_eig2mkl(alpha_, alpha); \ + assign_scalar_eig2mkl(beta_, EIGTYPE(1)); \ + MKLFUNC(&uplo, &trans, &n, &k, &alpha_, lhs, &lda, &beta_, res, &ldc); \ + } \ +}; + +// HERK for complex data +#define EIGEN_MKL_RANKUPDATE_C(EIGTYPE, MKLTYPE, RTYPE, MKLFUNC) \ +template \ +struct general_matrix_matrix_rankupdate { \ + enum { \ + IsLower = (UpLo&Lower) == Lower, \ + LowUp = IsLower ? Lower : Upper, \ + conjA = (((AStorageOrder==ColMajor) && ConjugateA) || ((AStorageOrder==RowMajor) && !ConjugateA)) ? 1 : 0 \ + }; \ + static EIGEN_STRONG_INLINE void run(Index size, Index depth,const EIGTYPE* lhs, Index lhsStride, \ + const EIGTYPE* rhs, Index rhsStride, EIGTYPE* res, Index resStride, EIGTYPE alpha) \ + { \ + typedef Matrix MatrixType; \ +\ + MKL_INT lda=lhsStride, ldc=resStride, n=size, k=depth; \ + char uplo=(IsLower) ? 'L' : 'U', trans=(AStorageOrder==RowMajor) ? 
'C':'N'; \ + RTYPE alpha_, beta_; \ + const EIGTYPE* a_ptr; \ +\ +/* Set alpha_ & beta_ */ \ +/* assign_scalar_eig2mkl(alpha_, alpha); */\ +/* assign_scalar_eig2mkl(beta_, EIGTYPE(1));*/ \ + alpha_ = alpha.real(); \ + beta_ = 1.0; \ +/* Copy with conjugation in some cases*/ \ + MatrixType a; \ + if (conjA) { \ + Map > mapA(lhs,n,k,OuterStride<>(lhsStride)); \ + a = mapA.conjugate(); \ + lda = a.outerStride(); \ + a_ptr = a.data(); \ + } else a_ptr=lhs; \ + MKLFUNC(&uplo, &trans, &n, &k, &alpha_, (MKLTYPE*)a_ptr, &lda, &beta_, (MKLTYPE*)res, &ldc); \ + } \ +}; + + +EIGEN_MKL_RANKUPDATE_R(double, double, dsyrk) +EIGEN_MKL_RANKUPDATE_R(float, float, ssyrk) + +//EIGEN_MKL_RANKUPDATE_C(dcomplex, MKL_Complex16, double, zherk) +//EIGEN_MKL_RANKUPDATE_C(scomplex, MKL_Complex8, double, cherk) + + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_MKL_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h new file mode 100644 index 000000000..060af328e --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h @@ -0,0 +1,118 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * General matrix-matrix product functionality based on ?GEMM. + ******************************************************************************** +*/ + +#ifndef EIGEN_GENERAL_MATRIX_MATRIX_MKL_H +#define EIGEN_GENERAL_MATRIX_MATRIX_MKL_H + +namespace Eigen { + +namespace internal { + +/********************************************************************** +* This file implements general matrix-matrix multiplication using BLAS +* gemm function via partial specialization of +* general_matrix_matrix_product::run(..) 
method for float, double, +* std::complex and std::complex types +**********************************************************************/ + +// gemm specialization + +#define GEMM_SPECIALIZATION(EIGTYPE, EIGPREFIX, MKLTYPE, MKLPREFIX) \ +template< \ + typename Index, \ + int LhsStorageOrder, bool ConjugateLhs, \ + int RhsStorageOrder, bool ConjugateRhs> \ +struct general_matrix_matrix_product \ +{ \ +static void run(Index rows, Index cols, Index depth, \ + const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsStride, \ + EIGTYPE* res, Index resStride, \ + EIGTYPE alpha, \ + level3_blocking& /*blocking*/, \ + GemmParallelInfo* /*info = 0*/) \ +{ \ + using std::conj; \ +\ + char transa, transb; \ + MKL_INT m, n, k, lda, ldb, ldc; \ + const EIGTYPE *a, *b; \ + MKLTYPE alpha_, beta_; \ + MatrixX##EIGPREFIX a_tmp, b_tmp; \ + EIGTYPE myone(1);\ +\ +/* Set transpose options */ \ + transa = (LhsStorageOrder==RowMajor) ? ((ConjugateLhs) ? 'C' : 'T') : 'N'; \ + transb = (RhsStorageOrder==RowMajor) ? ((ConjugateRhs) ? 'C' : 'T') : 'N'; \ +\ +/* Set m, n, k */ \ + m = (MKL_INT)rows; \ + n = (MKL_INT)cols; \ + k = (MKL_INT)depth; \ +\ +/* Set alpha_ & beta_ */ \ + assign_scalar_eig2mkl(alpha_, alpha); \ + assign_scalar_eig2mkl(beta_, myone); \ +\ +/* Set lda, ldb, ldc */ \ + lda = (MKL_INT)lhsStride; \ + ldb = (MKL_INT)rhsStride; \ + ldc = (MKL_INT)resStride; \ +\ +/* Set a, b, c */ \ + if ((LhsStorageOrder==ColMajor) && (ConjugateLhs)) { \ + Map > lhs(_lhs,m,k,OuterStride<>(lhsStride)); \ + a_tmp = lhs.conjugate(); \ + a = a_tmp.data(); \ + lda = a_tmp.outerStride(); \ + } else a = _lhs; \ +\ + if ((RhsStorageOrder==ColMajor) && (ConjugateRhs)) { \ + Map > rhs(_rhs,k,n,OuterStride<>(rhsStride)); \ + b_tmp = rhs.conjugate(); \ + b = b_tmp.data(); \ + ldb = b_tmp.outerStride(); \ + } else b = _rhs; \ +\ + MKLPREFIX##gemm(&transa, &transb, &m, &n, &k, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)b, &ldb, &beta_, (MKLTYPE*)res, &ldc); \ +}}; + +GEMM_SPECIALIZATION(double, d, double, d) +GEMM_SPECIALIZATION(float, f, float, s) +GEMM_SPECIALIZATION(dcomplex, cd, MKL_Complex16, z) +GEMM_SPECIALIZATION(scomplex, cf, MKL_Complex8, c) + +} // end namespase internal + +} // end namespace Eigen + +#endif // EIGEN_GENERAL_MATRIX_MATRIX_MKL_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixVector.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixVector.h index e0e2cbf8f..d868a66a1 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixVector.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixVector.h @@ -25,6 +25,8 @@ #ifndef EIGEN_GENERAL_MATRIX_VECTOR_H #define EIGEN_GENERAL_MATRIX_VECTOR_H +namespace Eigen { + namespace internal { /* Optimized col-major matrix * vector product: @@ -40,8 +42,8 @@ namespace internal { * |cplx |real |cplx | invalid, the caller has to do tmp: = A * B; C += alpha*tmp * |cplx |real |real | optimal case, vectorization possible via real-cplx mul */ -template -struct general_matrix_vector_product +template +struct general_matrix_vector_product { typedef typename scalar_product_traits::ReturnType ResScalar; @@ -99,7 +101,7 @@ EIGEN_DONT_INLINE static void run( // How many coeffs of the result do we have to skip to be aligned. // Here we assume data are at least aligned on the base scalar type. - Index alignedStart = first_aligned(res,size); + Index alignedStart = internal::first_aligned(res,size); Index alignedSize = ResPacketSize>1 ? 
alignedStart + ((size-alignedStart) & ~ResPacketAlignedMask) : 0; const Index peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart; @@ -109,7 +111,7 @@ EIGEN_DONT_INLINE static void run( : FirstAligned; // we cannot assume the first element is aligned because of sub-matrices - const Index lhsAlignmentOffset = first_aligned(lhs,size); + const Index lhsAlignmentOffset = internal::first_aligned(lhs,size); // find how many columns do we have to skip to be aligned with the result (if possible) Index skipColumns = 0; @@ -296,8 +298,8 @@ EIGEN_DONT_INLINE static void run( * - alpha is always a complex (or converted to a complex) * - no vectorization */ -template -struct general_matrix_vector_product +template +struct general_matrix_vector_product { typedef typename scalar_product_traits::ReturnType ResScalar; @@ -351,7 +353,7 @@ EIGEN_DONT_INLINE static void run( // How many coeffs of the result do we have to skip to be aligned. // Here we assume data are at least aligned on the base scalar type // if that's not the case then vectorization is discarded, see below. - Index alignedStart = first_aligned(rhs, depth); + Index alignedStart = internal::first_aligned(rhs, depth); Index alignedSize = RhsPacketSize>1 ? alignedStart + ((depth-alignedStart) & ~RhsPacketAlignedMask) : 0; const Index peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart; @@ -361,7 +363,7 @@ EIGEN_DONT_INLINE static void run( : FirstAligned; // we cannot assume the first element is aligned because of sub-matrices - const Index lhsAlignmentOffset = first_aligned(lhs,depth); + const Index lhsAlignmentOffset = internal::first_aligned(lhs,depth); // find how many rows do we have to skip to be aligned with rhs (if possible) Index skipRows = 0; @@ -556,4 +558,6 @@ EIGEN_DONT_INLINE static void run( } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_GENERAL_MATRIX_VECTOR_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixVector_MKL.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixVector_MKL.h new file mode 100644 index 000000000..e9de6af3e --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixVector_MKL.h @@ -0,0 +1,131 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * General matrix-vector product functionality based on ?GEMV. + ******************************************************************************** +*/ + +#ifndef EIGEN_GENERAL_MATRIX_VECTOR_MKL_H +#define EIGEN_GENERAL_MATRIX_VECTOR_MKL_H + +namespace Eigen { + +namespace internal { + +/********************************************************************** +* This file implements general matrix-vector multiplication using BLAS +* gemv function via partial specialization of +* general_matrix_vector_product::run(..) method for float, double, +* std::complex and std::complex types +**********************************************************************/ + +// gemv specialization + +template +struct general_matrix_vector_product_gemv : + general_matrix_vector_product {}; + +#define EIGEN_MKL_GEMV_SPECIALIZE(Scalar) \ +template \ +struct general_matrix_vector_product { \ +static EIGEN_DONT_INLINE void run( \ + Index rows, Index cols, \ + const Scalar* lhs, Index lhsStride, \ + const Scalar* rhs, Index rhsIncr, \ + Scalar* res, Index resIncr, Scalar alpha) \ +{ \ + if (ConjugateLhs) { \ + general_matrix_vector_product::run( \ + rows, cols, lhs, lhsStride, rhs, rhsIncr, res, resIncr, alpha); \ + } else { \ + general_matrix_vector_product_gemv::run( \ + rows, cols, lhs, lhsStride, rhs, rhsIncr, res, resIncr, alpha); \ + } \ +} \ +}; \ +template \ +struct general_matrix_vector_product { \ +static EIGEN_DONT_INLINE void run( \ + Index rows, Index cols, \ + const Scalar* lhs, Index lhsStride, \ + const Scalar* rhs, Index rhsIncr, \ + Scalar* res, Index resIncr, Scalar alpha) \ +{ \ + general_matrix_vector_product_gemv::run( \ + rows, cols, lhs, lhsStride, rhs, rhsIncr, res, resIncr, alpha); \ +} \ +}; \ + +EIGEN_MKL_GEMV_SPECIALIZE(double) +EIGEN_MKL_GEMV_SPECIALIZE(float) +EIGEN_MKL_GEMV_SPECIALIZE(dcomplex) +EIGEN_MKL_GEMV_SPECIALIZE(scomplex) + +#define EIGEN_MKL_GEMV_SPECIALIZATION(EIGTYPE,MKLTYPE,MKLPREFIX) \ +template \ +struct general_matrix_vector_product_gemv \ +{ \ +typedef Matrix GEMVVector;\ +\ +static EIGEN_DONT_INLINE void run( \ + Index rows, Index cols, \ + const EIGTYPE* lhs, Index lhsStride, \ + const EIGTYPE* rhs, Index rhsIncr, \ + EIGTYPE* res, Index resIncr, EIGTYPE alpha) \ +{ \ + MKL_INT m=rows, n=cols, lda=lhsStride, incx=rhsIncr, incy=resIncr; \ + MKLTYPE alpha_, beta_; \ + const EIGTYPE *x_ptr, myone(1); \ + char trans=(LhsStorageOrder==ColMajor) ? 'N' : (ConjugateLhs) ? 
'C' : 'T'; \ + if (LhsStorageOrder==RowMajor) { \ + m=cols; \ + n=rows; \ + }\ + assign_scalar_eig2mkl(alpha_, alpha); \ + assign_scalar_eig2mkl(beta_, myone); \ + GEMVVector x_tmp; \ + if (ConjugateRhs) { \ + Map > map_x(rhs,cols,1,InnerStride<>(incx)); \ + x_tmp=map_x.conjugate(); \ + x_ptr=x_tmp.data(); \ + incx=1; \ + } else x_ptr=rhs; \ + MKLPREFIX##gemv(&trans, &m, &n, &alpha_, (const MKLTYPE*)lhs, &lda, (const MKLTYPE*)x_ptr, &incx, &beta_, (MKLTYPE*)res, &incy); \ +}\ +}; + +EIGEN_MKL_GEMV_SPECIALIZATION(double, double, d) +EIGEN_MKL_GEMV_SPECIALIZATION(float, float, s) +EIGEN_MKL_GEMV_SPECIALIZATION(dcomplex, MKL_Complex16, z) +EIGEN_MKL_GEMV_SPECIALIZATION(scomplex, MKL_Complex8, c) + +} // end namespase internal + +} // end namespace Eigen + +#endif // EIGEN_GENERAL_MATRIX_VECTOR_MKL_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/Parallelizer.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/Parallelizer.h index ecdedc363..725216162 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/Parallelizer.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/Parallelizer.h @@ -25,6 +25,8 @@ #ifndef EIGEN_PARALLELIZER_H #define EIGEN_PARALLELIZER_H +namespace Eigen { + namespace internal { /** \internal */ @@ -55,12 +57,23 @@ inline void manage_multi_threading(Action action, int* v) } } +} + +/** Must be call first when calling Eigen from multiple threads */ +inline void initParallel() +{ + int nbt; + internal::manage_multi_threading(GetAction, &nbt); + std::ptrdiff_t l1, l2; + internal::manage_caching_sizes(GetAction, &l1, &l2); +} + /** \returns the max number of threads reserved for Eigen * \sa setNbThreads */ inline int nbThreads() { int ret; - manage_multi_threading(GetAction, &ret); + internal::manage_multi_threading(GetAction, &ret); return ret; } @@ -68,9 +81,11 @@ inline int nbThreads() * \sa nbThreads */ inline void setNbThreads(int v) { - manage_multi_threading(SetAction, &v); + internal::manage_multi_threading(SetAction, &v); } +namespace internal { + template struct GemmParallelInfo { GemmParallelInfo() : sync(-1), users(0), rhs_start(0), rhs_length(0) {} @@ -85,7 +100,9 @@ template struct GemmParallelInfo template void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpose) { -#ifndef EIGEN_HAS_OPENMP + // TODO when EIGEN_USE_BLAS is defined, + // we should still enable OMP for other scalar types +#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS) // FIXME the transpose variable is only needed to properly split // the matrix product when multithreading is enabled. This is a temporary // fix to support row-major destination matrices. 
This whole @@ -117,6 +134,7 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpos if(threads==1) return func(0,rows, 0,cols); + Eigen::initParallel(); func.initParallelSession(); if(transpose) @@ -151,4 +169,6 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpos } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_PARALLELIZER_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h index ccd757cfa..91ba12081 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SELFADJOINT_MATRIX_MATRIX_H #define EIGEN_SELFADJOINT_MATRIX_MATRIX_H +namespace Eigen { + namespace internal { // pack a selfadjoint block diagonal for use with the gebp_kernel @@ -400,8 +402,8 @@ struct SelfadjointProductMatrix { eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); - const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs); - const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs); + typename internal::add_const_on_value_type::type lhs = LhsBlasTraits::extract(m_lhs); + typename internal::add_const_on_value_type::type rhs = RhsBlasTraits::extract(m_rhs); Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) * RhsBlasTraits::extractScalarFactor(m_rhs); @@ -424,4 +426,6 @@ struct SelfadjointProductMatrix } }; +} // end namespace Eigen + #endif // EIGEN_SELFADJOINT_MATRIX_MATRIX_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h new file mode 100644 index 000000000..4e5c4125c --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h @@ -0,0 +1,295 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
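The Parallelizer hunk above adds `Eigen::initParallel()`, whose body simply performs GetAction queries of the thread-count and cache-size state; calling it once from the main thread forces those lazily created globals into existence before any worker thread can race on them, and `parallelize_gemm` now calls it before starting a parallel session. A minimal usage sketch follows; the OpenMP loop, matrix sizes, and iteration count are illustrative only, not taken from the patch.

    #include <Eigen/Dense>

    int main()
    {
      // Create Eigen's lazily-initialized globals (thread count, cache sizes)
      // from the main thread, before any other thread triggers a product.
      Eigen::initParallel();

      Eigen::MatrixXd A = Eigen::MatrixXd::Random(256, 256);
      Eigen::MatrixXd B = Eigen::MatrixXd::Random(256, 256);

      #pragma omp parallel for
      for (int i = 0; i < 8; ++i)
      {
        // Each thread runs its own product; the shared state it reads was
        // already set up by initParallel() above.
        Eigen::MatrixXd C = A * B;
        (void)C;
      }
      return 0;
    }
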
+ + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * Self adjoint matrix * matrix product functionality based on ?SYMM/?HEMM. + ******************************************************************************** +*/ + +#ifndef EIGEN_SELFADJOINT_MATRIX_MATRIX_MKL_H +#define EIGEN_SELFADJOINT_MATRIX_MATRIX_MKL_H + +namespace Eigen { + +namespace internal { + + +/* Optimized selfadjoint matrix * matrix (?SYMM/?HEMM) product */ + +#define EIGEN_MKL_SYMM_L(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \ +template \ +struct product_selfadjoint_matrix \ +{\ +\ + static EIGEN_DONT_INLINE void run( \ + Index rows, Index cols, \ + const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsStride, \ + EIGTYPE* res, Index resStride, \ + EIGTYPE alpha) \ + { \ + char side='L', uplo='L'; \ + MKL_INT m, n, lda, ldb, ldc; \ + const EIGTYPE *a, *b; \ + MKLTYPE alpha_, beta_; \ + MatrixX##EIGPREFIX b_tmp; \ + EIGTYPE myone(1);\ +\ +/* Set transpose options */ \ +/* Set m, n, k */ \ + m = (MKL_INT)rows; \ + n = (MKL_INT)cols; \ +\ +/* Set alpha_ & beta_ */ \ + assign_scalar_eig2mkl(alpha_, alpha); \ + assign_scalar_eig2mkl(beta_, myone); \ +\ +/* Set lda, ldb, ldc */ \ + lda = (MKL_INT)lhsStride; \ + ldb = (MKL_INT)rhsStride; \ + ldc = (MKL_INT)resStride; \ +\ +/* Set a, b, c */ \ + if (LhsStorageOrder==RowMajor) uplo='U'; \ + a = _lhs; \ +\ + if (RhsStorageOrder==RowMajor) { \ + Map > rhs(_rhs,n,m,OuterStride<>(rhsStride)); \ + b_tmp = rhs.adjoint(); \ + b = b_tmp.data(); \ + ldb = b_tmp.outerStride(); \ + } else b = _rhs; \ +\ + MKLPREFIX##symm(&side, &uplo, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)b, &ldb, &beta_, (MKLTYPE*)res, &ldc); \ +\ + } \ +}; + + +#define EIGEN_MKL_HEMM_L(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \ +template \ +struct product_selfadjoint_matrix \ +{\ + static EIGEN_DONT_INLINE void run( \ + Index rows, Index cols, \ + const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsStride, \ + EIGTYPE* res, Index resStride, \ + EIGTYPE alpha) \ + { \ + char side='L', uplo='L'; \ + MKL_INT m, n, lda, ldb, ldc; \ + const EIGTYPE *a, *b; \ + MKLTYPE alpha_, beta_; \ + MatrixX##EIGPREFIX b_tmp; \ + Matrix a_tmp; \ + EIGTYPE myone(1); \ +\ +/* Set transpose options */ \ +/* Set m, n, k */ \ + m = (MKL_INT)rows; \ + n = (MKL_INT)cols; \ +\ +/* Set alpha_ & beta_ */ \ + assign_scalar_eig2mkl(alpha_, alpha); \ + assign_scalar_eig2mkl(beta_, myone); \ +\ +/* Set lda, ldb, ldc */ \ + lda = (MKL_INT)lhsStride; \ + ldb = (MKL_INT)rhsStride; \ + ldc = (MKL_INT)resStride; \ +\ +/* Set a, b, c */ \ + if (((LhsStorageOrder==ColMajor) && ConjugateLhs) || ((LhsStorageOrder==RowMajor) && (!ConjugateLhs))) { \ + Map, 0, OuterStride<> > lhs(_lhs,m,m,OuterStride<>(lhsStride)); \ + a_tmp = lhs.conjugate(); \ + a = a_tmp.data(); \ + lda = a_tmp.outerStride(); \ + } else a = _lhs; \ + if (LhsStorageOrder==RowMajor) uplo='U'; \ +\ + if (RhsStorageOrder==ColMajor && (!ConjugateRhs)) { \ + b = _rhs; } \ + else { \ + if (RhsStorageOrder==ColMajor && ConjugateRhs) { \ + Map > rhs(_rhs,m,n,OuterStride<>(rhsStride)); \ + b_tmp = rhs.conjugate(); \ + } else \ + if (ConjugateRhs) { \ + Map > rhs(_rhs,n,m,OuterStride<>(rhsStride)); \ + b_tmp = rhs.adjoint(); \ + } else { \ + Map > rhs(_rhs,n,m,OuterStride<>(rhsStride)); \ + b_tmp = rhs.transpose(); \ + } \ + b = b_tmp.data(); \ + ldb = b_tmp.outerStride(); \ + } \ +\ + MKLPREFIX##hemm(&side, &uplo, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)b, 
&ldb, &beta_, (MKLTYPE*)res, &ldc); \ +\ + } \ +}; + +EIGEN_MKL_SYMM_L(double, double, d, d) +EIGEN_MKL_SYMM_L(float, float, f, s) +EIGEN_MKL_HEMM_L(dcomplex, MKL_Complex16, cd, z) +EIGEN_MKL_HEMM_L(scomplex, MKL_Complex8, cf, c) + + +/* Optimized matrix * selfadjoint matrix (?SYMM/?HEMM) product */ + +#define EIGEN_MKL_SYMM_R(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \ +template \ +struct product_selfadjoint_matrix \ +{\ +\ + static EIGEN_DONT_INLINE void run( \ + Index rows, Index cols, \ + const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsStride, \ + EIGTYPE* res, Index resStride, \ + EIGTYPE alpha) \ + { \ + char side='R', uplo='L'; \ + MKL_INT m, n, lda, ldb, ldc; \ + const EIGTYPE *a, *b; \ + MKLTYPE alpha_, beta_; \ + MatrixX##EIGPREFIX b_tmp; \ + EIGTYPE myone(1);\ +\ +/* Set m, n, k */ \ + m = (MKL_INT)rows; \ + n = (MKL_INT)cols; \ +\ +/* Set alpha_ & beta_ */ \ + assign_scalar_eig2mkl(alpha_, alpha); \ + assign_scalar_eig2mkl(beta_, myone); \ +\ +/* Set lda, ldb, ldc */ \ + lda = (MKL_INT)rhsStride; \ + ldb = (MKL_INT)lhsStride; \ + ldc = (MKL_INT)resStride; \ +\ +/* Set a, b, c */ \ + if (RhsStorageOrder==RowMajor) uplo='U'; \ + a = _rhs; \ +\ + if (LhsStorageOrder==RowMajor) { \ + Map > lhs(_lhs,n,m,OuterStride<>(rhsStride)); \ + b_tmp = lhs.adjoint(); \ + b = b_tmp.data(); \ + ldb = b_tmp.outerStride(); \ + } else b = _lhs; \ +\ + MKLPREFIX##symm(&side, &uplo, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)b, &ldb, &beta_, (MKLTYPE*)res, &ldc); \ +\ + } \ +}; + + +#define EIGEN_MKL_HEMM_R(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \ +template \ +struct product_selfadjoint_matrix \ +{\ + static EIGEN_DONT_INLINE void run( \ + Index rows, Index cols, \ + const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsStride, \ + EIGTYPE* res, Index resStride, \ + EIGTYPE alpha) \ + { \ + char side='R', uplo='L'; \ + MKL_INT m, n, lda, ldb, ldc; \ + const EIGTYPE *a, *b; \ + MKLTYPE alpha_, beta_; \ + MatrixX##EIGPREFIX b_tmp; \ + Matrix a_tmp; \ + EIGTYPE myone(1); \ +\ +/* Set m, n, k */ \ + m = (MKL_INT)rows; \ + n = (MKL_INT)cols; \ +\ +/* Set alpha_ & beta_ */ \ + assign_scalar_eig2mkl(alpha_, alpha); \ + assign_scalar_eig2mkl(beta_, myone); \ +\ +/* Set lda, ldb, ldc */ \ + lda = (MKL_INT)rhsStride; \ + ldb = (MKL_INT)lhsStride; \ + ldc = (MKL_INT)resStride; \ +\ +/* Set a, b, c */ \ + if (((RhsStorageOrder==ColMajor) && ConjugateRhs) || ((RhsStorageOrder==RowMajor) && (!ConjugateRhs))) { \ + Map, 0, OuterStride<> > rhs(_rhs,n,n,OuterStride<>(rhsStride)); \ + a_tmp = rhs.conjugate(); \ + a = a_tmp.data(); \ + lda = a_tmp.outerStride(); \ + } else a = _rhs; \ + if (RhsStorageOrder==RowMajor) uplo='U'; \ +\ + if (LhsStorageOrder==ColMajor && (!ConjugateLhs)) { \ + b = _lhs; } \ + else { \ + if (LhsStorageOrder==ColMajor && ConjugateLhs) { \ + Map > lhs(_lhs,m,n,OuterStride<>(lhsStride)); \ + b_tmp = lhs.conjugate(); \ + } else \ + if (ConjugateLhs) { \ + Map > lhs(_lhs,n,m,OuterStride<>(lhsStride)); \ + b_tmp = lhs.adjoint(); \ + } else { \ + Map > lhs(_lhs,n,m,OuterStride<>(lhsStride)); \ + b_tmp = lhs.transpose(); \ + } \ + b = b_tmp.data(); \ + ldb = b_tmp.outerStride(); \ + } \ +\ + MKLPREFIX##hemm(&side, &uplo, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)b, &ldb, &beta_, (MKLTYPE*)res, &ldc); \ + } \ +}; + +EIGEN_MKL_SYMM_R(double, double, d, d) +EIGEN_MKL_SYMM_R(float, float, f, s) +EIGEN_MKL_HEMM_R(dcomplex, MKL_Complex16, cd, z) +EIGEN_MKL_HEMM_R(scomplex, MKL_Complex8, cf, c) + +} // end namespace internal + +} // 
end namespace Eigen + +#endif // EIGEN_SELFADJOINT_MATRIX_MATRIX_MKL_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h index d6121fc07..7f39ef01a 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_H #define EIGEN_SELFADJOINT_MATRIX_VECTOR_H +namespace Eigen { + namespace internal { /* Optimized selfadjoint matrix * vector product: @@ -32,8 +34,15 @@ namespace internal { * the number of load/stores of the result by a factor 2 and to reduce * the instruction dependency. */ -template -static EIGEN_DONT_INLINE void product_selfadjoint_vector( + +template +struct selfadjoint_matrix_vector_product; + +template +struct selfadjoint_matrix_vector_product + +{ +static EIGEN_DONT_INLINE void run( Index size, const Scalar* lhs, Index lhsStride, const Scalar* _rhs, Index rhsIncr, @@ -85,14 +94,14 @@ static EIGEN_DONT_INLINE void product_selfadjoint_vector( Scalar t1 = cjAlpha * rhs[j+1]; Packet ptmp1 = pset1(t1); - Scalar t2 = 0; + Scalar t2(0); Packet ptmp2 = pset1(t2); - Scalar t3 = 0; + Scalar t3(0); Packet ptmp3 = pset1(t3); size_t starti = FirstTriangular ? 0 : j+2; size_t endi = FirstTriangular ? j : size; - size_t alignedStart = (starti) + first_aligned(&res[starti], endi-starti); + size_t alignedStart = (starti) + internal::first_aligned(&res[starti], endi-starti); size_t alignedEnd = alignedStart + ((endi-alignedStart)/(PacketSize))*(PacketSize); // TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed @@ -148,7 +157,7 @@ static EIGEN_DONT_INLINE void product_selfadjoint_vector( register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride; Scalar t1 = cjAlpha * rhs[j]; - Scalar t2 = 0; + Scalar t2(0); // TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed res[j] += cjd.pmul(internal::real(A0[j]), t1); for (Index i=FirstTriangular ? 0 : j+1; i<(FirstTriangular ? j : size); i++) @@ -159,6 +168,7 @@ static EIGEN_DONT_INLINE void product_selfadjoint_vector( res[j] += alpha * t2; } } +}; } // end namespace internal @@ -193,8 +203,8 @@ struct SelfadjointProductMatrix eigen_assert(dest.rows()==m_lhs.rows() && dest.cols()==m_rhs.cols()); - const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs); - const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs); + typename internal::add_const_on_value_type::type lhs = LhsBlasTraits::extract(m_lhs); + typename internal::add_const_on_value_type::type rhs = RhsBlasTraits::extract(m_rhs); Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) * RhsBlasTraits::extractScalarFactor(m_rhs); @@ -232,7 +242,7 @@ struct SelfadjointProductMatrix } - internal::product_selfadjoint_vector::Flags&RowMajorBit) ? RowMajor : ColMajor, int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)> + internal::selfadjoint_matrix_vector_product::Flags&RowMajorBit) ? 
RowMajor : ColMajor, int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)>::run ( lhs.rows(), // size &lhs.coeffRef(0,0), lhs.outerStride(), // lhs info @@ -274,5 +284,6 @@ struct SelfadjointProductMatrix } }; +} // end namespace Eigen #endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h new file mode 100644 index 000000000..f88d483b6 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h @@ -0,0 +1,114 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * Selfadjoint matrix-vector product functionality based on ?SYMV/HEMV. 
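The SelfadjointMatrixVector hunk above converts the free function `product_selfadjoint_vector` into a class template `selfadjoint_matrix_vector_product` with a static `run()` (the call site in SelfadjointProductMatrix is updated accordingly). The most plausible reason is the one the new `_MKL` header depends on: function templates cannot be partially specialized, whereas a class template with a static member can be, which is exactly what is needed to override only the scalar types and layouts BLAS handles. A stripped-down illustration of the pattern, with hypothetical names:

    #include <cstdio>

    // Generic kernel: a class template with a static run(), so that it can be
    // partially specialized later (a plain function template could not be).
    template<typename Scalar, int StorageOrder>
    struct matvec_kernel
    {
      static void run() { std::puts("generic kernel"); }
    };

    // Partial specialization on the scalar type only, as the *_MKL headers do;
    // this slot would forward to a BLAS ?symv/?hemv call.
    template<int StorageOrder>
    struct matvec_kernel<double, StorageOrder>
    {
      static void run() { std::puts("BLAS-backed kernel for double"); }
    };

    int main()
    {
      matvec_kernel<float,  0>::run();  // generic path
      matvec_kernel<double, 0>::run();  // specialized path
      return 0;
    }
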
+ ******************************************************************************** +*/ + +#ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_MKL_H +#define EIGEN_SELFADJOINT_MATRIX_VECTOR_MKL_H + +namespace Eigen { + +namespace internal { + +/********************************************************************** +* This file implements selfadjoint matrix-vector multiplication using BLAS +**********************************************************************/ + +// symv/hemv specialization + +template +struct selfadjoint_matrix_vector_product_symv : + selfadjoint_matrix_vector_product {}; + +#define EIGEN_MKL_SYMV_SPECIALIZE(Scalar) \ +template \ +struct selfadjoint_matrix_vector_product { \ +static EIGEN_DONT_INLINE void run( \ + Index size, const Scalar* lhs, Index lhsStride, \ + const Scalar* _rhs, Index rhsIncr, Scalar* res, Scalar alpha) { \ + enum {\ + IsColMajor = StorageOrder==ColMajor \ + }; \ + if (IsColMajor == ConjugateLhs) {\ + selfadjoint_matrix_vector_product::run( \ + size, lhs, lhsStride, _rhs, rhsIncr, res, alpha); \ + } else {\ + selfadjoint_matrix_vector_product_symv::run( \ + size, lhs, lhsStride, _rhs, rhsIncr, res, alpha); \ + }\ + } \ +}; \ + +EIGEN_MKL_SYMV_SPECIALIZE(double) +EIGEN_MKL_SYMV_SPECIALIZE(float) +EIGEN_MKL_SYMV_SPECIALIZE(dcomplex) +EIGEN_MKL_SYMV_SPECIALIZE(scomplex) + +#define EIGEN_MKL_SYMV_SPECIALIZATION(EIGTYPE,MKLTYPE,MKLFUNC) \ +template \ +struct selfadjoint_matrix_vector_product_symv \ +{ \ +typedef Matrix SYMVVector;\ +\ +static EIGEN_DONT_INLINE void run( \ +Index size, const EIGTYPE* lhs, Index lhsStride, \ +const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* res, EIGTYPE alpha) \ +{ \ + enum {\ + IsRowMajor = StorageOrder==RowMajor ? 1 : 0, \ + IsLower = UpLo == Lower ? 1 : 0 \ + }; \ + MKL_INT n=size, lda=lhsStride, incx=rhsIncr, incy=1; \ + MKLTYPE alpha_, beta_; \ + const EIGTYPE *x_ptr, myone(1); \ + char uplo=(IsRowMajor) ? (IsLower ? 'U' : 'L') : (IsLower ? 'L' : 'U'); \ + assign_scalar_eig2mkl(alpha_, alpha); \ + assign_scalar_eig2mkl(beta_, myone); \ + SYMVVector x_tmp; \ + if (ConjugateRhs) { \ + Map > map_x(_rhs,size,1,InnerStride<>(incx)); \ + x_tmp=map_x.conjugate(); \ + x_ptr=x_tmp.data(); \ + incx=1; \ + } else x_ptr=_rhs; \ + MKLFUNC(&uplo, &n, &alpha_, (const MKLTYPE*)lhs, &lda, (const MKLTYPE*)x_ptr, &incx, &beta_, (MKLTYPE*)res, &incy); \ +}\ +}; + +EIGEN_MKL_SYMV_SPECIALIZATION(double, double, dsymv) +EIGEN_MKL_SYMV_SPECIALIZATION(float, float, ssymv) +EIGEN_MKL_SYMV_SPECIALIZATION(dcomplex, MKL_Complex16, zhemv) +EIGEN_MKL_SYMV_SPECIALIZATION(scomplex, MKL_Complex8, chemv) + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_MKL_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointProduct.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointProduct.h index 3a4523fa4..a3ff9e3e7 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointProduct.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointProduct.h @@ -31,6 +31,8 @@ * It corresponds to the level 3 SYRK and level 2 SYR Blas routines. 
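The EIGEN_MKL_SYMV_SPECIALIZE macro above does not send every case to BLAS: reading the flattened hunk, it keeps Eigen's built-in kernel when the storage order and conjugation flags have no direct ?SYMV/?HEMV equivalent (the `if (IsColMajor == ConjugateLhs)` branch) and only otherwise forwards to the `_symv` helper defined by EIGEN_MKL_SYMV_SPECIALIZATION. The sketch below restates that dispatch with hypothetical names and simplified parameters; it illustrates the control flow, not the macro's actual expansion.

    #include <cstdio>

    // Illustration of the run-time dispatch in EIGEN_MKL_SYMV_SPECIALIZE
    // (hypothetical names, simplified parameters).
    enum StorageOrderKind { ColMajorOrder, RowMajorOrder };

    struct built_in_kernel  { static void run() { std::puts("built-in kernel"); } };
    struct blas_symv_kernel { static void run() { std::puts("BLAS ?symv/?hemv kernel"); } };

    template<StorageOrderKind Order, bool ConjugateLhs>
    struct symv_dispatch
    {
      static void run()
      {
        const bool is_col_major = (Order == ColMajorOrder);
        if (is_col_major == ConjugateLhs)
          built_in_kernel::run();   // no direct BLAS mapping for this combination
        else
          blas_symv_kernel::run();  // hand off to the ?symv/?hemv binding
      }
    };

    int main()
    {
      symv_dispatch<ColMajorOrder, false>::run(); // goes to BLAS
      symv_dispatch<ColMajorOrder, true >::run(); // stays on the built-in kernel
      return 0;
    }
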
**********************************************************************/ +namespace Eigen { + template struct selfadjoint_rank1_update; @@ -72,7 +74,7 @@ struct selfadjoint_product_selector typedef internal::blas_traits OtherBlasTraits; typedef typename OtherBlasTraits::DirectLinearAccessType ActualOtherType; typedef typename internal::remove_all::type _ActualOtherType; - const ActualOtherType actualOther = OtherBlasTraits::extract(other.derived()); + typename internal::add_const_on_value_type::type actualOther = OtherBlasTraits::extract(other.derived()); Scalar actualAlpha = alpha * OtherBlasTraits::extractScalarFactor(other.derived()); @@ -105,12 +107,12 @@ struct selfadjoint_product_selector typedef internal::blas_traits OtherBlasTraits; typedef typename OtherBlasTraits::DirectLinearAccessType ActualOtherType; typedef typename internal::remove_all::type _ActualOtherType; - const ActualOtherType actualOther = OtherBlasTraits::extract(other.derived()); + typename internal::add_const_on_value_type::type actualOther = OtherBlasTraits::extract(other.derived()); Scalar actualAlpha = alpha * OtherBlasTraits::extractScalarFactor(other.derived()); enum { IsRowMajor = (internal::traits::Flags&RowMajorBit) ? 1 : 0 }; - + internal::general_matrix_matrix_triangular_product::IsComplex, Scalar, _ActualOtherType::Flags&RowMajorBit ? ColMajor : RowMajor, (!OtherBlasTraits::NeedToConjugate) && NumTraits::IsComplex, @@ -133,4 +135,6 @@ SelfAdjointView& SelfAdjointView return *this; } +} // end namespace Eigen + #endif // EIGEN_SELFADJOINT_PRODUCT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointRank2Update.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointRank2Update.h index 9f8b8438a..001cfb591 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointRank2Update.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/SelfadjointRank2Update.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SELFADJOINTRANK2UPTADE_H #define EIGEN_SELFADJOINTRANK2UPTADE_H +namespace Eigen { + namespace internal { /* Optimized selfadjoint matrix += alpha * uv' + conj(alpha)*vu' @@ -76,12 +78,12 @@ SelfAdjointView& SelfAdjointView typedef internal::blas_traits UBlasTraits; typedef typename UBlasTraits::DirectLinearAccessType ActualUType; typedef typename internal::remove_all::type _ActualUType; - const ActualUType actualU = UBlasTraits::extract(u.derived()); + typename internal::add_const_on_value_type::type actualU = UBlasTraits::extract(u.derived()); typedef internal::blas_traits VBlasTraits; typedef typename VBlasTraits::DirectLinearAccessType ActualVType; typedef typename internal::remove_all::type _ActualVType; - const ActualVType actualV = VBlasTraits::extract(v.derived()); + typename internal::add_const_on_value_type::type actualV = VBlasTraits::extract(v.derived()); // If MatrixType is row major, then we use the routine for lower triangular in the upper triangular case and // vice versa, and take the complex conjugate of all coefficients and vector entries. 
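Both SelfadjointProduct.h and SelfadjointRank2Update.h above apply the same add_const_on_value_type treatment to the extracted operands; the user-facing entry points they implement are the `rankUpdate()` members of SelfAdjointView, which are what ultimately map onto the SYR/SYRK and SYR2 routines mentioned in the comments. A short usage sketch (sizes and scalars are arbitrary):

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Zero(4, 4);
      Eigen::VectorXd u = Eigen::VectorXd::Random(4);
      Eigen::VectorXd v = Eigen::VectorXd::Random(4);

      // Rank-1 update (SYR-like): A += 2.0 * u * u^T, touching only the lower triangle.
      A.selfadjointView<Eigen::Lower>().rankUpdate(u, 2.0);

      // Rank-2 update (SYR2-like): A += 0.5 * u * v^T + 0.5 * v * u^T.
      A.selfadjointView<Eigen::Lower>().rankUpdate(u, v, 0.5);

      return 0;
    }
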
@@ -101,4 +103,6 @@ SelfAdjointView& SelfAdjointView return *this; } +} // end namespace Eigen + #endif // EIGEN_SELFADJOINTRANK2UPTADE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h index 0c48d2efb..06053bfd9 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h @@ -25,6 +25,8 @@ #ifndef EIGEN_TRIANGULAR_MATRIX_MATRIX_H #define EIGEN_TRIANGULAR_MATRIX_MATRIX_H +namespace Eigen { + namespace internal { // template @@ -58,16 +60,16 @@ template + int ResStorageOrder, int Version = Specialized> struct product_triangular_matrix_matrix; template + int RhsStorageOrder, bool ConjugateRhs, int Version> struct product_triangular_matrix_matrix + RhsStorageOrder,ConjugateRhs,RowMajor,Version> { static EIGEN_STRONG_INLINE void run( Index rows, Index cols, Index depth, @@ -91,15 +93,15 @@ struct product_triangular_matrix_matrix + int RhsStorageOrder, bool ConjugateRhs, int Version> struct product_triangular_matrix_matrix + RhsStorageOrder,ConjugateRhs,ColMajor,Version> { typedef gebp_traits Traits; enum { - SmallPanelWidth = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr), + SmallPanelWidth = 2 * EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr), IsLower = (Mode&Lower) == Lower, SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1 }; @@ -220,10 +222,10 @@ struct product_triangular_matrix_matrix + int RhsStorageOrder, bool ConjugateRhs, int Version> struct product_triangular_matrix_matrix + RhsStorageOrder,ConjugateRhs,ColMajor,Version> { typedef gebp_traits Traits; enum { @@ -378,8 +380,8 @@ struct TriangularProduct template void scaleAndAddTo(Dest& dst, Scalar alpha) const { - const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs); - const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs); + typename internal::add_const_on_value_type::type lhs = LhsBlasTraits::extract(m_lhs); + typename internal::add_const_on_value_type::type rhs = RhsBlasTraits::extract(m_rhs); Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) * RhsBlasTraits::extractScalarFactor(m_rhs); @@ -399,5 +401,6 @@ struct TriangularProduct } }; +} // end namespace Eigen #endif // EIGEN_TRIANGULAR_MATRIX_MATRIX_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h new file mode 100644 index 000000000..8173da5bb --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h @@ -0,0 +1,309 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
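The TriangularMatrixMatrix hunks above add a trailing `int Version = Specialized` template parameter to product_triangular_matrix_matrix (alongside an unrelated doubling of SmallPanelWidth). The Version parameter is the hook the new _MKL headers use: the BLAS-backed code is selected for the default Specialized value, yet it can still reach the generic implementation by instantiating the very same template with BuiltIn, as the rankupdate and TRMM specializations do. The sketch below shows the pattern in isolation; Specialized and BuiltIn are the constants used in the patch, while the remaining names, enum values, and the bool parameter are made up for illustration.

    #include <cstdio>

    enum { BuiltIn = 0, Specialized = 1 }; // values here are illustrative only

    // Generic kernel; the defaulted Version means "use whatever specialization exists".
    template<typename Scalar, int Version = Specialized>
    struct trmm_product
    {
      static void run() { std::puts("generic triangular * general product"); }
    };

    // Backend specialization for double, picked when Version == Specialized.
    // It can still bounce back to the generic code via <double, BuiltIn>.
    template<>
    struct trmm_product<double, Specialized>
    {
      static void run(bool shape_fits_blas)
      {
        if (shape_fits_blas)
          std::puts("call BLAS ?trmm");
        else
          trmm_product<double, BuiltIn>::run(); // explicit fallback to the generic kernel
      }
    };

    int main()
    {
      trmm_product<float>::run();        // only the generic kernel exists for float
      trmm_product<double>::run(true);   // BLAS path
      trmm_product<double>::run(false);  // falls back to the generic kernel
      return 0;
    }
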
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * Triangular matrix * matrix product functionality based on ?TRMM. + ******************************************************************************** +*/ + +#ifndef EIGEN_TRIANGULAR_MATRIX_MATRIX_MKL_H +#define EIGEN_TRIANGULAR_MATRIX_MATRIX_MKL_H + +namespace Eigen { + +namespace internal { + + +template +struct product_triangular_matrix_matrix_trmm : + product_triangular_matrix_matrix {}; + + +// try to go to BLAS specialization +#define EIGEN_MKL_TRMM_SPECIALIZE(Scalar, LhsIsTriangular) \ +template \ +struct product_triangular_matrix_matrix { \ + static inline void run(Index _rows, Index _cols, Index _depth, const Scalar* _lhs, Index lhsStride,\ + const Scalar* _rhs, Index rhsStride, Scalar* res, Index resStride, Scalar alpha) { \ + product_triangular_matrix_matrix_trmm::run( \ + _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha); \ + } \ +}; + +EIGEN_MKL_TRMM_SPECIALIZE(double, true) +EIGEN_MKL_TRMM_SPECIALIZE(double, false) +EIGEN_MKL_TRMM_SPECIALIZE(dcomplex, true) +EIGEN_MKL_TRMM_SPECIALIZE(dcomplex, false) +EIGEN_MKL_TRMM_SPECIALIZE(float, true) +EIGEN_MKL_TRMM_SPECIALIZE(float, false) +EIGEN_MKL_TRMM_SPECIALIZE(scomplex, true) +EIGEN_MKL_TRMM_SPECIALIZE(scomplex, false) + +// implements col-major += alpha * op(triangular) * op(general) +#define EIGEN_MKL_TRMM_L(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \ +template \ +struct product_triangular_matrix_matrix_trmm \ +{ \ + enum { \ + IsLower = (Mode&Lower) == Lower, \ + SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \ + IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \ + IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \ + LowUp = IsLower ? Lower : Upper, \ + conjA = ((LhsStorageOrder==ColMajor) && ConjugateLhs) ? 1 : 0 \ + }; \ +\ + static EIGEN_DONT_INLINE void run( \ + Index _rows, Index _cols, Index _depth, \ + const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsStride, \ + EIGTYPE* res, Index resStride, \ + EIGTYPE alpha) \ + { \ + Index diagSize = (std::min)(_rows,_depth); \ + Index rows = IsLower ? _rows : diagSize; \ + Index depth = IsLower ? diagSize : _depth; \ + Index cols = _cols; \ +\ + typedef Matrix MatrixLhs; \ + typedef Matrix MatrixRhs; \ +\ +/* Non-square case - doesn't fit to MKL ?TRMM. 
Fall to default triangular product or call MKL ?GEMM*/ \ + if (rows != depth) { \ +\ + int nthr = mkl_domain_get_max_threads(MKL_BLAS); \ +\ + if (((nthr==1) && (((std::max)(rows,depth)-diagSize)/(double)diagSize < 0.5))) { \ + /* Most likely no benefit to call TRMM or GEMM from MKL*/ \ + product_triangular_matrix_matrix::run( \ + _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha); \ + /*std::cout << "TRMM_L: A is not square! Go to Eigen TRMM implementation!\n";*/ \ + } else { \ + /* Make sense to call GEMM */ \ + Map > lhsMap(_lhs,rows,depth,OuterStride<>(lhsStride)); \ + MatrixLhs aa_tmp=lhsMap.template triangularView(); \ + MKL_INT aStride = aa_tmp.outerStride(); \ + gemm_blocking_space blocking(_rows,_cols,_depth); \ + general_matrix_matrix_product::run( \ + rows, cols, depth, aa_tmp.data(), aStride, _rhs, rhsStride, res, resStride, alpha, blocking, 0); \ +\ + /*std::cout << "TRMM_L: A is not square! Go to MKL GEMM implementation! " << nthr<<" \n";*/ \ + } \ + return; \ + } \ + char side = 'L', transa, uplo, diag = 'N'; \ + EIGTYPE *b; \ + const EIGTYPE *a; \ + MKL_INT m, n, lda, ldb; \ + MKLTYPE alpha_; \ +\ +/* Set alpha_*/ \ + assign_scalar_eig2mkl(alpha_, alpha); \ +\ +/* Set m, n */ \ + m = (MKL_INT)diagSize; \ + n = (MKL_INT)cols; \ +\ +/* Set trans */ \ + transa = (LhsStorageOrder==RowMajor) ? ((ConjugateLhs) ? 'C' : 'T') : 'N'; \ +\ +/* Set b, ldb */ \ + Map > rhs(_rhs,depth,cols,OuterStride<>(rhsStride)); \ + MatrixX##EIGPREFIX b_tmp; \ +\ + if (ConjugateRhs) b_tmp = rhs.conjugate(); else b_tmp = rhs; \ + b = b_tmp.data(); \ + ldb = b_tmp.outerStride(); \ +\ +/* Set uplo */ \ + uplo = IsLower ? 'L' : 'U'; \ + if (LhsStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \ +/* Set a, lda */ \ + Map > lhs(_lhs,rows,depth,OuterStride<>(lhsStride)); \ + MatrixLhs a_tmp; \ +\ + if ((conjA!=0) || (SetDiag==0)) { \ + if (conjA) a_tmp = lhs.conjugate(); else a_tmp = lhs; \ + if (IsZeroDiag) \ + a_tmp.diagonal().setZero(); \ + else if (IsUnitDiag) \ + a_tmp.diagonal().setOnes();\ + a = a_tmp.data(); \ + lda = a_tmp.outerStride(); \ + } else { \ + a = _lhs; \ + lda = lhsStride; \ + } \ + /*std::cout << "TRMM_L: A is square! Go to MKL TRMM implementation! \n";*/ \ +/* call ?trmm*/ \ + MKLPREFIX##trmm(&side, &uplo, &transa, &diag, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (MKLTYPE*)b, &ldb); \ +\ +/* Add op(a_triangular)*b into res*/ \ + Map > res_tmp(res,rows,cols,OuterStride<>(resStride)); \ + res_tmp=res_tmp+b_tmp; \ + } \ +}; + +EIGEN_MKL_TRMM_L(double, double, d, d) +EIGEN_MKL_TRMM_L(dcomplex, MKL_Complex16, cd, z) +EIGEN_MKL_TRMM_L(float, float, f, s) +EIGEN_MKL_TRMM_L(scomplex, MKL_Complex8, cf, c) + +// implements col-major += alpha * op(general) * op(triangular) +#define EIGEN_MKL_TRMM_R(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \ +template \ +struct product_triangular_matrix_matrix_trmm \ +{ \ + enum { \ + IsLower = (Mode&Lower) == Lower, \ + SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \ + IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \ + IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \ + LowUp = IsLower ? Lower : Upper, \ + conjA = ((RhsStorageOrder==ColMajor) && ConjugateRhs) ? 1 : 0 \ + }; \ +\ + static EIGEN_DONT_INLINE void run( \ + Index _rows, Index _cols, Index _depth, \ + const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsStride, \ + EIGTYPE* res, Index resStride, \ + EIGTYPE alpha) \ + { \ + Index diagSize = (std::min)(_cols,_depth); \ + Index rows = _rows; \ + Index depth = IsLower ? _depth : diagSize; \ + Index cols = IsLower ? 
diagSize : _cols; \ +\ + typedef Matrix MatrixLhs; \ + typedef Matrix MatrixRhs; \ +\ +/* Non-square case - doesn't fit to MKL ?TRMM. Fall to default triangular product or call MKL ?GEMM*/ \ + if (cols != depth) { \ +\ + int nthr = mkl_domain_get_max_threads(MKL_BLAS); \ +\ + if ((nthr==1) && (((std::max)(cols,depth)-diagSize)/(double)diagSize < 0.5)) { \ + /* Most likely no benefit to call TRMM or GEMM from MKL*/ \ + product_triangular_matrix_matrix::run( \ + _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha); \ + /*std::cout << "TRMM_R: A is not square! Go to Eigen TRMM implementation!\n";*/ \ + } else { \ + /* Make sense to call GEMM */ \ + Map > rhsMap(_rhs,depth,cols, OuterStride<>(rhsStride)); \ + MatrixRhs aa_tmp=rhsMap.template triangularView(); \ + MKL_INT aStride = aa_tmp.outerStride(); \ + gemm_blocking_space blocking(_rows,_cols,_depth); \ + general_matrix_matrix_product::run( \ + rows, cols, depth, _lhs, lhsStride, aa_tmp.data(), aStride, res, resStride, alpha, blocking, 0); \ +\ + /*std::cout << "TRMM_R: A is not square! Go to MKL GEMM implementation! " << nthr<<" \n";*/ \ + } \ + return; \ + } \ + char side = 'R', transa, uplo, diag = 'N'; \ + EIGTYPE *b; \ + const EIGTYPE *a; \ + MKL_INT m, n, lda, ldb; \ + MKLTYPE alpha_; \ +\ +/* Set alpha_*/ \ + assign_scalar_eig2mkl(alpha_, alpha); \ +\ +/* Set m, n */ \ + m = (MKL_INT)rows; \ + n = (MKL_INT)diagSize; \ +\ +/* Set trans */ \ + transa = (RhsStorageOrder==RowMajor) ? ((ConjugateRhs) ? 'C' : 'T') : 'N'; \ +\ +/* Set b, ldb */ \ + Map > lhs(_lhs,rows,depth,OuterStride<>(lhsStride)); \ + MatrixX##EIGPREFIX b_tmp; \ +\ + if (ConjugateLhs) b_tmp = lhs.conjugate(); else b_tmp = lhs; \ + b = b_tmp.data(); \ + ldb = b_tmp.outerStride(); \ +\ +/* Set uplo */ \ + uplo = IsLower ? 'L' : 'U'; \ + if (RhsStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \ +/* Set a, lda */ \ + Map > rhs(_rhs,depth,cols, OuterStride<>(rhsStride)); \ + MatrixRhs a_tmp; \ +\ + if ((conjA!=0) || (SetDiag==0)) { \ + if (conjA) a_tmp = rhs.conjugate(); else a_tmp = rhs; \ + if (IsZeroDiag) \ + a_tmp.diagonal().setZero(); \ + else if (IsUnitDiag) \ + a_tmp.diagonal().setOnes();\ + a = a_tmp.data(); \ + lda = a_tmp.outerStride(); \ + } else { \ + a = _rhs; \ + lda = rhsStride; \ + } \ + /*std::cout << "TRMM_R: A is square! Go to MKL TRMM implementation! 
\n";*/ \ +/* call ?trmm*/ \ + MKLPREFIX##trmm(&side, &uplo, &transa, &diag, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (MKLTYPE*)b, &ldb); \ +\ +/* Add op(a_triangular)*b into res*/ \ + Map > res_tmp(res,rows,cols,OuterStride<>(resStride)); \ + res_tmp=res_tmp+b_tmp; \ + } \ +}; + +EIGEN_MKL_TRMM_R(double, double, d, d) +EIGEN_MKL_TRMM_R(dcomplex, MKL_Complex16, cd, z) +EIGEN_MKL_TRMM_R(float, float, f, s) +EIGEN_MKL_TRMM_R(scomplex, MKL_Complex8, cf, c) + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TRIANGULAR_MATRIX_MATRIX_MKL_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularMatrixVector.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularMatrixVector.h index 71b4a52ab..e1dc0c23e 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularMatrixVector.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularMatrixVector.h @@ -25,23 +25,29 @@ #ifndef EIGEN_TRIANGULARMATRIXVECTOR_H #define EIGEN_TRIANGULARMATRIXVECTOR_H +namespace Eigen { + namespace internal { -template -struct product_triangular_matrix_vector; +template +struct triangular_matrix_vector_product; -template -struct product_triangular_matrix_vector +template +struct triangular_matrix_vector_product { typedef typename scalar_product_traits::ReturnType ResScalar; enum { IsLower = ((Mode&Lower)==Lower), - HasUnitDiag = (Mode & UnitDiag)==UnitDiag + HasUnitDiag = (Mode & UnitDiag)==UnitDiag, + HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag }; - static EIGEN_DONT_INLINE void run(Index rows, Index cols, const LhsScalar* _lhs, Index lhsStride, + static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride, const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, ResScalar alpha) { static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; + Index size = (std::min)(_rows,_cols); + Index rows = IsLower ? _rows : (std::min)(_rows,_cols); + Index cols = IsLower ? (std::min)(_rows,_cols) : _cols; typedef Map, 0, OuterStride<> > LhsMap; const LhsMap lhs(_lhs,rows,cols,OuterStride<>(lhsStride)); @@ -54,45 +60,57 @@ struct product_triangular_matrix_vector > ResMap; ResMap res(_res,rows); - for (Index pi=0; pi0) + if ((!(HasUnitDiag||HasZeroDiag)) || (--r)>0) res.segment(s,r) += (alpha * cjRhs.coeff(i)) * cjLhs.col(i).segment(s,r); if (HasUnitDiag) res.coeffRef(i) += alpha * cjRhs.coeff(i); } - Index r = IsLower ? cols - pi - actualPanelWidth : pi; + Index r = IsLower ? rows - pi - actualPanelWidth : pi; if (r>0) { Index s = IsLower ? 
pi+actualPanelWidth : 0; - general_matrix_vector_product::run( + general_matrix_vector_product::run( r, actualPanelWidth, &lhs.coeffRef(s,pi), lhsStride, &rhs.coeffRef(pi), rhsIncr, &res.coeffRef(s), resIncr, alpha); } } + if((!IsLower) && cols>size) + { + general_matrix_vector_product::run( + rows, cols-size, + &lhs.coeffRef(0,size), lhsStride, + &rhs.coeffRef(size), rhsIncr, + _res, resIncr, alpha); + } } }; -template -struct product_triangular_matrix_vector +template +struct triangular_matrix_vector_product { typedef typename scalar_product_traits::ReturnType ResScalar; enum { IsLower = ((Mode&Lower)==Lower), - HasUnitDiag = (Mode & UnitDiag)==UnitDiag + HasUnitDiag = (Mode & UnitDiag)==UnitDiag, + HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag }; - static void run(Index rows, Index cols, const LhsScalar* _lhs, Index lhsStride, + static void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride, const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, ResScalar alpha) { static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; + Index diagSize = (std::min)(_rows,_cols); + Index rows = IsLower ? _rows : diagSize; + Index cols = IsLower ? diagSize : _cols; typedef Map, 0, OuterStride<> > LhsMap; const LhsMap lhs(_lhs,rows,cols,OuterStride<>(lhsStride)); @@ -105,15 +123,15 @@ struct product_triangular_matrix_vector, 0, InnerStride<> > ResMap; ResMap res(_res,rows,InnerStride<>(resIncr)); - for (Index pi=0; pi0) + if ((!(HasUnitDiag||HasZeroDiag)) || (--r)>0) res.coeffRef(i) += alpha * (cjLhs.row(i).segment(s,r).cwiseProduct(cjRhs.segment(s,r).transpose())).sum(); if (HasUnitDiag) res.coeffRef(i) += alpha * cjRhs.coeff(i); @@ -122,13 +140,21 @@ struct product_triangular_matrix_vector0) { Index s = IsLower ? 0 : pi + actualPanelWidth; - general_matrix_vector_product::run( + general_matrix_vector_product::run( actualPanelWidth, r, &lhs.coeffRef(pi,s), lhsStride, &rhs.coeffRef(s), rhsIncr, &res.coeffRef(pi), resIncr, alpha); } } + if(IsLower && rows>diagSize) + { + general_matrix_vector_product::run( + rows-diagSize, cols, + &lhs.coeffRef(diagSize,0), lhsStride, + &rhs.coeffRef(0), rhsIncr, + &res.coeffRef(diagSize), resIncr, alpha); + } } }; @@ -180,7 +206,7 @@ struct TriangularProduct { eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); - typedef TriangularProduct<(Mode & UnitDiag) | ((Mode & Lower) ? Upper : Lower),true,Transpose,false,Transpose,true> TriangularProductTranspose; + typedef TriangularProduct<(Mode & (UnitDiag|ZeroDiag)) | ((Mode & Lower) ? Upper : Lower),true,Transpose,false,Transpose,true> TriangularProductTranspose; Transpose dstT(dst); internal::trmv_selector<(int(internal::traits::Flags)&RowMajorBit) ? 
ColMajor : RowMajor>::run( TriangularProductTranspose(m_rhs.transpose(),m_lhs.transpose()), dstT, alpha); @@ -208,8 +234,8 @@ template<> struct trmv_selector typedef typename ProductType::RhsBlasTraits RhsBlasTraits; typedef Map, Aligned> MappedDest; - const ActualLhsType actualLhs = LhsBlasTraits::extract(prod.lhs()); - const ActualRhsType actualRhs = RhsBlasTraits::extract(prod.rhs()); + typename internal::add_const_on_value_type::type actualLhs = LhsBlasTraits::extract(prod.lhs()); + typename internal::add_const_on_value_type::type actualRhs = RhsBlasTraits::extract(prod.rhs()); ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs()) * RhsBlasTraits::extractScalarFactor(prod.rhs()); @@ -247,7 +273,7 @@ template<> struct trmv_selector MappedDest(actualDestPtr, dest.size()) = dest; } - internal::product_triangular_matrix_vector + internal::triangular_matrix_vector_product struct trmv_selector Map(actualRhsPtr, actualRhs.size()) = actualRhs; } - internal::product_triangular_matrix_vector + internal::triangular_matrix_vector_product struct trmv_selector } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_TRIANGULARMATRIXVECTOR_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularMatrixVector_MKL.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularMatrixVector_MKL.h new file mode 100644 index 000000000..3c2c3049a --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularMatrixVector_MKL.h @@ -0,0 +1,247 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * Triangular matrix-vector product functionality based on ?TRMV. 
+ ******************************************************************************** +*/ + +#ifndef EIGEN_TRIANGULAR_MATRIX_VECTOR_MKL_H +#define EIGEN_TRIANGULAR_MATRIX_VECTOR_MKL_H + +namespace Eigen { + +namespace internal { + +/********************************************************************** +* This file implements triangular matrix-vector multiplication using BLAS +**********************************************************************/ + +// trmv/hemv specialization + +template +struct triangular_matrix_vector_product_trmv : + triangular_matrix_vector_product {}; + +#define EIGEN_MKL_TRMV_SPECIALIZE(Scalar) \ +template \ +struct triangular_matrix_vector_product { \ + static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \ + const Scalar* _rhs, Index rhsIncr, Scalar* _res, Index resIncr, Scalar alpha) { \ + triangular_matrix_vector_product_trmv::run( \ + _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \ + } \ +}; \ +template \ +struct triangular_matrix_vector_product { \ + static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \ + const Scalar* _rhs, Index rhsIncr, Scalar* _res, Index resIncr, Scalar alpha) { \ + triangular_matrix_vector_product_trmv::run( \ + _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \ + } \ +}; + +EIGEN_MKL_TRMV_SPECIALIZE(double) +EIGEN_MKL_TRMV_SPECIALIZE(float) +EIGEN_MKL_TRMV_SPECIALIZE(dcomplex) +EIGEN_MKL_TRMV_SPECIALIZE(scomplex) + +// implements col-major: res += alpha * op(triangular) * vector +#define EIGEN_MKL_TRMV_CM(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \ +template \ +struct triangular_matrix_vector_product_trmv { \ + enum { \ + IsLower = (Mode&Lower) == Lower, \ + SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \ + IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \ + IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \ + LowUp = IsLower ? Lower : Upper \ + }; \ + static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \ + { \ + if (ConjLhs || IsZeroDiag) { \ + triangular_matrix_vector_product::run( \ + _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \ + return; \ + }\ + Index size = (std::min)(_rows,_cols); \ + Index rows = IsLower ? _rows : size; \ + Index cols = IsLower ? size : _cols; \ +\ + typedef VectorX##EIGPREFIX VectorRhs; \ + EIGTYPE *x, *y;\ +\ +/* Set x*/ \ + Map > rhs(_rhs,cols,InnerStride<>(rhsIncr)); \ + VectorRhs x_tmp; \ + if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \ + x = x_tmp.data(); \ +\ +/* Square part handling */\ +\ + char trans, uplo, diag; \ + MKL_INT m, n, lda, incx, incy; \ + EIGTYPE const *a; \ + MKLTYPE alpha_, beta_; \ + assign_scalar_eig2mkl(alpha_, alpha); \ + assign_scalar_eig2mkl(beta_, EIGTYPE(1)); \ +\ +/* Set m, n */ \ + n = (MKL_INT)size; \ + lda = lhsStride; \ + incx = 1; \ + incy = resIncr; \ +\ +/* Set uplo, trans and diag*/ \ + trans = 'N'; \ + uplo = IsLower ? 'L' : 'U'; \ + diag = IsUnitDiag ? 'U' : 'N'; \ +\ +/* call ?TRMV*/ \ + MKLPREFIX##trmv(&uplo, &trans, &diag, &n, (const MKLTYPE*)_lhs, &lda, (MKLTYPE*)x, &incx); \ +\ +/* Add op(a_tr)rhs into res*/ \ + MKLPREFIX##axpy(&n, &alpha_,(const MKLTYPE*)x, &incx, (MKLTYPE*)_res, &incy); \ +/* Non-square case - doesn't fit to MKL ?TRMV. 
Fall to default triangular product*/ \ + if (size<(std::max)(rows,cols)) { \ + typedef Matrix MatrixLhs; \ + if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \ + x = x_tmp.data(); \ + if (size \ +struct triangular_matrix_vector_product_trmv { \ + enum { \ + IsLower = (Mode&Lower) == Lower, \ + SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \ + IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \ + IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \ + LowUp = IsLower ? Lower : Upper \ + }; \ + static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \ + { \ + if (IsZeroDiag) { \ + triangular_matrix_vector_product::run( \ + _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \ + return; \ + }\ + Index size = (std::min)(_rows,_cols); \ + Index rows = IsLower ? _rows : size; \ + Index cols = IsLower ? size : _cols; \ +\ + typedef VectorX##EIGPREFIX VectorRhs; \ + EIGTYPE *x, *y;\ +\ +/* Set x*/ \ + Map > rhs(_rhs,cols,InnerStride<>(rhsIncr)); \ + VectorRhs x_tmp; \ + if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \ + x = x_tmp.data(); \ +\ +/* Square part handling */\ +\ + char trans, uplo, diag; \ + MKL_INT m, n, lda, incx, incy; \ + EIGTYPE const *a; \ + MKLTYPE alpha_, beta_; \ + assign_scalar_eig2mkl(alpha_, alpha); \ + assign_scalar_eig2mkl(beta_, EIGTYPE(1)); \ +\ +/* Set m, n */ \ + n = (MKL_INT)size; \ + lda = lhsStride; \ + incx = 1; \ + incy = resIncr; \ +\ +/* Set uplo, trans and diag*/ \ + trans = ConjLhs ? 'C' : 'T'; \ + uplo = IsLower ? 'U' : 'L'; \ + diag = IsUnitDiag ? 'U' : 'N'; \ +\ +/* call ?TRMV*/ \ + MKLPREFIX##trmv(&uplo, &trans, &diag, &n, (const MKLTYPE*)_lhs, &lda, (MKLTYPE*)x, &incx); \ +\ +/* Add op(a_tr)rhs into res*/ \ + MKLPREFIX##axpy(&n, &alpha_,(const MKLTYPE*)x, &incx, (MKLTYPE*)_res, &incy); \ +/* Non-square case - doesn't fit to MKL ?TRMV. Fall to default triangular product*/ \ + if (size<(std::max)(rows,cols)) { \ + typedef Matrix MatrixLhs; \ + if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \ + x = x_tmp.data(); \ + if (size& blocking) { triangular_solve_matrix< Scalar, Index, Side==OnTheLeft?OnTheRight:OnTheLeft, (Mode&UnitDiag) | ((Mode&Upper) ? Lower : Upper), NumTraits::IsComplex && Conjugate, TriStorageOrder==RowMajor ? 
ColMajor : RowMajor, ColMajor> - ::run(size, cols, tri, triStride, _other, otherStride); + ::run(size, cols, tri, triStride, _other, otherStride, blocking); } }; @@ -53,7 +56,8 @@ struct triangular_solve_matrix& blocking) { Index cols = otherSize; const_blas_data_mapper tri(_tri,triStride); @@ -65,22 +69,29 @@ struct triangular_solve_matrix(kc, mc, nc); + Index kc = blocking.kc(); // cache block size along the K direction + Index mc = (std::min)(size,blocking.mc()); // cache block size along the M direction + std::size_t sizeA = kc*mc; + std::size_t sizeB = kc*cols; std::size_t sizeW = kc*Traits::WorkSpaceFactor; - std::size_t sizeB = sizeW + kc*cols; - ei_declare_aligned_stack_constructed_variable(Scalar, blockA, kc*mc, 0); - ei_declare_aligned_stack_constructed_variable(Scalar, allocatedBlockB, sizeB, 0); - Scalar* blockB = allocatedBlockB + sizeW; + + ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA()); + ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB()); + ei_declare_aligned_stack_constructed_variable(Scalar, blockW, sizeW, blocking.blockW()); conj_if conj; gebp_kernel gebp_kernel; gemm_pack_lhs pack_lhs; gemm_pack_rhs pack_rhs; + // the goal here is to subdivise the Rhs panels such that we keep some cache + // coherence when accessing the rhs elements + std::ptrdiff_t l1, l2; + manage_caching_sizes(GetAction, &l1, &l2); + Index subcols = cols>0 ? l2/(4 * sizeof(Scalar) * otherStride) : 0; + subcols = std::max((subcols/Traits::nr)*Traits::nr, Traits::nr); + for(Index k2=IsLower ? 0 : size; IsLower ? k20; IsLower ? k2+=kc : k2-=kc) @@ -92,16 +103,18 @@ struct triangular_solve_matrix general block copy (done during the next step) - // - R1 = L1^-1 B => tricky part + // - R1 = A11^-1 B => tricky part // - update B from the new R1 => actually this has to be performed continuously during the above step - // - R2 = L2 * B => GEPP + // - R2 -= A21 * B => GEPP - // The tricky part: compute R1 = L1^-1 B while updating B from R1 - // The idea is to split L1 into multiple small vertical panels. - // Each panel can be split into a small triangular part A1 which is processed without optimization, - // and the remaining small part A2 which is processed using gebp with appropriate block strides + // The tricky part: compute R1 = A11^-1 B while updating B from R1 + // The idea is to split A11 into multiple small vertical panels. + // Each panel can be split into a small triangular part T1k which is processed without optimization, + // and the remaining small part T2k which is processed using gebp with appropriate block strides + for(Index j2=0; j2(actual_kc-k1, SmallPanelWidth); @@ -114,11 +127,11 @@ struct triangular_solve_matrix0) @@ -152,13 +165,13 @@ struct triangular_solve_matrix GEPP + + // R2 -= A21 * B => GEPP { Index start = IsLower ? k2+kc : 0; Index end = IsLower ? size : k2-kc; @@ -169,7 +182,7 @@ struct triangular_solve_matrix& blocking) { Index rows = otherSize; const_blas_data_mapper rhs(_tri,triStride); @@ -198,19 +212,16 @@ struct triangular_solve_matrix(Traits::Max_kc/4,size); // cache block size along the K direction -// Index mc = std::min(Traits::Max_mc,size); // cache block size along the M direction - // check that !!!! 
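// Illustrative sketch, not part of the Eigen sources: the solver hunks around
// this point drop the local kc/mc computation (the removed
// computeProductBlockingSizes lines) and instead take a caller-provided
// level3_blocking object, so block sizes and packed buffers can be shared with
// the GEMM kernels invoked below. The interface the new code relies on
// (kc(), mc(), blockA(), blockB(), blockW()) looks roughly like this; the
// class name and member layout here are an approximation.
#include <cstddef>

template<typename LhsScalar, typename RhsScalar>
class level3_blocking_sketch
{
protected:
  LhsScalar* m_blockA;              // packed lhs panel, may be pre-allocated by the caller
  RhsScalar* m_blockB;              // packed rhs panel
  RhsScalar* m_blockW;              // gebp workspace
  std::ptrdiff_t m_mc, m_nc, m_kc;  // cache block sizes along M, N, K

public:
  level3_blocking_sketch() : m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0) {}

  std::ptrdiff_t mc() const { return m_mc; }
  std::ptrdiff_t nc() const { return m_nc; }
  std::ptrdiff_t kc() const { return m_kc; }

  // A null pointer tells ei_declare_aligned_stack_constructed_variable to
  // allocate its own buffer; a non-null pointer is used as-is.
  LhsScalar* blockA() { return m_blockA; }
  RhsScalar* blockB() { return m_blockB; }
  RhsScalar* blockW() { return m_blockW; }
};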
- Index kc = size; // cache block size along the K direction - Index mc = size; // cache block size along the M direction - Index nc = rows; // cache block size along the N direction - computeProductBlockingSizes(kc, mc, nc); + Index kc = blocking.kc(); // cache block size along the K direction + Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction + std::size_t sizeA = kc*mc; + std::size_t sizeB = kc*size; std::size_t sizeW = kc*Traits::WorkSpaceFactor; - std::size_t sizeB = sizeW + kc*size; - ei_declare_aligned_stack_constructed_variable(Scalar, blockA, kc*mc, 0); - ei_declare_aligned_stack_constructed_variable(Scalar, allocatedBlockB, sizeB, 0); - Scalar* blockB = allocatedBlockB + sizeW; + + ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA()); + ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB()); + ei_declare_aligned_stack_constructed_variable(Scalar, blockW, sizeW, blocking.blockW()); conj_if conj; gebp_kernel gebp_kernel; @@ -277,7 +288,7 @@ struct triangular_solve_matrix0) gebp_kernel(_other+i2+startPanel*otherStride, otherStride, blockA, geb, actual_mc, actual_kc, rs, Scalar(-1), - -1, -1, 0, 0, allocatedBlockB); + -1, -1, 0, 0, blockW); } } } @@ -316,4 +327,6 @@ struct triangular_solve_matrix \ +struct triangular_solve_matrix \ +{ \ + enum { \ + IsLower = (Mode&Lower) == Lower, \ + IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \ + IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \ + conjA = ((TriStorageOrder==ColMajor) && Conjugate) ? 1 : 0 \ + }; \ + static EIGEN_DONT_INLINE void run( \ + Index size, Index otherSize, \ + const EIGTYPE* _tri, Index triStride, \ + EIGTYPE* _other, Index otherStride, level3_blocking& /*blocking*/) \ + { \ + MKL_INT m = size, n = otherSize, lda, ldb; \ + char side = 'L', uplo, diag='N', transa; \ + /* Set alpha_ */ \ + MKLTYPE alpha; \ + EIGTYPE myone(1); \ + assign_scalar_eig2mkl(alpha, myone); \ + ldb = otherStride;\ +\ + const EIGTYPE *a; \ +/* Set trans */ \ + transa = (TriStorageOrder==RowMajor) ? ((Conjugate) ? 'C' : 'T') : 'N'; \ +/* Set uplo */ \ + uplo = IsLower ? 'L' : 'U'; \ + if (TriStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \ +/* Set a, lda */ \ + typedef Matrix MatrixTri; \ + Map > tri(_tri,size,size,OuterStride<>(triStride)); \ + MatrixTri a_tmp; \ +\ + if (conjA) { \ + a_tmp = tri.conjugate(); \ + a = a_tmp.data(); \ + lda = a_tmp.outerStride(); \ + } else { \ + a = _tri; \ + lda = triStride; \ + } \ + if (IsUnitDiag) diag='U'; \ +/* call ?trsm*/ \ + MKLPREFIX##trsm(&side, &uplo, &transa, &diag, &m, &n, &alpha, (const MKLTYPE*)a, &lda, (MKLTYPE*)_other, &ldb); \ + } \ +}; + +EIGEN_MKL_TRSM_L(double, double, d) +EIGEN_MKL_TRSM_L(dcomplex, MKL_Complex16, z) +EIGEN_MKL_TRSM_L(float, float, s) +EIGEN_MKL_TRSM_L(scomplex, MKL_Complex8, c) + + +// implements RightSide general * op(triangular)^-1 +#define EIGEN_MKL_TRSM_R(EIGTYPE, MKLTYPE, MKLPREFIX) \ +template \ +struct triangular_solve_matrix \ +{ \ + enum { \ + IsLower = (Mode&Lower) == Lower, \ + IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \ + IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \ + conjA = ((TriStorageOrder==ColMajor) && Conjugate) ? 
1 : 0 \ + }; \ + static EIGEN_DONT_INLINE void run( \ + Index size, Index otherSize, \ + const EIGTYPE* _tri, Index triStride, \ + EIGTYPE* _other, Index otherStride, level3_blocking& /*blocking*/) \ + { \ + MKL_INT m = otherSize, n = size, lda, ldb; \ + char side = 'R', uplo, diag='N', transa; \ + /* Set alpha_ */ \ + MKLTYPE alpha; \ + EIGTYPE myone(1); \ + assign_scalar_eig2mkl(alpha, myone); \ + ldb = otherStride;\ +\ + const EIGTYPE *a; \ +/* Set trans */ \ + transa = (TriStorageOrder==RowMajor) ? ((Conjugate) ? 'C' : 'T') : 'N'; \ +/* Set uplo */ \ + uplo = IsLower ? 'L' : 'U'; \ + if (TriStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \ +/* Set a, lda */ \ + typedef Matrix MatrixTri; \ + Map > tri(_tri,size,size,OuterStride<>(triStride)); \ + MatrixTri a_tmp; \ +\ + if (conjA) { \ + a_tmp = tri.conjugate(); \ + a = a_tmp.data(); \ + lda = a_tmp.outerStride(); \ + } else { \ + a = _tri; \ + lda = triStride; \ + } \ + if (IsUnitDiag) diag='U'; \ +/* call ?trsm*/ \ + MKLPREFIX##trsm(&side, &uplo, &transa, &diag, &m, &n, &alpha, (const MKLTYPE*)a, &lda, (MKLTYPE*)_other, &ldb); \ + /*std::cout << "TRMS_L specialization!\n";*/ \ + } \ +}; + +EIGEN_MKL_TRSM_R(double, double, d) +EIGEN_MKL_TRSM_R(dcomplex, MKL_Complex16, z) +EIGEN_MKL_TRSM_R(float, float, s) +EIGEN_MKL_TRSM_R(scomplex, MKL_Complex8, c) + + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TRIANGULAR_SOLVER_MATRIX_MKL_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularSolverVector.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularSolverVector.h index 639d4a5b4..f83a81061 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularSolverVector.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/TriangularSolverVector.h @@ -25,6 +25,8 @@ #ifndef EIGEN_TRIANGULAR_SOLVER_VECTOR_H #define EIGEN_TRIANGULAR_SOLVER_VECTOR_H +namespace Eigen { + namespace internal { template @@ -147,4 +149,6 @@ struct triangular_solve_vector struct general_matrix_matrix_product; -template +template struct general_matrix_vector_product; @@ -56,11 +58,15 @@ template struct conj_if; template<> struct conj_if { template inline T operator()(const T& x) { return conj(x); } + template + inline T pconj(const T& x) { return internal::pconj(x); } }; template<> struct conj_if { template inline const T& operator()(const T& x) { return x; } + template + inline const T& pconj(const T& x) { return x; } }; template struct conj_helper @@ -118,11 +124,11 @@ template struct conj_helper struct get_factor { - EIGEN_STRONG_INLINE static To run(const From& x) { return x; } + static EIGEN_STRONG_INLINE To run(const From& x) { return x; } }; template struct get_factor::Real> { - EIGEN_STRONG_INLINE static typename NumTraits::Real run(const Scalar& x) { return real(x); } + static EIGEN_STRONG_INLINE typename NumTraits::Real run(const Scalar& x) { return real(x); } }; // Lightweight helper class to access matrix coefficients. @@ -175,7 +181,7 @@ template struct blas_traits ExtractType, typename _ExtractType::PlainObject >::type DirectLinearAccessType; - static inline const ExtractType extract(const XprType& x) { return x; } + static inline ExtractType extract(const XprType& x) { return x; } static inline const Scalar extractScalarFactor(const XprType&) { return Scalar(1); } }; @@ -192,7 +198,7 @@ struct blas_traits, NestedXpr> > IsComplex = NumTraits::IsComplex, NeedToConjugate = Base::NeedToConjugate ? 
0 : IsComplex }; - static inline const ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); } + static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); } static inline Scalar extractScalarFactor(const XprType& x) { return conj(Base::extractScalarFactor(x.nestedExpression())); } }; @@ -204,7 +210,7 @@ struct blas_traits, NestedXpr> > typedef blas_traits Base; typedef CwiseUnaryOp, NestedXpr> XprType; typedef typename Base::ExtractType ExtractType; - static inline const ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); } + static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); } static inline Scalar extractScalarFactor(const XprType& x) { return x.functor().m_other * Base::extractScalarFactor(x.nestedExpression()); } }; @@ -217,7 +223,7 @@ struct blas_traits, NestedXpr> > typedef blas_traits Base; typedef CwiseUnaryOp, NestedXpr> XprType; typedef typename Base::ExtractType ExtractType; - static inline const ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); } + static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); } static inline Scalar extractScalarFactor(const XprType& x) { return - Base::extractScalarFactor(x.nestedExpression()); } }; @@ -239,7 +245,7 @@ struct blas_traits > enum { IsTransposed = Base::IsTransposed ? 0 : 1 }; - static inline const ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); } + static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); } static inline Scalar extractScalarFactor(const XprType& x) { return Base::extractScalarFactor(x.nestedExpression()); } }; @@ -252,7 +258,7 @@ template::HasUsableDirectA struct extract_data_selector { static const typename T::Scalar* run(const T& m) { - return const_cast(&blas_traits::extract(m).coeffRef(0,0)); // FIXME this should be .data() + return blas_traits::extract(m).data(); } }; @@ -268,4 +274,6 @@ template const typename T::Scalar* extract_data(const T& m) } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_BLASUTIL_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Constants.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Constants.h index c3dd3a09d..f34aac85a 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Constants.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Constants.h @@ -26,6 +26,8 @@ #ifndef EIGEN_CONSTANTS_H #define EIGEN_CONSTANTS_H +namespace Eigen { + /** This value means that a quantity is not known at compile-time, and that instead the value is * stored in some runtime variable. * @@ -188,7 +190,9 @@ enum { /** View matrix as an upper triangular matrix with zeros on the diagonal. */ StrictlyUpper=ZeroDiag|Upper, /** Used in BandMatrix and SelfAdjointView to indicate that the matrix is self-adjoint. */ - SelfAdjoint=0x10 + SelfAdjoint=0x10, + /** Used to support symmetric, non-selfadjoint, complex matrices. */ + Symmetric=0x20 }; /** \ingroup enums @@ -200,8 +204,6 @@ enum { Aligned=1 }; -enum { ConditionalJumpCost = 5 }; - /** \ingroup enums * Enum used by DenseBase::corner() in Eigen2 compatibility mode. 
*/ // FIXME after the corner() API change, this was not needed anymore, except by AlignedBox @@ -223,8 +225,6 @@ enum DirectionType { BothDirections }; -enum ProductEvaluationMode { NormalProduct, CacheFriendlyProduct }; - /** \internal \ingroup enums * Enum to specify how to traverse the entries of a matrix. */ enum { @@ -257,6 +257,13 @@ enum { CompleteUnrolling }; +/** \internal \ingroup enums + * Enum to specify whether to use the default (built-in) implementation or the specialization. */ +enum { + Specialized, + BuiltIn +}; + /** \ingroup enums * Enum containing possible values for the \p _Options template parameter of * Matrix, Array and BandMatrix. */ @@ -280,26 +287,21 @@ enum { OnTheRight = 2 }; -/* the following could as well be written: - * enum NoChange_t { NoChange }; - * but it feels dangerous to disambiguate overloaded functions on enum/integer types. - * If on some platform it is really impossible to get rid of "unused variable" warnings, then - * we can always come back to that solution. +/* the following used to be written as: + * + * struct NoChange_t {}; + * namespace { + * EIGEN_UNUSED NoChange_t NoChange; + * } + * + * on the ground that it feels dangerous to disambiguate overloaded functions on enum/integer types. + * However, this leads to "variable declared but never referenced" warnings on Intel Composer XE, + * and we do not know how to get rid of them (bug 450). */ -struct NoChange_t {}; -namespace { - EIGEN_UNUSED NoChange_t NoChange; -} -struct Sequential_t {}; -namespace { - EIGEN_UNUSED Sequential_t Sequential; -} - -struct Default_t {}; -namespace { - EIGEN_UNUSED Default_t Default; -} +enum NoChange_t { NoChange }; +enum Sequential_t { Sequential }; +enum Default_t { Default }; /** \internal \ingroup enums * Used in AmbiVector. */ @@ -375,7 +377,7 @@ enum QRPreconditioners { #error The preprocessor symbol 'Success' is defined, possibly by the X11 header file X.h #endif -/** \ingroups enums +/** \ingroup enums * Enum for reporting the status of a computation. */ enum ComputationInfo { /** Computation was successful. */ @@ -383,7 +385,10 @@ enum ComputationInfo { /** The provided data did not satisfy the prerequisites. */ NumericalIssue = 1, /** Iterative procedure did not converge. */ - NoConvergence = 2 + NoConvergence = 2, + /** The inputs are invalid, or the algorithm has been improperly called. + * When assertions are enabled, such errors trigger an assert. */ + InvalidInput = 3 }; /** \ingroup enums @@ -436,4 +441,6 @@ struct MatrixXpr {}; /** The type used to identify an array expression */ struct ArrayXpr {}; +} // end namespace Eigen + #endif // EIGEN_CONSTANTS_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/DisableStupidWarnings.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/DisableStupidWarnings.h index 00730524b..6a0bf0629 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/DisableStupidWarnings.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/DisableStupidWarnings.h @@ -21,15 +21,13 @@ #elif defined __INTEL_COMPILER // 2196 - routine is both "inline" and "noinline" ("noinline" assumed) // ICC 12 generates this warning even without any inline keyword, when defining class methods 'inline' i.e. inside of class body - // 2536 - type qualifiers are meaningless here - // ICC 12 generates this warning when a function return type is const qualified, even if that type is a template-parameter-dependent // typedef that may be a reference type. 
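The Constants.h hunk above replaces the NoChange / Sequential / Default tag objects with plain enums, which keeps overload disambiguation working while avoiding the Intel Composer XE "declared but never referenced" warning mentioned in the new comment (bug 450). Call sites do not change; a minimal usage sketch:

#include <Eigen/Core>

int main()
{
  Eigen::MatrixXd m(3, 4);
  m.resize(Eigen::NoChange, 6);               // keep the 3 rows, switch to 6 columns
  m.conservativeResize(5, Eigen::NoChange);   // grow to 5 rows, keep the 6 columns
  return (m.rows() == 5 && m.cols() == 6) ? 0 : 1;
}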
// 279 - controlling expression is constant // ICC 12 generates this warning on assert(constant_expression_depending_on_template_params) and frankly this is a legitimate use case. #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS #pragma warning push #endif - #pragma warning disable 2196 2536 279 + #pragma warning disable 2196 279 #elif defined __clang__ // -Wconstant-logical-operand - warning: use of logical && with constant operand; switch to bitwise & or remove constant // this is really a stupid warning as it warns on compile-time expressions involving enums diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/ForwardDeclarations.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/ForwardDeclarations.h index 7fbccf98c..e5303f052 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/ForwardDeclarations.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/ForwardDeclarations.h @@ -26,6 +26,7 @@ #ifndef EIGEN_FORWARDDECLARATIONS_H #define EIGEN_FORWARDDECLARATIONS_H +namespace Eigen { namespace internal { template struct traits; @@ -133,6 +134,7 @@ template class WithFormat; template struct CommaInitializer; template class ReturnByValue; template class ArrayWrapper; +template class MatrixWrapper; namespace internal { template struct solve_retval_base; @@ -282,6 +284,8 @@ template class Homogeneous; // MatrixFunctions module template struct MatrixExponentialReturnValue; template class MatrixFunctionReturnValue; +template class MatrixSquareRootReturnValue; +template class MatrixLogarithmReturnValue; namespace internal { template @@ -304,4 +308,6 @@ template struct eigen2_part_return_type; } #endif +} // end namespace Eigen + #endif // EIGEN_FORWARDDECLARATIONS_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/MKL_support.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/MKL_support.h new file mode 100644 index 000000000..1e6e355d6 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/MKL_support.h @@ -0,0 +1,109 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * Include file with common MKL declarations + ******************************************************************************** +*/ + +#ifndef EIGEN_MKL_SUPPORT_H +#define EIGEN_MKL_SUPPORT_H + +#ifdef EIGEN_USE_MKL_ALL + #ifndef EIGEN_USE_BLAS + #define EIGEN_USE_BLAS + #endif + #ifndef EIGEN_USE_LAPACKE + #define EIGEN_USE_LAPACKE + #endif + #ifndef EIGEN_USE_MKL_VML + #define EIGEN_USE_MKL_VML + #endif +#endif + +#ifdef EIGEN_USE_LAPACKE_STRICT + #define EIGEN_USE_LAPACKE +#endif + +#if defined(EIGEN_USE_BLAS) || defined(EIGEN_USE_LAPACKE) || defined(EIGEN_USE_MKL_VML) + #define EIGEN_USE_MKL +#endif + +#if defined EIGEN_USE_MKL + +#include +#include +#define EIGEN_MKL_VML_THRESHOLD 128 + +namespace Eigen { + +typedef std::complex dcomplex; +typedef std::complex scomplex; + +namespace internal { + +template +static inline void assign_scalar_eig2mkl(MKLType& mklScalar, const EigenType& eigenScalar) { + mklScalar=eigenScalar; +} + +template +static inline void assign_conj_scalar_eig2mkl(MKLType& mklScalar, const EigenType& eigenScalar) { + mklScalar=eigenScalar; +} + +template <> +inline void assign_scalar_eig2mkl(MKL_Complex16& mklScalar, const dcomplex& eigenScalar) { + mklScalar.real=eigenScalar.real(); + mklScalar.imag=eigenScalar.imag(); +} + +template <> +inline void assign_scalar_eig2mkl(MKL_Complex8& mklScalar, const scomplex& eigenScalar) { + mklScalar.real=eigenScalar.real(); + mklScalar.imag=eigenScalar.imag(); +} + +template <> +inline void assign_conj_scalar_eig2mkl(MKL_Complex16& mklScalar, const dcomplex& eigenScalar) { + mklScalar.real=eigenScalar.real(); + mklScalar.imag=-eigenScalar.imag(); +} + +template <> +inline void assign_conj_scalar_eig2mkl(MKL_Complex8& mklScalar, const scomplex& eigenScalar) { + mklScalar.real=eigenScalar.real(); + mklScalar.imag=-eigenScalar.imag(); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif + +#endif // EIGEN_MKL_SUPPORT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Macros.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Macros.h index b7c2b79af..b361a05d4 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Macros.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Macros.h @@ -1,4 +1,3 @@ - // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // @@ -28,8 +27,8 @@ #define EIGEN_MACROS_H #define EIGEN_WORLD_VERSION 3 -#define EIGEN_MAJOR_VERSION 0 -#define EIGEN_MINOR_VERSION 5 +#define EIGEN_MAJOR_VERSION 1 +#define EIGEN_MINOR_VERSION 0 #define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \ (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \ @@ -235,12 +234,16 @@ #define EIGEN_ONLY_USED_FOR_DEBUG(x) #endif -#if (defined __GNUC__) -#define EIGEN_DEPRECATED __attribute__((deprecated)) -#elif (defined _MSC_VER) -#define EIGEN_DEPRECATED __declspec(deprecated) +#ifndef EIGEN_NO_DEPRECATED_WARNING + #if (defined __GNUC__) + #define EIGEN_DEPRECATED __attribute__((deprecated)) + #elif (defined _MSC_VER) + #define EIGEN_DEPRECATED __declspec(deprecated) + #else + #define EIGEN_DEPRECATED + #endif #else -#define EIGEN_DEPRECATED + #define EIGEN_DEPRECATED #endif #if (defined __GNUC__) @@ -252,7 +255,7 @@ // Suppresses 'unused variable' warnings. 
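The new MKL_support.h shown above is the switchboard for the Intel bindings added throughout this patch: EIGEN_USE_MKL_ALL turns on EIGEN_USE_BLAS, EIGEN_USE_LAPACKE and EIGEN_USE_MKL_VML, which in turn activate the *_MKL.h product and solver specializations. A minimal usage sketch, assuming MKL headers and libraries are available to the build:

#define EIGEN_USE_MKL_ALL   // must be defined before any Eigen header is included
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(512, 512);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(512, 512);

  // With the TriangularMatrixMatrix_MKL.h specializations, this triangular
  // product is forwarded to MKL ?TRMM instead of Eigen's built-in kernel.
  Eigen::MatrixXd C = A.triangularView<Eigen::Lower>() * B;

  return C.size() > 0 ? 0 : 1;
}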
#define EIGEN_UNUSED_VARIABLE(var) (void)var; -#if (defined __GNUC__) +#if !defined(EIGEN_ASM_COMMENT) && (defined __GNUC__) #define EIGEN_ASM_COMMENT(X) asm("#" X) #else #define EIGEN_ASM_COMMENT(X) @@ -265,7 +268,7 @@ * If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link * vectorized and non-vectorized code. */ -#if (defined __GNUC__) || (defined __PGI) || (defined __IBMCPP__) +#if (defined __GNUC__) || (defined __PGI) || (defined __IBMCPP__) || (defined __ARMCC_VERSION) #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n))) #elif (defined _MSC_VER) #define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n)) diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Memory.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Memory.h index 023716dc9..56a16b5cb 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Memory.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Memory.h @@ -80,6 +80,8 @@ #define EIGEN_HAS_MM_MALLOC 0 #endif +namespace Eigen { + namespace internal { inline void throw_std_bad_alloc() @@ -457,7 +459,7 @@ template inline void conditional_aligned_delete_auto(T * * There is also the variant first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h. */ template -inline static Index first_aligned(const Scalar* array, Index size) +static inline Index first_aligned(const Scalar* array, Index size) { typedef typename packet_traits::type Packet; enum { PacketSize = packet_traits::size, @@ -483,7 +485,26 @@ inline static Index first_aligned(const Scalar* array, Index size) } } -} // end namespace internal + +// std::copy is much slower than memcpy, so let's introduce a smart_copy which +// use memcpy on trivial types, i.e., on types that does not require an initialization ctor. +template struct smart_copy_helper; + +template void smart_copy(const T* start, const T* end, T* target) +{ + smart_copy_helper::RequireInitialization>::run(start, end, target); +} + +template struct smart_copy_helper { + static inline void run(const T* start, const T* end, T* target) + { memcpy(target, start, std::ptrdiff_t(end)-std::ptrdiff_t(start)); } +}; + +template struct smart_copy_helper { + static inline void run(const T* start, const T* end, T* target) + { std::copy(start, end, target); } +}; + /***************************************************************************** *** Implementation of runtime stack allocation (falling back to malloc) *** @@ -499,8 +520,6 @@ inline static Index first_aligned(const Scalar* array, Index size) #endif #endif -namespace internal { - // This helper class construct the allocated memory, and takes care of destructing and freeing the handled data // at destruction time. In practice this helper class is mainly useful to avoid memory leak in case of exceptions. template class aligned_stack_memory_handler @@ -531,14 +550,14 @@ template class aligned_stack_memory_handler bool m_deallocate; }; -} +} // end namespace internal /** \internal * Declares, allocates and construct an aligned buffer named NAME of SIZE elements of type TYPE on the stack * if SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the platform * (currently, this is Linux and Visual Studio only). Otherwise the memory is allocated on the heap. * The allocated buffer is automatically deleted when exiting the scope of this declaration. - * If BUFFER is non nul, then the declared variable is simply an alias for BUFFER, and no allocation/deletion occurs. 
+ * If BUFFER is non null, then the declared variable is simply an alias for BUFFER, and no allocation/deletion occurs. * Here is an example: * \code * { @@ -619,7 +638,7 @@ template class aligned_stack_memory_handler #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true) #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \ - EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%16==0)) + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%16==0))) /****************************************************************************/ @@ -667,24 +686,24 @@ public: return &value; } - aligned_allocator() throw() + aligned_allocator() { } - aligned_allocator( const aligned_allocator& ) throw() + aligned_allocator( const aligned_allocator& ) { } template - aligned_allocator( const aligned_allocator& ) throw() + aligned_allocator( const aligned_allocator& ) { } - ~aligned_allocator() throw() + ~aligned_allocator() { } - size_type max_size() const throw() + size_type max_size() const { return (std::numeric_limits::max)(); } @@ -701,6 +720,15 @@ public: ::new( p ) T( value ); } + // Support for c++11 +#if (__cplusplus >= 201103L) + template + void construct(pointer p, Args&&... args) + { + ::new(p) T(std::forward(args)...); + } +#endif + void destroy( pointer p ) { p->~T(); @@ -720,19 +748,21 @@ public: //---------- Cache sizes ---------- -#if defined(__GNUC__) && ( defined(__i386__) || defined(__x86_64__) ) -# if defined(__PIC__) && defined(__i386__) - // Case for x86 with PIC -# define EIGEN_CPUID(abcd,func,id) \ - __asm__ __volatile__ ("xchgl %%ebx, %%esi;cpuid; xchgl %%ebx,%%esi": "=a" (abcd[0]), "=S" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id)); -# else - // Case for x86_64 or x86 w/o PIC -# define EIGEN_CPUID(abcd,func,id) \ - __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id) ); -# endif -#elif defined(_MSC_VER) -# if (_MSC_VER > 1500) -# define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id) +#if !defined(EIGEN_NO_CPUID) +# if defined(__GNUC__) && ( defined(__i386__) || defined(__x86_64__) ) +# if defined(__PIC__) && defined(__i386__) + // Case for x86 with PIC +# define EIGEN_CPUID(abcd,func,id) \ + __asm__ __volatile__ ("xchgl %%ebx, %%esi;cpuid; xchgl %%ebx,%%esi": "=a" (abcd[0]), "=S" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id)); +# else + // Case for x86_64 or x86 w/o PIC +# define EIGEN_CPUID(abcd,func,id) \ + __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id) ); +# endif +# elif defined(_MSC_VER) +# if (_MSC_VER > 1500) +# define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id) +# endif # endif #endif @@ -742,7 +772,7 @@ namespace internal { inline bool cpuid_is_vendor(int abcd[4], const char* vendor) { - return abcd[1]==((int*)(vendor))[0] && abcd[3]==((int*)(vendor))[1] && abcd[2]==((int*)(vendor))[2]; + return abcd[1]==(reinterpret_cast(vendor))[0] && abcd[3]==(reinterpret_cast(vendor))[1] && abcd[2]==(reinterpret_cast(vendor))[2]; } inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3) @@ -932,4 +962,6 @@ inline int queryTopLevelCacheSize() } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_MEMORY_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Meta.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Meta.h index 4518261ef..82c93f922 
100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Meta.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Meta.h @@ -26,6 +26,8 @@ #ifndef EIGEN_META_H #define EIGEN_META_H +namespace Eigen { + namespace internal { /** \internal @@ -80,8 +82,6 @@ template<> struct is_arithmetic { enum { value = true }; }; template<> struct is_arithmetic { enum { value = true }; }; template<> struct is_arithmetic { enum { value = true }; }; template<> struct is_arithmetic { enum { value = true }; }; -template<> struct is_arithmetic { enum { value = true }; }; -template<> struct is_arithmetic { enum { value = true }; }; template struct add_const { typedef const T type; }; template struct add_const { typedef T& type; }; @@ -103,6 +103,21 @@ template struct enable_if; template struct enable_if { typedef T type; }; + + +/** \internal + * A base class do disable default copy ctor and copy assignement operator. + */ +class noncopyable +{ + noncopyable(const noncopyable&); + const noncopyable& operator=(const noncopyable&); +protected: + noncopyable() {} + ~noncopyable() {} +}; + + /** \internal * Convenient struct to get the result type of a unary or binary functor. * @@ -226,4 +241,6 @@ template struct is_diagonal > } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_META_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/StaticAssert.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/StaticAssert.h index 99c7c9972..9a5dbbbf3 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/StaticAssert.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/StaticAssert.h @@ -48,6 +48,8 @@ #else // not CXX0X + namespace Eigen { + namespace internal { template @@ -70,6 +72,7 @@ YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR, UNALIGNED_LOAD_AND_STORE_OPERATIONS_UNIMPLEMENTED_ON_ALTIVEC, THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES, + FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED, NUMERIC_TYPE_MUST_BE_REAL, COEFFICIENT_WRITE_ACCESS_TO_SELFADJOINT_NOT_SUPPORTED, WRITING_TO_TRIANGULAR_PART_WITH_UNIT_DIAGONAL_IS_NOT_SUPPORTED, @@ -95,12 +98,20 @@ YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION, THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY, YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT, - THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS + THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS, + THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL, + THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES, + YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED, + YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED, + THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE, + THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH }; }; } // end namespace internal + } // end namespace Eigen + // Specialized implementation for MSVC to avoid "conditional // expression is constant" warnings. This implementation doesn't // appear to work under GCC, hence the multiple implementations. 
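Meta.h above also introduces internal::noncopyable, a small C++03-style base class whose copy constructor and copy assignment are declared private, so resource-owning helpers can opt out of copying by deriving from it. An illustrative sketch (the derived class name is made up for the example):

#include <Eigen/Core>

class PackedWorkspace : private Eigen::internal::noncopyable
{
  // would own raw buffers; copying could double-free, so copies are disabled
};

int main()
{
  PackedWorkspace w;            // default construction is fine (protected ctor in the base)
  // PackedWorkspace w2(w);     // would not compile: the copy constructor is private
  (void)w;
  return 0;
}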
@@ -195,4 +206,15 @@ EIGEN_STATIC_ASSERT(internal::is_lvalue::value, \ THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY) +#define EIGEN_STATIC_ASSERT_ARRAYXPR(Derived) \ + EIGEN_STATIC_ASSERT((internal::is_same::XprKind, ArrayXpr>::value), \ + THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES) + +#define EIGEN_STATIC_ASSERT_SAME_XPR_KIND(Derived1, Derived2) \ + EIGEN_STATIC_ASSERT((internal::is_same::XprKind, \ + typename internal::traits::XprKind \ + >::value), \ + YOU_CANNOT_MIX_ARRAYS_AND_MATRICES) + + #endif // EIGEN_STATIC_ASSERT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/XprHelper.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/XprHelper.h index c2078f137..5bb0a624f 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/XprHelper.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/XprHelper.h @@ -37,6 +37,8 @@ #define EIGEN_EMPTY_STRUCT_CTOR(X) #endif +namespace Eigen { + typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex; namespace internal { @@ -260,30 +262,27 @@ template struct plain_matrix_type_row_major // we should be able to get rid of this one too template struct must_nest_by_value { enum { ret = false }; }; -template -struct is_reference -{ - enum { ret = false }; -}; - -template -struct is_reference -{ - enum { ret = true }; -}; - -/** -* \internal The reference selector for template expressions. The idea is that we don't -* need to use references for expressions since they are light weight proxy -* objects which should generate no copying overhead. -**/ +/** \internal The reference selector for template expressions. The idea is that we don't + * need to use references for expressions since they are light weight proxy + * objects which should generate no copying overhead. */ template struct ref_selector { typedef typename conditional< bool(traits::Flags & NestByRefBit), T const&, - T + const T + >::type type; +}; + +/** \internal Adds the const qualifier on the value-type of T2 if and only if T1 is a const type */ +template +struct transfer_constness +{ + typedef typename conditional< + bool(internal::is_const::value), + typename internal::add_const_on_value_type::type, + T2 >::type type; }; @@ -297,6 +296,8 @@ struct ref_selector * \param T the type of the expression being nested * \param n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression. * + * Note that if no evaluation occur, then the constness of T is preserved. + * * Example. Suppose that a, b, and c are of type Matrix3d. The user forms the expression a*(b+c). * b+c is an expression "sum of matrices", which we will denote by S. In order to determine how to nest it, * the Product expression uses: nested::ret, which turns out to be Matrix3d because the internal logic of @@ -456,4 +457,6 @@ struct is_lvalue } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_XPRHELPER_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Block.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Block.h index bc28051e0..d77ff62bb 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Block.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Block.h @@ -26,6 +26,8 @@ #ifndef EIGEN_BLOCK2_H #define EIGEN_BLOCK2_H +namespace Eigen { + /** \returns a dynamic-size expression of a corner of *this. * * \param type the type of corner. 
Can be \a Eigen::TopLeft, \a Eigen::TopRight, @@ -134,4 +136,6 @@ DenseBase::corner(CornerType type) const } } +} // end namespace Eigen + #endif // EIGEN_BLOCK2_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Cwise.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Cwise.h index 2dc83b6a7..383645b8e 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Cwise.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Cwise.h @@ -26,6 +26,8 @@ #ifndef EIGEN_CWISE_H #define EIGEN_CWISE_H +namespace Eigen { + /** \internal * convenient macro to defined the return type of a cwise binary operation */ #define EIGEN_CWISE_BINOP_RETURN_TYPE(OP) \ @@ -200,4 +202,6 @@ inline Cwise MatrixBase::cwise() return derived(); } +} // end namespace Eigen + #endif // EIGEN_CWISE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/CwiseOperators.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/CwiseOperators.h index 9c28559c3..207a167c1 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/CwiseOperators.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/CwiseOperators.h @@ -25,6 +25,8 @@ #ifndef EIGEN_ARRAY_CWISE_OPERATORS_H #define EIGEN_ARRAY_CWISE_OPERATORS_H +namespace Eigen { + /*************************************************************************** * The following functions were defined in Core ***************************************************************************/ @@ -306,4 +308,6 @@ inline ExpressionType& Cwise::operator-=(const Scalar& scalar) return m_matrix.const_cast_derived() = *this - scalar; } +} // end namespace Eigen + #endif // EIGEN_ARRAY_CWISE_OPERATORS_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/AlignedBox.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/AlignedBox.h index 78df29d40..dd29dfc34 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/AlignedBox.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/AlignedBox.h @@ -24,6 +24,8 @@ // no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway +namespace Eigen { + /** \geometry_module \ingroup Geometry_Module * \nonstableyet * @@ -63,7 +65,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim== ~AlignedBox() {} /** \returns the dimension in which the box holds */ - inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : int(AmbientDimAtCompileTime); } + inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : AmbientDimAtCompileTime; } /** \returns true if the box is null, i.e, empty. 
*/ inline bool isNull() const { return (m_min.cwise() > m_max).any(); } @@ -157,14 +159,16 @@ protected: template inline Scalar AlignedBox::squaredExteriorDistance(const VectorType& p) const { - Scalar dist2 = 0.; + Scalar dist2(0); Scalar aux; for (int k=0; k::toRotationMatrix(void) const return res; } + +} // end namespace Eigen diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Hyperplane.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Hyperplane.h index 81c4f55b1..8b4f7a080 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Hyperplane.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Hyperplane.h @@ -25,6 +25,8 @@ // no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway +namespace Eigen { + /** \geometry_module \ingroup Geometry_Module * * \class Hyperplane @@ -263,3 +265,5 @@ protected: Coefficients m_coeffs; }; + +} // end namespace Eigen diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h index 411c4b570..cc8eb7089 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h @@ -25,6 +25,7 @@ // no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway +namespace Eigen { /** \geometry_module \ingroup Geometry_Module * @@ -151,3 +152,5 @@ inline _Scalar ParametrizedLine<_Scalar, _AmbientDim>::intersection(const Hyperp return -(hyperplane.offset()+origin().eigen2_dot(hyperplane.normal())) /(direction().eigen2_dot(hyperplane.normal())); } + +} // end namespace Eigen diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Quaternion.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Quaternion.h index a75fa42ae..616671c67 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Quaternion.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Quaternion.h @@ -24,6 +24,8 @@ // no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway +namespace Eigen { + template @@ -143,7 +145,7 @@ public: /** \returns a quaternion representing an identity rotation * \sa MatrixBase::Identity() */ - inline static Quaternion Identity() { return Quaternion(1, 0, 0, 0); } + static inline Quaternion Identity() { return Quaternion(1, 0, 0, 0); } /** \sa Quaternion::Identity(), MatrixBase::setIdentity() */ @@ -314,9 +316,9 @@ Quaternion::toRotationMatrix(void) const // it has to be inlined, and so the return by value is not an issue Matrix3 res; - const Scalar tx = 2*this->x(); - const Scalar ty = 2*this->y(); - const Scalar tz = 2*this->z(); + const Scalar tx = Scalar(2)*this->x(); + const Scalar ty = Scalar(2)*this->y(); + const Scalar tz = Scalar(2)*this->z(); const Scalar twx = tx*this->w(); const Scalar twy = ty*this->w(); const Scalar twz = tz*this->w(); @@ -327,15 +329,15 @@ Quaternion::toRotationMatrix(void) const const Scalar tyz = tz*this->y(); const Scalar tzz = tz*this->z(); - res.coeffRef(0,0) = 1-(tyy+tzz); + res.coeffRef(0,0) = Scalar(1)-(tyy+tzz); res.coeffRef(0,1) = txy-twz; res.coeffRef(0,2) = txz+twy; res.coeffRef(1,0) = txy+twz; - res.coeffRef(1,1) = 1-(txx+tzz); + res.coeffRef(1,1) = Scalar(1)-(txx+tzz); res.coeffRef(1,2) = tyz-twx; res.coeffRef(2,0) = txz-twy; res.coeffRef(2,1) = tyz+twx; - res.coeffRef(2,2) = 1-(txx+tyy); + res.coeffRef(2,2) = Scalar(1)-(txx+tyy); 
return res; } @@ -460,7 +462,7 @@ template struct ei_quaternion_assign_impl { typedef typename Other::Scalar Scalar; - inline static void run(Quaternion& q, const Other& mat) + static inline void run(Quaternion& q, const Other& mat) { // This algorithm comes from "Quaternion Calculus and Fast Animation", // Ken Shoemake, 1987 SIGGRAPH course notes @@ -499,8 +501,10 @@ template struct ei_quaternion_assign_impl { typedef typename Other::Scalar Scalar; - inline static void run(Quaternion& q, const Other& vec) + static inline void run(Quaternion& q, const Other& vec) { q.coeffs() = vec; } }; + +} // end namespace Eigen diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Rotation2D.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Rotation2D.h index ee7c80e7e..0993fa5bb 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Rotation2D.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Rotation2D.h @@ -24,6 +24,7 @@ // no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway +namespace Eigen { /** \geometry_module \ingroup Geometry_Module * @@ -155,3 +156,5 @@ Rotation2D::toRotationMatrix(void) const Scalar cosA = ei_cos(m_angle); return (Matrix2() << cosA, -sinA, sinA, cosA).finished(); } + +} // end namespace Eigen diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/RotationBase.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/RotationBase.h index 2f494f198..b65abfe0d 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/RotationBase.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/RotationBase.h @@ -24,6 +24,8 @@ // no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway +namespace Eigen { + // this file aims to contains the various representations of rotation/orientation // in 2D and 3D space excepted Matrix and Quaternion. 
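The Quaternion::toRotationMatrix hunk just above only rewrites the numeric literals as Scalar(1) and Scalar(2) so the routine also compiles for custom scalar types; the matrix it assembles is unchanged. For reference, the expression the code computes, assuming q = (w, x, y, z) is of unit norm, is the standard quaternion-to-rotation-matrix formula:

    R(q) = \begin{pmatrix}
      1 - 2(y^2 + z^2) & 2(xy - wz)       & 2(xz + wy) \\
      2(xy + wz)       & 1 - 2(x^2 + z^2) & 2(yz - wx) \\
      2(xz - wy)       & 2(yz + wx)       & 1 - 2(x^2 + y^2)
    \end{pmatrix}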
@@ -113,22 +115,24 @@ Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols> * \sa class Transform, class Rotation2D, class Quaternion, class AngleAxis */ template -inline static Matrix ei_toRotationMatrix(const Scalar& s) +static inline Matrix ei_toRotationMatrix(const Scalar& s) { EIGEN_STATIC_ASSERT(Dim==2,YOU_MADE_A_PROGRAMMING_MISTAKE) return Rotation2D(s).toRotationMatrix(); } template -inline static Matrix ei_toRotationMatrix(const RotationBase& r) +static inline Matrix ei_toRotationMatrix(const RotationBase& r) { return r.toRotationMatrix(); } template -inline static const MatrixBase& ei_toRotationMatrix(const MatrixBase& mat) +static inline const MatrixBase& ei_toRotationMatrix(const MatrixBase& mat) { EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim, YOU_MADE_A_PROGRAMMING_MISTAKE) return mat; } + +} // end namespace Eigen diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Scaling.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Scaling.h index 108e6d7d5..8e47c78fe 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Scaling.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Scaling.h @@ -24,6 +24,7 @@ // no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway +namespace Eigen { /** \geometry_module \ingroup Geometry_Module * @@ -177,3 +178,5 @@ Scaling::operator* (const TransformType& t) const res.prescale(m_coeffs); return res; } + +} // end namespace Eigen diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Transform.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Transform.h index 88956c86c..28dcc03b7 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Transform.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Transform.h @@ -25,6 +25,7 @@ // no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway +namespace Eigen { // Note that we have to pass Dim and HDim because it is not allowed to use a template // parameter to define a template specialization. 
To be more precise, in the following @@ -796,3 +797,5 @@ struct ei_transform_product_impl { return ((tr.linear() * other) + tr.translation()) * (Scalar(1) / ( (tr.matrix().template block<1,Dim>(Dim,0) * other).coeff(0) + tr.matrix().coeff(Dim,Dim))); } }; + +} // end namespace Eigen diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Translation.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Translation.h index e651e3102..dd6256893 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Translation.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/Translation.h @@ -24,6 +24,7 @@ // no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway +namespace Eigen { /** \geometry_module \ingroup Geometry_Module * @@ -194,3 +195,5 @@ Translation::operator* (const TransformType& t) const res.pretranslate(m_coeffs); return res; } + +} // end namespace Eigen diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/LU.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/LU.h index c23c11baa..0620096af 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/LU.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/LU.h @@ -25,6 +25,8 @@ #ifndef EIGEN2_LU_H #define EIGEN2_LU_H +namespace Eigen { + template class LU : public FullPivLU { @@ -57,7 +59,6 @@ class LU : public FullPivLU > ImageResultType; typedef FullPivLU Base; - LU() : Base() {} template explicit LU(const T& t) : Base(t), m_originalMatrix(t) {} @@ -129,5 +130,6 @@ MatrixBase::eigen2_lu() const } #endif +} // end namespace Eigen #endif // EIGEN2_LU_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Lazy.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Lazy.h index c4288ede2..a1fb9c753 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Lazy.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Lazy.h @@ -25,6 +25,8 @@ #ifndef EIGEN_LAZY_H #define EIGEN_LAZY_H +namespace Eigen { + /** \deprecated it is only used by lazy() which is deprecated * * \returns an expression of *this with added flags @@ -79,4 +81,6 @@ Derived& MatrixBase::operator-=(const Flaggedoffset() = - (result->normal().cwise()* mean).sum(); } +} // end namespace Eigen #endif // EIGEN2_LEASTSQUARES_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/MathFunctions.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/MathFunctions.h index caa44e63f..2baf4bb8f 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/MathFunctions.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/MathFunctions.h @@ -25,6 +25,8 @@ #ifndef EIGEN2_MATH_FUNCTIONS_H #define EIGEN2_MATH_FUNCTIONS_H +namespace Eigen { + template inline typename NumTraits::Real ei_real(const T& x) { return internal::real(x); } template inline typename NumTraits::Real ei_imag(const T& x) { return internal::imag(x); } template inline T ei_conj(const T& x) { return internal::conj(x); } @@ -65,4 +67,6 @@ inline bool ei_isApproxOrLessThan(const Scalar& x, const Scalar& y, return internal::isApproxOrLessThan(x, y, precision); } +} // end namespace Eigen + #endif // EIGEN2_MATH_FUNCTIONS_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Memory.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Memory.h index 028347541..0588e2b34 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Memory.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Memory.h @@ -25,6 +25,8 @@ #ifndef EIGEN2_MEMORY_H #define EIGEN2_MEMORY_H +namespace Eigen { + inline void* ei_aligned_malloc(size_t size) { return 
internal::aligned_malloc(size); } inline void ei_aligned_free(void *ptr) { internal::aligned_free(ptr); } inline void* ei_aligned_realloc(void *ptr, size_t new_size, size_t old_size) { return internal::aligned_realloc(ptr, new_size, old_size); } @@ -53,6 +55,6 @@ template inline void ei_aligned_delete(T *ptr, size_t size) return internal::aligned_delete(ptr, size); } - +} // end namespace Eigen #endif // EIGEN2_MACROS_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Meta.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Meta.h index 6e500b79a..70c210822 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Meta.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Meta.h @@ -25,6 +25,8 @@ #ifndef EIGEN2_META_H #define EIGEN2_META_H +namespace Eigen { + template struct ei_traits : internal::traits {}; @@ -83,4 +85,6 @@ class ei_meta_sqrt template class ei_meta_sqrt { public: enum { ret = (SupX*SupX <= Y) ? SupX : InfX }; }; +} // end namespace Eigen + #endif // EIGEN2_META_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Minor.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Minor.h index eda91cc32..964e9546d 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Minor.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Minor.h @@ -25,6 +25,8 @@ #ifndef EIGEN_MINOR_H #define EIGEN_MINOR_H +namespace Eigen { + /** * \class Minor * @@ -125,4 +127,6 @@ MatrixBase::minor(Index row, Index col) const return Minor(derived(), row, col); } +} // end namespace Eigen + #endif // EIGEN_MINOR_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/QR.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/QR.h index 64f5d5ccb..60fc21f56 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/QR.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/QR.h @@ -26,6 +26,8 @@ #ifndef EIGEN2_QR_H #define EIGEN2_QR_H +namespace Eigen { + template class QR : public HouseholderQR { @@ -75,5 +77,6 @@ MatrixBase::qr() const return QR(eval()); } +} // end namespace Eigen #endif // EIGEN2_QR_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/SVD.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/SVD.h index 16b4b488f..ff3b8a416 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/SVD.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/SVD.h @@ -25,6 +25,8 @@ #ifndef EIGEN2_SVD_H #define EIGEN2_SVD_H +namespace Eigen { + /** \ingroup SVD_Module * \nonstableyet * @@ -390,7 +392,7 @@ void SVD::compute(const MatrixType& matrix) Scalar ek = e[k]/scale; Scalar b = ((spm1 + sp)*(spm1 - sp) + epm1*epm1)/Scalar(2); Scalar c = (sp*epm1)*(sp*epm1); - Scalar shift = 0.0; + Scalar shift(0); if ((b != 0.0) || (c != 0.0)) { shift = ei_sqrt(b*b + c); @@ -646,4 +648,6 @@ MatrixBase::svd() const return SVD(derived()); } +} // end namespace Eigen + #endif // EIGEN2_SVD_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/TriangularSolver.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/TriangularSolver.h index e94e47a50..e3374d8c0 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/TriangularSolver.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/TriangularSolver.h @@ -25,6 +25,8 @@ #ifndef EIGEN_TRIANGULAR_SOLVER2_H #define EIGEN_TRIANGULAR_SOLVER2_H +namespace Eigen { + const unsigned int UnitDiagBit = UnitDiag; const unsigned int SelfAdjointBit = SelfAdjoint; const unsigned int UpperTriangularBit = Upper; @@ -49,5 +51,7 @@ void Flagged::solveTriangularInPlace(const MatrixB { m_matrix.template triangularView().solveInPlace(other.derived()); } + +} // end namespace Eigen #endif 
// EIGEN_TRIANGULAR_SOLVER2_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/VectorBlock.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/VectorBlock.h index 010031d19..8967c9019 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/VectorBlock.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/VectorBlock.h @@ -26,6 +26,8 @@ #ifndef EIGEN2_VECTORBLOCK_H #define EIGEN2_VECTORBLOCK_H +namespace Eigen { + /** \deprecated use DenseMase::head(Index) */ template inline VectorBlock @@ -102,4 +104,6 @@ MatrixBase::end() const return VectorBlock(derived(), size() - Size); } +} // end namespace Eigen + #endif // EIGEN2_VECTORBLOCK_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h index 57e00227d..91b4fa1e2 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h @@ -27,9 +27,10 @@ #ifndef EIGEN_COMPLEX_EIGEN_SOLVER_H #define EIGEN_COMPLEX_EIGEN_SOLVER_H -#include "./EigenvaluesCommon.h" #include "./ComplexSchur.h" +namespace Eigen { + /** \eigenvalues_module \ingroup Eigenvalues_Module * * @@ -328,5 +329,6 @@ void ComplexEigenSolver::sortEigenvalues(bool computeEigenvectors) } } +} // end namespace Eigen #endif // EIGEN_COMPLEX_EIGEN_SOLVER_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/ComplexSchur.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/ComplexSchur.h index ec93af2e5..1a49cca13 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/ComplexSchur.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/ComplexSchur.h @@ -27,9 +27,10 @@ #ifndef EIGEN_COMPLEX_SCHUR_H #define EIGEN_COMPLEX_SCHUR_H -#include "./EigenvaluesCommon.h" #include "./HessenbergDecomposition.h" +namespace Eigen { + namespace internal { template struct complex_schur_reduce_to_hessenberg; } @@ -227,46 +228,6 @@ template class ComplexSchur friend struct internal::complex_schur_reduce_to_hessenberg::IsComplex>; }; -namespace internal { - -/** Computes the principal value of the square root of the complex \a z. */ -template -std::complex sqrt(const std::complex &z) -{ - RealScalar t, tre, tim; - - t = abs(z); - - if (abs(real(z)) <= abs(imag(z))) - { - // No cancellation in these formulas - tre = sqrt(RealScalar(0.5)*(t + real(z))); - tim = sqrt(RealScalar(0.5)*(t - real(z))); - } - else - { - // Stable computation of the above formulas - if (z.real() > RealScalar(0)) - { - tre = t + z.real(); - tim = abs(imag(z))*sqrt(RealScalar(0.5)/tre); - tre = sqrt(RealScalar(0.5)*tre); - } - else - { - tim = t - z.real(); - tre = abs(imag(z))*sqrt(RealScalar(0.5)/tim); - tim = sqrt(RealScalar(0.5)*tim); - } - } - if(z.imag() < RealScalar(0)) - tim = -tim; - - return (std::complex(tre,tim)); -} -} // end namespace internal - - /** If m_matT(i+1,i) is neglegible in floating point arithmetic * compared to m_matT(i,i) and m_matT(j,j), then set it to zero and * return true, else return false. 
*/ @@ -302,7 +263,7 @@ typename ComplexSchur::ComplexScalar ComplexSchur::compu ComplexScalar b = t.coeff(0,1) * t.coeff(1,0); ComplexScalar c = t.coeff(0,0) - t.coeff(1,1); - ComplexScalar disc = internal::sqrt(c*c + RealScalar(4)*b); + ComplexScalar disc = sqrt(c*c + RealScalar(4)*b); ComplexScalar det = t.coeff(0,0) * t.coeff(1,1) - b; ComplexScalar trace = t.coeff(0,0) + t.coeff(1,1); ComplexScalar eival1 = (trace + disc) / RealScalar(2); @@ -445,4 +406,6 @@ void ComplexSchur::reduceToTriangularForm(bool computeU) m_matUisUptodate = computeU; } +} // end namespace Eigen + #endif // EIGEN_COMPLEX_SCHUR_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/ComplexSchur_MKL.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/ComplexSchur_MKL.h new file mode 100644 index 000000000..e21a8d836 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/ComplexSchur_MKL.h @@ -0,0 +1,94 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * Complex Schur needed to complex unsymmetrical eigenvalues/eigenvectors. 
+ ******************************************************************************** +*/ + +#ifndef EIGEN_COMPLEX_SCHUR_MKL_H +#define EIGEN_COMPLEX_SCHUR_MKL_H + +#include "Eigen/src/Core/util/MKL_support.h" + +namespace Eigen { + +/** \internal Specialization for the data types supported by MKL */ + +#define EIGEN_MKL_SCHUR_COMPLEX(EIGTYPE, MKLTYPE, MKLPREFIX, MKLPREFIX_U, EIGCOLROW, MKLCOLROW) \ +template<> \ +ComplexSchur >& \ +ComplexSchur >::compute(const Matrix& matrix, bool computeU) \ +{ \ + typedef Matrix MatrixType; \ + typedef MatrixType::Scalar Scalar; \ + typedef MatrixType::RealScalar RealScalar; \ + typedef std::complex ComplexScalar; \ +\ + assert(matrix.cols() == matrix.rows()); \ +\ + m_matUisUptodate = false; \ + if(matrix.cols() == 1) \ + { \ + m_matT = matrix.cast(); \ + if(computeU) m_matU = ComplexMatrixType::Identity(1,1); \ + m_info = Success; \ + m_isInitialized = true; \ + m_matUisUptodate = computeU; \ + return *this; \ + } \ + lapack_int n = matrix.cols(), sdim, info; \ + lapack_int lda = matrix.outerStride(); \ + lapack_int matrix_order = MKLCOLROW; \ + char jobvs, sort='N'; \ + LAPACK_##MKLPREFIX_U##_SELECT1 select = 0; \ + jobvs = (computeU) ? 'V' : 'N'; \ + m_matU.resize(n, n); \ + lapack_int ldvs = m_matU.outerStride(); \ + m_matT = matrix; \ + Matrix w; \ + w.resize(n, 1);\ + info = LAPACKE_##MKLPREFIX##gees( matrix_order, jobvs, sort, select, n, (MKLTYPE*)m_matT.data(), lda, &sdim, (MKLTYPE*)w.data(), (MKLTYPE*)m_matU.data(), ldvs ); \ + if(info == 0) \ + m_info = Success; \ + else \ + m_info = NoConvergence; \ +\ + m_isInitialized = true; \ + m_matUisUptodate = computeU; \ + return *this; \ +\ +} + +EIGEN_MKL_SCHUR_COMPLEX(dcomplex, MKL_Complex16, z, Z, ColMajor, LAPACK_COL_MAJOR) +EIGEN_MKL_SCHUR_COMPLEX(scomplex, MKL_Complex8, c, C, ColMajor, LAPACK_COL_MAJOR) +EIGEN_MKL_SCHUR_COMPLEX(dcomplex, MKL_Complex16, z, Z, RowMajor, LAPACK_ROW_MAJOR) +EIGEN_MKL_SCHUR_COMPLEX(scomplex, MKL_Complex8, c, C, RowMajor, LAPACK_ROW_MAJOR) + +} // end namespace Eigen + +#endif // EIGEN_COMPLEX_SCHUR_MKL_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/EigenSolver.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/EigenSolver.h index f57353c06..f9365ae59 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/EigenSolver.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/EigenSolver.h @@ -26,9 +26,10 @@ #ifndef EIGEN_EIGENSOLVER_H #define EIGEN_EIGENSOLVER_H -#include "./EigenvaluesCommon.h" #include "./RealSchur.h" +namespace Eigen { + /** \eigenvalues_module \ingroup Eigenvalues_Module * * @@ -432,7 +433,7 @@ void EigenSolver::doComputeEigenvectors() const Scalar eps = NumTraits::epsilon(); // inefficient! 
this is already computed in RealSchur - Scalar norm = 0.0; + Scalar norm(0); for (Index j = 0; j < size; ++j) { norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum(); @@ -452,7 +453,7 @@ void EigenSolver::doComputeEigenvectors() // Scalar vector if (q == Scalar(0)) { - Scalar lastr=0, lastw=0; + Scalar lastr(0), lastw(0); Index l = n; m_matT.coeffRef(n,n) = 1.0; @@ -498,7 +499,7 @@ void EigenSolver::doComputeEigenvectors() } else if (q < Scalar(0) && n > 0) // Complex vector { - Scalar lastra=0, lastsa=0, lastw=0; + Scalar lastra(0), lastsa(0), lastw(0); Index l = n-1; // Last vector component imaginary so matrix is triangular @@ -588,4 +589,6 @@ void EigenSolver::doComputeEigenvectors() } } +} // end namespace Eigen + #endif // EIGEN_EIGENSOLVER_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h index 980af14ce..4eb2b229d 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h @@ -26,9 +26,10 @@ #ifndef EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H #define EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H -#include "./EigenvaluesCommon.h" #include "./Tridiagonalization.h" +namespace Eigen { + /** \eigenvalues_module \ingroup Eigenvalues_Module * * @@ -236,4 +237,6 @@ compute(const MatrixType& matA, const MatrixType& matB, int options) return *this; } +} // end namespace Eigen + #endif // EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h index c17f155a5..88e63eba4 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h @@ -26,6 +26,8 @@ #ifndef EIGEN_HESSENBERGDECOMPOSITION_H #define EIGEN_HESSENBERGDECOMPOSITION_H +namespace Eigen { + namespace internal { template struct HessenbergDecompositionMatrixHReturnType; @@ -379,6 +381,8 @@ template struct HessenbergDecompositionMatrixHReturnType const HessenbergDecomposition& m_hess; }; -} +} // end namespace internal + +} // end namespace Eigen #endif // EIGEN_HESSENBERGDECOMPOSITION_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h index 5591519fb..a004e7e63 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h @@ -26,6 +26,8 @@ #ifndef EIGEN_MATRIXBASEEIGENVALUES_H #define EIGEN_MATRIXBASEEIGENVALUES_H +namespace Eigen { + namespace internal { template @@ -167,4 +169,6 @@ SelfAdjointView::operatorNorm() const return eigenvalues().cwiseAbs().maxCoeff(); } +} // end namespace Eigen + #endif diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/RealSchur.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/RealSchur.h index cc9af11c1..e204344e0 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/RealSchur.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/RealSchur.h @@ -26,9 +26,10 @@ #ifndef EIGEN_REAL_SCHUR_H #define EIGEN_REAL_SCHUR_H -#include "./EigenvaluesCommon.h" #include "./HessenbergDecomposition.h" +namespace Eigen { + /** \eigenvalues_module \ingroup Eigenvalues_Module * * @@ -235,41 +236,43 @@ RealSchur& 
RealSchur::compute(const MatrixType& matrix, // Rows iu+1,...,end are already brought in triangular form. Index iu = m_matT.cols() - 1; Index iter = 0; // iteration count - Scalar exshift = 0.0; // sum of exceptional shifts + Scalar exshift(0); // sum of exceptional shifts Scalar norm = computeNormOfT(); - while (iu >= 0) + if(norm!=0) { - Index il = findSmallSubdiagEntry(iu, norm); + while (iu >= 0) + { + Index il = findSmallSubdiagEntry(iu, norm); - // Check for convergence - if (il == iu) // One root found - { - m_matT.coeffRef(iu,iu) = m_matT.coeff(iu,iu) + exshift; - if (iu > 0) - m_matT.coeffRef(iu, iu-1) = Scalar(0); - iu--; - iter = 0; + // Check for convergence + if (il == iu) // One root found + { + m_matT.coeffRef(iu,iu) = m_matT.coeff(iu,iu) + exshift; + if (iu > 0) + m_matT.coeffRef(iu, iu-1) = Scalar(0); + iu--; + iter = 0; + } + else if (il == iu-1) // Two roots found + { + splitOffTwoRows(iu, computeU, exshift); + iu -= 2; + iter = 0; + } + else // No convergence yet + { + // The firstHouseholderVector vector has to be initialized to something to get rid of a silly GCC warning (-O1 -Wall -DNDEBUG ) + Vector3s firstHouseholderVector(0,0,0), shiftInfo; + computeShift(iu, iter, exshift, shiftInfo); + iter = iter + 1; + if (iter > m_maxIterations) break; + Index im; + initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector); + performFrancisQRStep(il, im, iu, computeU, firstHouseholderVector, workspace); + } } - else if (il == iu-1) // Two roots found - { - splitOffTwoRows(iu, computeU, exshift); - iu -= 2; - iter = 0; - } - else // No convergence yet - { - // The firstHouseholderVector vector has to be initialized to something to get rid of a silly GCC warning (-O1 -Wall -DNDEBUG ) - Vector3s firstHouseholderVector(0,0,0), shiftInfo; - computeShift(iu, iter, exshift, shiftInfo); - iter = iter + 1; - if (iter > m_maxIterations) break; - Index im; - initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector); - performFrancisQRStep(il, im, iu, computeU, firstHouseholderVector, workspace); - } - } - + } if(iter <= m_maxIterations) m_info = Success; else @@ -288,7 +291,7 @@ inline typename MatrixType::Scalar RealSchur::computeNormOfT() // FIXME to be efficient the following would requires a triangular reduxion code // Scalar norm = m_matT.upper().cwiseAbs().sum() // + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum(); - Scalar norm = 0.0; + Scalar norm(0); for (Index j = 0; j < size; ++j) norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum(); return norm; @@ -471,4 +474,6 @@ inline void RealSchur::performFrancisQRStep(Index il, Index im, Inde } } +} // end namespace Eigen + #endif // EIGEN_REAL_SCHUR_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/RealSchur_MKL.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/RealSchur_MKL.h new file mode 100644 index 000000000..c9689520b --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/RealSchur_MKL.h @@ -0,0 +1,83 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * Real Schur needed to real unsymmetrical eigenvalues/eigenvectors. + ******************************************************************************** +*/ + +#ifndef EIGEN_REAL_SCHUR_MKL_H +#define EIGEN_REAL_SCHUR_MKL_H + +#include "Eigen/src/Core/util/MKL_support.h" + +namespace Eigen { + +/** \internal Specialization for the data types supported by MKL */ + +#define EIGEN_MKL_SCHUR_REAL(EIGTYPE, MKLTYPE, MKLPREFIX, MKLPREFIX_U, EIGCOLROW, MKLCOLROW) \ +template<> \ +RealSchur >& \ +RealSchur >::compute(const Matrix& matrix, bool computeU) \ +{ \ + typedef Matrix MatrixType; \ + typedef MatrixType::Scalar Scalar; \ + typedef MatrixType::RealScalar RealScalar; \ +\ + assert(matrix.cols() == matrix.rows()); \ +\ + lapack_int n = matrix.cols(), sdim, info; \ + lapack_int lda = matrix.outerStride(); \ + lapack_int matrix_order = MKLCOLROW; \ + char jobvs, sort='N'; \ + LAPACK_##MKLPREFIX_U##_SELECT2 select = 0; \ + jobvs = (computeU) ? 
'V' : 'N'; \ + m_matU.resize(n, n); \ + lapack_int ldvs = m_matU.outerStride(); \ + m_matT = matrix; \ + Matrix wr, wi; \ + wr.resize(n, 1); wi.resize(n, 1); \ + info = LAPACKE_##MKLPREFIX##gees( matrix_order, jobvs, sort, select, n, (MKLTYPE*)m_matT.data(), lda, &sdim, (MKLTYPE*)wr.data(), (MKLTYPE*)wi.data(), (MKLTYPE*)m_matU.data(), ldvs ); \ + if(info == 0) \ + m_info = Success; \ + else \ + m_info = NoConvergence; \ +\ + m_isInitialized = true; \ + m_matUisUptodate = computeU; \ + return *this; \ +\ +} + +EIGEN_MKL_SCHUR_REAL(double, double, d, D, ColMajor, LAPACK_COL_MAJOR) +EIGEN_MKL_SCHUR_REAL(float, float, s, S, ColMajor, LAPACK_COL_MAJOR) +EIGEN_MKL_SCHUR_REAL(double, double, d, D, RowMajor, LAPACK_ROW_MAJOR) +EIGEN_MKL_SCHUR_REAL(float, float, s, S, RowMajor, LAPACK_ROW_MAJOR) + +} // end namespace Eigen + +#endif // EIGEN_REAL_SCHUR_MKL_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h index ad107c632..b4aa1ef20 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h @@ -26,12 +26,17 @@ #ifndef EIGEN_SELFADJOINTEIGENSOLVER_H #define EIGEN_SELFADJOINTEIGENSOLVER_H -#include "./EigenvaluesCommon.h" #include "./Tridiagonalization.h" +namespace Eigen { + template class GeneralizedSelfAdjointEigenSolver; +namespace internal { +template struct direct_selfadjoint_eigenvalues; +} + /** \eigenvalues_module \ingroup Eigenvalues_Module * * @@ -86,7 +91,7 @@ template class SelfAdjointEigenSolver Options = MatrixType::Options, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; - + /** \brief Scalar type for matrices of type \p _MatrixType. */ typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; @@ -98,6 +103,8 @@ template class SelfAdjointEigenSolver * complex. */ typedef typename NumTraits::Real RealScalar; + + friend struct internal::direct_selfadjoint_eigenvalues::IsComplex>; /** \brief Type for vector of eigenvalues as returned by eigenvalues(). * @@ -198,6 +205,22 @@ template class SelfAdjointEigenSolver * \sa SelfAdjointEigenSolver(const MatrixType&, int) */ SelfAdjointEigenSolver& compute(const MatrixType& matrix, int options = ComputeEigenvectors); + + /** \brief Computes eigendecomposition of given matrix using a direct algorithm + * + * This is a variant of compute(const MatrixType&, int options) which + * directly solves the underlying polynomial equation. + * + * Currently only 3x3 matrices for which the sizes are known at compile time are supported (e.g., Matrix3d). + * + * This method is usually significantly faster than the QR algorithm + * but it might also be less accurate. It is also worth noting that + * for 3x3 matrices it involves trigonometric operations which are + * not necessarily available for all scalar types. + * + * \sa compute(const MatrixType&, int options) + */ + SelfAdjointEigenSolver& computeDirect(const MatrixType& matrix, int options = ComputeEigenvectors); /** \brief Returns the eigenvectors of given matrix. * @@ -401,7 +424,7 @@ SelfAdjointEigenSolver& SelfAdjointEigenSolver // map the matrix coefficients to [-1:1] to avoid over- and underflow. 
RealScalar scale = matrix.cwiseAbs().maxCoeff(); - if(scale==Scalar(0)) scale = 1; + if(scale==RealScalar(0)) scale = RealScalar(1); mat = matrix / scale; m_subdiag.resize(n-1); internal::tridiagonalization_inplace(mat, diag, m_subdiag, computeEigenvectors); @@ -466,6 +489,264 @@ SelfAdjointEigenSolver& SelfAdjointEigenSolver return *this; } + +namespace internal { + +template struct direct_selfadjoint_eigenvalues +{ + static inline void run(SolverType& eig, const typename SolverType::MatrixType& A, int options) + { eig.compute(A,options); } +}; + +template struct direct_selfadjoint_eigenvalues +{ + typedef typename SolverType::MatrixType MatrixType; + typedef typename SolverType::RealVectorType VectorType; + typedef typename SolverType::Scalar Scalar; + + static inline void computeRoots(const MatrixType& m, VectorType& roots) + { + using std::sqrt; + using std::atan2; + using std::cos; + using std::sin; + const Scalar s_inv3 = Scalar(1.0)/Scalar(3.0); + const Scalar s_sqrt3 = sqrt(Scalar(3.0)); + + // The characteristic equation is x^3 - c2*x^2 + c1*x - c0 = 0. The + // eigenvalues are the roots to this equation, all guaranteed to be + // real-valued, because the matrix is symmetric. + Scalar c0 = m(0,0)*m(1,1)*m(2,2) + Scalar(2)*m(1,0)*m(2,0)*m(2,1) - m(0,0)*m(2,1)*m(2,1) - m(1,1)*m(2,0)*m(2,0) - m(2,2)*m(1,0)*m(1,0); + Scalar c1 = m(0,0)*m(1,1) - m(1,0)*m(1,0) + m(0,0)*m(2,2) - m(2,0)*m(2,0) + m(1,1)*m(2,2) - m(2,1)*m(2,1); + Scalar c2 = m(0,0) + m(1,1) + m(2,2); + + // Construct the parameters used in classifying the roots of the equation + // and in solving the equation for the roots in closed form. + Scalar c2_over_3 = c2*s_inv3; + Scalar a_over_3 = (c1 - c2*c2_over_3)*s_inv3; + if (a_over_3 > Scalar(0)) + a_over_3 = Scalar(0); + + Scalar half_b = Scalar(0.5)*(c0 + c2_over_3*(Scalar(2)*c2_over_3*c2_over_3 - c1)); + + Scalar q = half_b*half_b + a_over_3*a_over_3*a_over_3; + if (q > Scalar(0)) + q = Scalar(0); + + // Compute the eigenvalues by solving for the roots of the polynomial. + Scalar rho = sqrt(-a_over_3); + Scalar theta = atan2(sqrt(-q),half_b)*s_inv3; + Scalar cos_theta = cos(theta); + Scalar sin_theta = sin(theta); + roots(0) = c2_over_3 + Scalar(2)*rho*cos_theta; + roots(1) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); + roots(2) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); + + // Sort in increasing order. + if (roots(0) >= roots(1)) + std::swap(roots(0),roots(1)); + if (roots(1) >= roots(2)) + { + std::swap(roots(1),roots(2)); + if (roots(0) >= roots(1)) + std::swap(roots(0),roots(1)); + } + } + + static inline void run(SolverType& solver, const MatrixType& mat, int options) + { + using std::sqrt; + eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows()); + eigen_assert((options&~(EigVecMask|GenEigMask))==0 + && (options&EigVecMask)!=EigVecMask + && "invalid option parameter"); + bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; + + MatrixType& eivecs = solver.m_eivec; + VectorType& eivals = solver.m_eivalues; + + // map the matrix coefficients to [-1:1] to avoid over- and underflow. 
+ Scalar scale = mat.cwiseAbs().maxCoeff(); + MatrixType scaledMat = mat / scale; + + // compute the eigenvalues + computeRoots(scaledMat,eivals); + + // compute the eigen vectors + if(computeEigenvectors) + { + Scalar safeNorm2 = Eigen::NumTraits::epsilon(); + safeNorm2 *= safeNorm2; + if((eivals(2)-eivals(0))<=Eigen::NumTraits::epsilon()) + { + eivecs.setIdentity(); + } + else + { + scaledMat = scaledMat.template selfadjointView(); + MatrixType tmp; + tmp = scaledMat; + + Scalar d0 = eivals(2) - eivals(1); + Scalar d1 = eivals(1) - eivals(0); + int k = d0 > d1 ? 2 : 0; + d0 = d0 > d1 ? d1 : d0; + + tmp.diagonal().array () -= eivals(k); + VectorType cross; + Scalar n; + n = (cross = tmp.row(0).cross(tmp.row(1))).squaredNorm(); + + if(n>safeNorm2) + eivecs.col(k) = cross / sqrt(n); + else + { + n = (cross = tmp.row(0).cross(tmp.row(2))).squaredNorm(); + + if(n>safeNorm2) + eivecs.col(k) = cross / sqrt(n); + else + { + n = (cross = tmp.row(1).cross(tmp.row(2))).squaredNorm(); + + if(n>safeNorm2) + eivecs.col(k) = cross / sqrt(n); + else + { + // the input matrix and/or the eigenvaues probably contains some inf/NaN, + // => exit + // scale back to the original size. + eivals *= scale; + + solver.m_info = NumericalIssue; + solver.m_isInitialized = true; + solver.m_eigenvectorsOk = computeEigenvectors; + return; + } + } + } + + tmp = scaledMat; + tmp.diagonal().array() -= eivals(1); + + if(d0<=Eigen::NumTraits::epsilon()) + eivecs.col(1) = eivecs.col(k).unitOrthogonal(); + else + { + n = (cross = eivecs.col(k).cross(tmp.row(0).normalized())).squaredNorm(); + if(n>safeNorm2) + eivecs.col(1) = cross / sqrt(n); + else + { + n = (cross = eivecs.col(k).cross(tmp.row(1))).squaredNorm(); + if(n>safeNorm2) + eivecs.col(1) = cross / sqrt(n); + else + { + n = (cross = eivecs.col(k).cross(tmp.row(2))).squaredNorm(); + if(n>safeNorm2) + eivecs.col(1) = cross / sqrt(n); + else + { + // we should never reach this point, + // if so the last two eigenvalues are likely to ve very closed to each other + eivecs.col(1) = eivecs.col(k).unitOrthogonal(); + } + } + } + + // make sure that eivecs[1] is orthogonal to eivecs[2] + Scalar d = eivecs.col(1).dot(eivecs.col(k)); + eivecs.col(1) = (eivecs.col(1) - d * eivecs.col(k)).normalized(); + } + + eivecs.col(k==2 ? 0 : 2) = eivecs.col(k).cross(eivecs.col(1)).normalized(); + } + } + // Rescale back to the original size. 
+ eivals *= scale; + + solver.m_info = Success; + solver.m_isInitialized = true; + solver.m_eigenvectorsOk = computeEigenvectors; + } +}; + +// 2x2 direct eigenvalues decomposition, code from Hauke Heibel +template struct direct_selfadjoint_eigenvalues +{ + typedef typename SolverType::MatrixType MatrixType; + typedef typename SolverType::RealVectorType VectorType; + typedef typename SolverType::Scalar Scalar; + + static inline void computeRoots(const MatrixType& m, VectorType& roots) + { + using std::sqrt; + const Scalar t0 = Scalar(0.5) * sqrt( abs2(m(0,0)-m(1,1)) + Scalar(4)*m(1,0)*m(1,0)); + const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1)); + roots(0) = t1 - t0; + roots(1) = t1 + t0; + } + + static inline void run(SolverType& solver, const MatrixType& mat, int options) + { + eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows()); + eigen_assert((options&~(EigVecMask|GenEigMask))==0 + && (options&EigVecMask)!=EigVecMask + && "invalid option parameter"); + bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; + + MatrixType& eivecs = solver.m_eivec; + VectorType& eivals = solver.m_eivalues; + + // map the matrix coefficients to [-1:1] to avoid over- and underflow. + Scalar scale = mat.cwiseAbs().maxCoeff(); + scale = (std::max)(scale,Scalar(1)); + MatrixType scaledMat = mat / scale; + + // Compute the eigenvalues + computeRoots(scaledMat,eivals); + + // compute the eigen vectors + if(computeEigenvectors) + { + scaledMat.diagonal().array () -= eivals(1); + Scalar a2 = abs2(scaledMat(0,0)); + Scalar c2 = abs2(scaledMat(1,1)); + Scalar b2 = abs2(scaledMat(1,0)); + if(a2>c2) + { + eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0); + eivecs.col(1) /= sqrt(a2+b2); + } + else + { + eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0); + eivecs.col(1) /= sqrt(c2+b2); + } + + eivecs.col(0) << eivecs.col(1).unitOrthogonal(); + } + + // Rescale back to the original size. + eivals *= scale; + + solver.m_info = Success; + solver.m_isInitialized = true; + solver.m_eigenvectorsOk = computeEigenvectors; + } +}; + +} + +template +SelfAdjointEigenSolver& SelfAdjointEigenSolver +::computeDirect(const MatrixType& matrix, int options) +{ + internal::direct_selfadjoint_eigenvalues::IsComplex>::run(*this,matrix,options); + return *this; +} + namespace internal { template static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n) @@ -515,6 +796,9 @@ static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index sta } } } + } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_SELFADJOINTEIGENSOLVER_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h new file mode 100644 index 000000000..5ebcd08e1 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h @@ -0,0 +1,92 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * Self-adjoint eigenvalues/eigenvectors. + ******************************************************************************** +*/ + +#ifndef EIGEN_SAEIGENSOLVER_MKL_H +#define EIGEN_SAEIGENSOLVER_MKL_H + +#include "Eigen/src/Core/util/MKL_support.h" + +namespace Eigen { + +/** \internal Specialization for the data types supported by MKL */ + +#define EIGEN_MKL_EIG_SELFADJ(EIGTYPE, MKLTYPE, MKLRTYPE, MKLNAME, EIGCOLROW, MKLCOLROW ) \ +template<> \ +SelfAdjointEigenSolver >& \ +SelfAdjointEigenSolver >::compute(const Matrix& matrix, int options) \ +{ \ + eigen_assert(matrix.cols() == matrix.rows()); \ + eigen_assert((options&~(EigVecMask|GenEigMask))==0 \ + && (options&EigVecMask)!=EigVecMask \ + && "invalid option parameter"); \ + bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; \ + lapack_int n = matrix.cols(), lda, matrix_order, info; \ + m_eivalues.resize(n,1); \ + m_subdiag.resize(n-1); \ + m_eivec = matrix; \ +\ + if(n==1) \ + { \ + m_eivalues.coeffRef(0,0) = internal::real(matrix.coeff(0,0)); \ + if(computeEigenvectors) m_eivec.setOnes(n,n); \ + m_info = Success; \ + m_isInitialized = true; \ + m_eigenvectorsOk = computeEigenvectors; \ + return *this; \ + } \ +\ + lda = matrix.outerStride(); \ + matrix_order=MKLCOLROW; \ + char jobz, uplo='L'/*, range='A'*/; \ + jobz = computeEigenvectors ? 'V' : 'N'; \ +\ + info = LAPACKE_##MKLNAME( matrix_order, jobz, uplo, n, (MKLTYPE*)m_eivec.data(), lda, (MKLRTYPE*)m_eivalues.data() ); \ + m_info = (info==0) ? 
Success : NoConvergence; \ + m_isInitialized = true; \ + m_eigenvectorsOk = computeEigenvectors; \ + return *this; \ +} + + +EIGEN_MKL_EIG_SELFADJ(double, double, double, dsyev, ColMajor, LAPACK_COL_MAJOR) +EIGEN_MKL_EIG_SELFADJ(float, float, float, ssyev, ColMajor, LAPACK_COL_MAJOR) +EIGEN_MKL_EIG_SELFADJ(dcomplex, MKL_Complex16, double, zheev, ColMajor, LAPACK_COL_MAJOR) +EIGEN_MKL_EIG_SELFADJ(scomplex, MKL_Complex8, float, cheev, ColMajor, LAPACK_COL_MAJOR) + +EIGEN_MKL_EIG_SELFADJ(double, double, double, dsyev, RowMajor, LAPACK_ROW_MAJOR) +EIGEN_MKL_EIG_SELFADJ(float, float, float, ssyev, RowMajor, LAPACK_ROW_MAJOR) +EIGEN_MKL_EIG_SELFADJ(dcomplex, MKL_Complex16, double, zheev, RowMajor, LAPACK_ROW_MAJOR) +EIGEN_MKL_EIG_SELFADJ(scomplex, MKL_Complex8, float, cheev, RowMajor, LAPACK_ROW_MAJOR) + +} // end namespace Eigen + +#endif // EIGEN_SAEIGENSOLVER_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/Tridiagonalization.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/Tridiagonalization.h index ae4cdce7a..e8f0ac5d1 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/Tridiagonalization.h @@ -26,6 +26,8 @@ #ifndef EIGEN_TRIDIAGONALIZATION_H #define EIGEN_TRIDIAGONALIZATION_H +namespace Eigen { + namespace internal { template struct TridiagonalizationMatrixTReturnType; @@ -97,13 +99,13 @@ template class Tridiagonalization typedef internal::TridiagonalizationMatrixTReturnType MatrixTReturnType; typedef typename internal::conditional::IsComplex, - const typename Diagonal::RealReturnType, + typename internal::add_const_on_value_type::RealReturnType>::type, const Diagonal >::type DiagonalReturnType; typedef typename internal::conditional::IsComplex, - const typename Diagonal< - Block >::RealReturnType, + typename internal::add_const_on_value_type >::RealReturnType>::type, const Diagonal< Block > >::type SubDiagonalReturnType; @@ -560,9 +562,11 @@ template struct TridiagonalizationMatrixTReturnType Index cols() const { return m_matrix.cols(); } protected: - const typename MatrixType::Nested m_matrix; + typename MatrixType::Nested m_matrix; }; } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_TRIDIAGONALIZATION_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/AlignedBox.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/AlignedBox.h index b51deb3f3..2cb894330 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/AlignedBox.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/AlignedBox.h @@ -25,6 +25,8 @@ #ifndef EIGEN_ALIGNEDBOX_H #define EIGEN_ALIGNEDBOX_H +namespace Eigen { + /** \geometry_module \ingroup Geometry_Module * * @@ -190,7 +192,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) template inline bool contains(const MatrixBase& a_p) const { - const typename internal::nested::type p(a_p.derived()); + typename internal::nested::type p(a_p.derived()); return (m_min.array()<=p.array()).all() && (p.array()<=m_max.array()).all(); } @@ -202,7 +204,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) template inline AlignedBox& extend(const MatrixBase& a_p) { - const typename internal::nested::type p(a_p.derived()); + typename internal::nested::type p(a_p.derived()); m_min = m_min.cwiseMin(p); m_max = m_max.cwiseMax(p); return *this; @@ -310,7 +312,7 @@ template inline Scalar AlignedBox::squaredExteriorDistance(const MatrixBase& a_p) const { const typename internal::nested::type p(a_p.derived()); - Scalar dist2 = 0.; + Scalar 
dist2(0); Scalar aux; for (Index k=0; k::squaredExteriorDistance(const Matri template inline Scalar AlignedBox::squaredExteriorDistance(const AlignedBox& b) const { - Scalar dist2 = 0.; + Scalar dist2(0); Scalar aux; for (Index k=0; k::squaredExteriorDistance(const Align return dist2; } +/** \defgroup alignedboxtypedefs Global aligned box typedefs + * + * \ingroup Geometry_Module + * + * Eigen defines several typedef shortcuts for most common aligned box types. + * + * The general patterns are the following: + * + * \c AlignedBoxSizeType where \c Size can be \c 1, \c 2,\c 3,\c 4 for fixed size boxes or \c X for dynamic size, + * and where \c Type can be \c i for integer, \c f for float, \c d for double. + * + * For example, \c AlignedBox3d is a fixed-size 3x3 aligned box type of doubles, and \c AlignedBoxXf is a dynamic-size aligned box of floats. + * + * \sa class AlignedBox + */ + +#define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \ +/** \ingroup alignedboxtypedefs */ \ +typedef AlignedBox AlignedBox##SizeSuffix##TypeSuffix; + +#define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \ +EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 1, 1) \ +EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \ +EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \ +EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \ +EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X) + +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int, i) +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float, f) +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double, d) + +#undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES +#undef EIGEN_MAKE_TYPEDEFS + +} // end namespace Eigen + #endif // EIGEN_ALIGNEDBOX_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/AngleAxis.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/AngleAxis.h index 0ec4624cf..f0e3ff2b3 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/AngleAxis.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/AngleAxis.h @@ -25,6 +25,8 @@ #ifndef EIGEN_ANGLEAXIS_H #define EIGEN_ANGLEAXIS_H +namespace Eigen { + /** \geometry_module \ingroup Geometry_Module * * \class AngleAxis @@ -144,7 +146,7 @@ public: m_angle = Scalar(other.angle()); } - inline static const AngleAxis Identity() { return AngleAxis(0, Vector3::UnitX()); } + static inline const AngleAxis Identity() { return AngleAxis(0, Vector3::UnitX()); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. 
@@ -238,4 +240,6 @@ AngleAxis::toRotationMatrix(void) const return res; } +} // end namespace Eigen + #endif // EIGEN_ANGLEAXIS_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/EulerAngles.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/EulerAngles.h index d246a6ebf..0ce7f957d 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/EulerAngles.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/EulerAngles.h @@ -25,6 +25,8 @@ #ifndef EIGEN_EULERANGLES_H #define EIGEN_EULERANGLES_H +namespace Eigen { + /** \geometry_module \ingroup Geometry_Module * * @@ -92,5 +94,6 @@ MatrixBase::eulerAngles(Index a0, Index a1, Index a2) const return res; } +} // end namespace Eigen #endif // EIGEN_EULERANGLES_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Homogeneous.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Homogeneous.h index 2bc4f7e87..0c4cda01d 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Homogeneous.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Homogeneous.h @@ -25,6 +25,8 @@ #ifndef EIGEN_HOMOGENEOUS_H #define EIGEN_HOMOGENEOUS_H +namespace Eigen { + /** \geometry_module \ingroup Geometry_Module * * \class Homogeneous @@ -121,7 +123,7 @@ template class Homogeneous } protected: - const typename MatrixType::Nested m_matrix; + typename MatrixType::Nested m_matrix; }; /** \geometry_module @@ -216,8 +218,8 @@ template struct take_matrix_for_product > { typedef Transform TransformType; - typedef typename TransformType::ConstAffinePart type; - static const type run (const TransformType& x) { return x.affine(); } + typedef typename internal::add_const::type type; + static type run (const TransformType& x) { return x.affine(); } }; template @@ -270,8 +272,8 @@ struct homogeneous_left_product_impl,Lhs> .template replicate(m_rhs.cols()); } - const typename LhsMatrixTypeCleaned::Nested m_lhs; - const typename MatrixType::Nested m_rhs; + typename LhsMatrixTypeCleaned::Nested m_lhs; + typename MatrixType::Nested m_rhs; }; template @@ -309,10 +311,12 @@ struct homogeneous_right_product_impl,Rhs> .template replicate(m_lhs.rows()); } - const typename MatrixType::Nested m_lhs; - const typename Rhs::Nested m_rhs; + typename MatrixType::Nested m_lhs; + typename Rhs::Nested m_rhs; }; } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_HOMOGENEOUS_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Hyperplane.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Hyperplane.h index d85d3e553..6abf1664d 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Hyperplane.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Hyperplane.h @@ -26,6 +26,8 @@ #ifndef EIGEN_HYPERPLANE_H #define EIGEN_HYPERPLANE_H +namespace Eigen { + /** \geometry_module \ingroup Geometry_Module * * \class Hyperplane @@ -277,4 +279,6 @@ protected: Coefficients m_coeffs; }; +} // end namespace Eigen + #endif // EIGEN_HYPERPLANE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/OrthoMethods.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/OrthoMethods.h index 52b469881..0a8a81dd1 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/OrthoMethods.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/OrthoMethods.h @@ -26,6 +26,8 @@ #ifndef EIGEN_ORTHOMETHODS_H #define EIGEN_ORTHOMETHODS_H +namespace Eigen { + /** \geometry_module * * \returns the cross product of \c *this and \a other @@ -43,8 +45,8 @@ MatrixBase::cross(const MatrixBase& other) const // Note that there is no need for an expression here since the compiler // optimize such a small temporary very well (even within a complex expression) - const typename internal::nested::type 
lhs(derived()); - const typename internal::nested::type rhs(other.derived()); + typename internal::nested::type lhs(derived()); + typename internal::nested::type rhs(other.derived()); return typename cross_product_return_type::type( internal::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)), internal::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)), @@ -56,9 +58,9 @@ namespace internal { template< int Arch,typename VectorLhs,typename VectorRhs, typename Scalar = typename VectorLhs::Scalar, - bool Vectorizable = (VectorLhs::Flags&VectorRhs::Flags)&PacketAccessBit> + bool Vectorizable = bool((VectorLhs::Flags&VectorRhs::Flags)&PacketAccessBit)> struct cross3_impl { - inline static typename internal::plain_matrix_type::type + static inline typename internal::plain_matrix_type::type run(const VectorLhs& lhs, const VectorRhs& rhs) { return typename internal::plain_matrix_type::type( @@ -145,7 +147,7 @@ struct unitOrthogonal_selector typedef typename NumTraits::Real RealScalar; typedef typename Derived::Index Index; typedef Matrix Vector2; - inline static VectorType run(const Derived& src) + static inline VectorType run(const Derived& src) { VectorType perp = VectorType::Zero(src.size()); Index maxi = 0; @@ -167,7 +169,7 @@ struct unitOrthogonal_selector typedef typename plain_matrix_type::type VectorType; typedef typename traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - inline static VectorType run(const Derived& src) + static inline VectorType run(const Derived& src) { VectorType perp; /* Let us compute the crossed product of *this with a vector @@ -205,7 +207,7 @@ template struct unitOrthogonal_selector { typedef typename plain_matrix_type::type VectorType; - inline static VectorType run(const Derived& src) + static inline VectorType run(const Derived& src) { return VectorType(-conj(src.y()), conj(src.x())).normalized(); } }; @@ -226,4 +228,6 @@ MatrixBase::unitOrthogonal() const return internal::unitOrthogonal_selector::run(derived()); } +} // end namespace Eigen + #endif // EIGEN_ORTHOMETHODS_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/ParametrizedLine.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/ParametrizedLine.h index b90f9c088..ddbda83dc 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/ParametrizedLine.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/ParametrizedLine.h @@ -26,6 +26,8 @@ #ifndef EIGEN_PARAMETRIZEDLINE_H #define EIGEN_PARAMETRIZEDLINE_H +namespace Eigen { + /** \geometry_module \ingroup Geometry_Module * * \class ParametrizedLine @@ -106,8 +108,16 @@ public: VectorType projection(const VectorType& p) const { return origin() + direction().dot(p-origin()) * direction(); } + VectorType pointAt( Scalar t ) const; + + template + Scalar intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const; + template Scalar intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const; + + template + VectorType intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const; /** \returns \c *this with scalar type casted to \a NewScalarType * @@ -155,14 +165,46 @@ inline ParametrizedLine<_Scalar, _AmbientDim,_Options>::ParametrizedLine(const H origin() = -hyperplane.normal()*hyperplane.offset(); } -/** \returns the parameter value of the intersection between \c *this and the given hyperplane +/** \returns the point at \a t along this line + */ +template +inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType 
+ParametrizedLine<_Scalar, _AmbientDim,_Options>::pointAt( _Scalar t ) const +{ + return origin() + (direction()*t); +} + +/** \returns the parameter value of the intersection between \c *this and the given \a hyperplane */ template template -inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const +inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const { return -(hyperplane.offset()+hyperplane.normal().dot(origin())) / hyperplane.normal().dot(direction()); } + +/** \deprecated use intersectionParameter() + * \returns the parameter value of the intersection between \c *this and the given \a hyperplane + */ +template +template +inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const +{ + return intersectionParameter(hyperplane); +} + +/** \returns the point of the intersection between \c *this and the given hyperplane + */ +template +template +inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType +ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const +{ + return pointAt(intersectionParameter(hyperplane)); +} + +} // end namespace Eigen + #endif // EIGEN_PARAMETRIZEDLINE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Quaternion.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Quaternion.h index 9180db67d..75083363c 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Quaternion.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Quaternion.h @@ -25,6 +25,8 @@ #ifndef EIGEN_QUATERNION_H #define EIGEN_QUATERNION_H +namespace Eigen { + /*************************************************************************** * Definition of QuaternionBase @@ -38,6 +40,12 @@ template class QuaternionBase : public RotationBase { @@ -109,7 +117,7 @@ public: /** \returns a quaternion representing an identity rotation * \sa MatrixBase::Identity() */ - inline static Quaternion Identity() { return Quaternion(1, 0, 0, 0); } + static inline Quaternion Identity() { return Quaternion(1, 0, 0, 0); } /** \sa QuaternionBase::Identity(), MatrixBase::setIdentity() */ @@ -278,6 +286,9 @@ public: explicit inline Quaternion(const Quaternion& other) { m_coeffs = other.coeffs().template cast(); } + template + static Quaternion FromTwoVectors(const MatrixBase& a, const MatrixBase& b); + inline Coefficients& coeffs() { return m_coeffs;} inline const Coefficients& coeffs() const { return m_coeffs;} @@ -287,7 +298,7 @@ protected: Coefficients m_coeffs; #ifndef EIGEN_PARSED_BY_DOXYGEN - EIGEN_STRONG_INLINE static void _check_template_params() + static EIGEN_STRONG_INLINE void _check_template_params() { EIGEN_STATIC_ASSERT( (_Options & DontAlign) == _Options, INVALID_MATRIX_TEMPLATE_PARAMETERS) @@ -434,7 +445,7 @@ typedef Map, Aligned> QuaternionMapAlignedd; namespace internal { template struct quat_product { - EIGEN_STRONG_INLINE static Quaternion run(const QuaternionBase& a, const QuaternionBase& b){ + static EIGEN_STRONG_INLINE Quaternion run(const QuaternionBase& a, const QuaternionBase& b){ return Quaternion ( a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(), @@ -544,9 +555,9 @@ QuaternionBase::toRotationMatrix(void) const // it has to be inlined, and so the return by value is not an issue Matrix3 res; - const Scalar tx = 
2*this->x(); - const Scalar ty = 2*this->y(); - const Scalar tz = 2*this->z(); + const Scalar tx = Scalar(2)*this->x(); + const Scalar ty = Scalar(2)*this->y(); + const Scalar tz = Scalar(2)*this->z(); const Scalar twx = tx*this->w(); const Scalar twy = ty*this->w(); const Scalar twz = tz*this->w(); @@ -557,15 +568,15 @@ QuaternionBase::toRotationMatrix(void) const const Scalar tyz = tz*this->y(); const Scalar tzz = tz*this->z(); - res.coeffRef(0,0) = 1-(tyy+tzz); + res.coeffRef(0,0) = Scalar(1)-(tyy+tzz); res.coeffRef(0,1) = txy-twz; res.coeffRef(0,2) = txz+twy; res.coeffRef(1,0) = txy+twz; - res.coeffRef(1,1) = 1-(txx+tzz); + res.coeffRef(1,1) = Scalar(1)-(txx+tzz); res.coeffRef(1,2) = tyz-twx; res.coeffRef(2,0) = txz-twy; res.coeffRef(2,1) = tyz+twx; - res.coeffRef(2,2) = 1-(txx+tyy); + res.coeffRef(2,2) = Scalar(1)-(txx+tyy); return res; } @@ -618,6 +629,27 @@ inline Derived& QuaternionBase::setFromTwoVectors(const MatrixBase +template +Quaternion Quaternion::FromTwoVectors(const MatrixBase& a, const MatrixBase& b) +{ + Quaternion quat; + quat.setFromTwoVectors(a, b); + return quat; +} + + /** \returns the multiplicative inverse of \c *this * Note that in most cases, i.e., if you simply want the opposite rotation, * and/or the quaternion is normalized, then it is enough to use the conjugate. @@ -709,7 +741,7 @@ struct quaternionbase_assign_impl { typedef typename Other::Scalar Scalar; typedef DenseIndex Index; - template inline static void run(QuaternionBase& q, const Other& mat) + template static inline void run(QuaternionBase& q, const Other& mat) { // This algorithm comes from "Quaternion Calculus and Fast Animation", // Ken Shoemake, 1987 SIGGRAPH course notes @@ -748,7 +780,7 @@ template struct quaternionbase_assign_impl { typedef typename Other::Scalar Scalar; - template inline static void run(QuaternionBase& q, const Other& vec) + template static inline void run(QuaternionBase& q, const Other& vec) { q.coeffs() = vec; } @@ -756,4 +788,6 @@ struct quaternionbase_assign_impl } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_QUATERNION_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Rotation2D.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Rotation2D.h index cf36da1c5..4339e0f0f 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Rotation2D.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Rotation2D.h @@ -25,6 +25,8 @@ #ifndef EIGEN_ROTATION2D_H #define EIGEN_ROTATION2D_H +namespace Eigen { + /** \geometry_module \ingroup Geometry_Module * * \class Rotation2D @@ -121,7 +123,7 @@ public: m_angle = Scalar(other.angle()); } - inline static Rotation2D Identity() { return Rotation2D(0); } + static inline Rotation2D Identity() { return Rotation2D(0); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. 
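// Illustrative sketch of the Quaternion::FromTwoVectors() factory declared and
// defined above: it returns the rotation taking the direction of the first
// argument onto the direction of the second. This is an editor's example, not
// taken from the patch; it assumes <Eigen/Geometry> is included and the helper
// name is hypothetical.
static inline Eigen::Quaterniond example_from_two_vectors()
{
  using namespace Eigen;
  Vector3d a = Vector3d::UnitX();
  Vector3d b = Vector3d::UnitZ();
  Quaterniond q = Quaterniond::FromTwoVectors(a, b); // same effect as q.setFromTwoVectors(a, b)
  // For unit vectors, q * a is approximately equal to b.
  return q;
}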
@@ -162,4 +164,6 @@ Rotation2D::toRotationMatrix(void) const return (Matrix2() << cosA, -sinA, sinA, cosA).finished(); } +} // end namespace Eigen + #endif // EIGEN_ROTATION2D_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/RotationBase.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/RotationBase.h index 1abf06bb6..0b8fb0a52 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/RotationBase.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/RotationBase.h @@ -25,6 +25,8 @@ #ifndef EIGEN_ROTATIONBASE_H #define EIGEN_ROTATIONBASE_H +namespace Eigen { + // forward declaration namespace internal { template @@ -115,7 +117,7 @@ struct rotation_base_generic_product_selector { enum { Dim = RotationDerived::Dim }; typedef Matrix ReturnType; - inline static ReturnType run(const RotationDerived& r, const MatrixType& m) + static inline ReturnType run(const RotationDerived& r, const MatrixType& m) { return r.toRotationMatrix() * m; } }; @@ -123,7 +125,7 @@ template struct rotation_base_generic_product_selector< RotationDerived, DiagonalMatrix, false > { typedef Transform ReturnType; - inline static ReturnType run(const RotationDerived& r, const DiagonalMatrix& m) + static inline ReturnType run(const RotationDerived& r, const DiagonalMatrix& m) { ReturnType res(r); res.linear() *= m; @@ -136,7 +138,7 @@ struct rotation_base_generic_product_selector ReturnType; - EIGEN_STRONG_INLINE static ReturnType run(const RotationDerived& r, const OtherVectorType& v) + static EIGEN_STRONG_INLINE ReturnType run(const RotationDerived& r, const OtherVectorType& v) { return r._transformVector(v); } @@ -192,20 +194,20 @@ namespace internal { * \sa class Transform, class Rotation2D, class Quaternion, class AngleAxis */ template -inline static Matrix toRotationMatrix(const Scalar& s) +static inline Matrix toRotationMatrix(const Scalar& s) { EIGEN_STATIC_ASSERT(Dim==2,YOU_MADE_A_PROGRAMMING_MISTAKE) return Rotation2D(s).toRotationMatrix(); } template -inline static Matrix toRotationMatrix(const RotationBase& r) +static inline Matrix toRotationMatrix(const RotationBase& r) { return r.toRotationMatrix(); } template -inline static const MatrixBase& toRotationMatrix(const MatrixBase& mat) +static inline const MatrixBase& toRotationMatrix(const MatrixBase& mat) { EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim, YOU_MADE_A_PROGRAMMING_MISTAKE) @@ -214,4 +216,6 @@ inline static const MatrixBase& toRotationMatrix(const MatrixBase< } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_ROTATIONBASE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Scaling.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Scaling.h index c911d13e1..080f33794 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Scaling.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Scaling.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SCALING_H #define EIGEN_SCALING_H +namespace Eigen { + /** \geometry_module \ingroup Geometry_Module * * \class Scaling @@ -179,4 +181,6 @@ UniformScaling::operator* (const Transform& t return res; } +} // end namespace Eigen + #endif // EIGEN_SCALING_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Transform.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Transform.h index 19d012572..e81305ccc 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Transform.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Transform.h @@ -27,6 +27,8 @@ #ifndef EIGEN_TRANSFORM_H #define EIGEN_TRANSFORM_H +namespace Eigen { + namespace internal { template @@ -37,7 +39,7 @@ struct transform_traits Dim = 
Transform::Dim, HDim = Transform::HDim, Mode = Transform::Mode, - IsProjective = (Mode==Projective) + IsProjective = (int(Mode)==int(Projective)) }; }; @@ -61,7 +63,7 @@ template< typename Lhs, typename Rhs, bool AnyProjective = transform_traits::IsProjective || - transform_traits::IsProjective> + transform_traits::IsProjective> struct transform_transform_product_impl; template< typename Other, @@ -207,9 +209,9 @@ public: /** type of the matrix used to represent the linear part of the transformation */ typedef Matrix LinearMatrixType; /** type of read/write reference to the linear part of the transformation */ - typedef Block LinearPart; + typedef Block LinearPart; /** type of read reference to the linear part of the transformation */ - typedef const Block ConstLinearPart; + typedef const Block ConstLinearPart; /** type of read/write reference to the affine part of the transformation */ typedef typename internal::conditional VectorType; /** type of a read/write reference to the translation part of the rotation */ - typedef Block TranslationPart; + typedef Block TranslationPart; /** type of a read reference to the translation part of the rotation */ - typedef const Block ConstTranslationPart; + typedef const Block ConstTranslationPart; /** corresponding translation type */ typedef Translation TranslationType; @@ -279,6 +281,9 @@ public: template inline explicit Transform(const EigenBase& other) { + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY); + check_template_params(); internal::transform_construct_from_matrix::run(this, other.derived()); } @@ -287,6 +292,9 @@ public: template inline Transform& operator=(const EigenBase& other) { + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY); + internal::transform_construct_from_matrix::run(this, other.derived()); return *this; } @@ -376,9 +384,9 @@ public: inline MatrixType& matrix() { return m_matrix; } /** \returns a read-only expression of the linear part of the transformation */ - inline ConstLinearPart linear() const { return m_matrix.template block(0,0); } + inline ConstLinearPart linear() const { return ConstLinearPart(m_matrix,0,0); } /** \returns a writable expression of the linear part of the transformation */ - inline LinearPart linear() { return m_matrix.template block(0,0); } + inline LinearPart linear() { return LinearPart(m_matrix,0,0); } /** \returns a read-only expression of the Dim x HDim affine part of the transformation */ inline ConstAffinePart affine() const { return take_affine_part::run(m_matrix); } @@ -386,9 +394,9 @@ public: inline AffinePart affine() { return take_affine_part::run(m_matrix); } /** \returns a read-only expression of the translation vector of the transformation */ - inline ConstTranslationPart translation() const { return m_matrix.template block(0,Dim); } + inline ConstTranslationPart translation() const { return ConstTranslationPart(m_matrix,0,Dim); } /** \returns a writable expression of the translation vector of the transformation */ - inline TranslationPart translation() { return m_matrix.template block(0,Dim); } + inline TranslationPart translation() { return TranslationPart(m_matrix,0,Dim); } /** \returns an expression of the product between the transform \c *this and a matrix expression \a other * @@ -460,15 +468,40 @@ public: { return 
internal::transform_transform_product_impl::run(*this,other); } - + + #ifdef __INTEL_COMPILER +private: + // this intermediate structure permits to workaround a bug in ICC 11: + // error: template instantiation resulted in unexpected function type of "Eigen::Transform + // (const Eigen::Transform &) const" + // (the meaning of a name may have changed since the template declaration -- the type of the template is: + // "Eigen::internal::transform_transform_product_impl, + // Eigen::Transform, >::ResultType (const Eigen::Transform &) const") + // + template struct icc_11_workaround + { + typedef internal::transform_transform_product_impl > ProductType; + typedef typename ProductType::ResultType ResultType; + }; + +public: /** Concatenates two different transformations */ template - inline const typename internal::transform_transform_product_impl< - Transform,Transform >::ResultType + inline typename icc_11_workaround::ResultType + operator * (const Transform& other) const + { + typedef typename icc_11_workaround::ProductType ProductType; + return ProductType::run(*this,other); + } + #else + /** Concatenates two different transformations */ + template + inline typename internal::transform_transform_product_impl >::ResultType operator * (const Transform& other) const { return internal::transform_transform_product_impl >::run(*this,other); } + #endif /** \sa MatrixBase::setIdentity() */ void setIdentity() { m_matrix.setIdentity(); } @@ -608,7 +641,7 @@ public: protected: #ifndef EIGEN_PARSED_BY_DOXYGEN - EIGEN_STRONG_INLINE static void check_template_params() + static EIGEN_STRONG_INLINE void check_template_params() { EIGEN_STATIC_ASSERT((Options & (DontAlign|RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS) } @@ -1219,7 +1252,7 @@ struct transform_right_product_impl< TransformType, MatrixType, 0 > { typedef typename MatrixType::PlainObject ResultType; - EIGEN_STRONG_INLINE static ResultType run(const TransformType& T, const MatrixType& other) + static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other) { return T.matrix() * other; } @@ -1237,11 +1270,11 @@ struct transform_right_product_impl< TransformType, MatrixType, 1 > typedef typename MatrixType::PlainObject ResultType; - EIGEN_STRONG_INLINE static ResultType run(const TransformType& T, const MatrixType& other) + static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other) { EIGEN_STATIC_ASSERT(OtherRows==HDim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES); - typedef Block TopLeftLhs; + typedef Block TopLeftLhs; ResultType res(other.rows(),other.cols()); TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() = T.affine() * other; @@ -1263,15 +1296,13 @@ struct transform_right_product_impl< TransformType, MatrixType, 2 > typedef typename MatrixType::PlainObject ResultType; - EIGEN_STRONG_INLINE static ResultType run(const TransformType& T, const MatrixType& other) + static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other) { EIGEN_STATIC_ASSERT(OtherRows==Dim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES); - typedef Block TopLeftLhs; - - ResultType res(other.rows(),other.cols()); - TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() = T.linear() * other; - TopLeftLhs(res, 0, 0, Dim, other.cols()).colwise() += T.translation(); + typedef Block TopLeftLhs; + ResultType res(Replicate(T.translation(),1,other.cols())); + TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() += T.linear() * other; return res; } @@ -1391,6 +1422,37 @@ struct 
transform_transform_product_impl } }; +template +struct transform_transform_product_impl,Transform,true > +{ + typedef Transform Lhs; + typedef Transform Rhs; + typedef Transform ResultType; + static ResultType run(const Lhs& lhs, const Rhs& rhs) + { + ResultType res; + res.matrix().template topRows() = lhs.matrix() * rhs.matrix(); + res.matrix().row(Dim) = rhs.matrix().row(Dim); + return res; + } +}; + +template +struct transform_transform_product_impl,Transform,true > +{ + typedef Transform Lhs; + typedef Transform Rhs; + typedef Transform ResultType; + static ResultType run(const Lhs& lhs, const Rhs& rhs) + { + ResultType res(lhs.matrix().template leftCols() * rhs.matrix()); + res.matrix().col(Dim) += lhs.matrix().col(Dim); + return res; + } +}; + } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_TRANSFORM_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Translation.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Translation.h index d8fe50f98..8d77a3d23 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Translation.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Translation.h @@ -25,6 +25,8 @@ #ifndef EIGEN_TRANSLATION_H #define EIGEN_TRANSLATION_H +namespace Eigen { + /** \geometry_module \ingroup Geometry_Module * * \class Translation @@ -54,6 +56,8 @@ public: typedef Matrix LinearMatrixType; /** corresponding affine transformation type */ typedef Transform AffineTransformType; + /** corresponding isometric transformation type */ + typedef Transform IsometryTransformType; protected: @@ -114,8 +118,8 @@ public: /** Concatenates a translation and a rotation */ template - inline AffineTransformType operator*(const RotationBase& r) const - { return *this * r.toRotationMatrix(); } + inline IsometryTransformType operator*(const RotationBase& r) const + { return *this * IsometryTransformType(r); } /** \returns the concatenation of a linear transformation \a l with the translation \a t */ // its a nightmare to define a templated friend function outside its declaration @@ -212,4 +216,6 @@ Translation::operator* (const EigenBase& linear) const return res; } +} // end namespace Eigen + #endif // EIGEN_TRANSLATION_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Umeyama.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Umeyama.h index b50f46173..4d4cc3632 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Umeyama.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Umeyama.h @@ -31,6 +31,8 @@ // * Eigen/SVD // * Eigen/Array +namespace Eigen { + #ifndef EIGEN_PARSED_BY_DOXYGEN // These helpers are required since it allows to use mixed types as parameters @@ -180,4 +182,6 @@ umeyama(const MatrixBase& src, const MatrixBase& dst, boo return Rt; } +} // end namespace Eigen + #endif // EIGEN_UMEYAMA_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/arch/Geometry_SSE.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/arch/Geometry_SSE.h index cbe695c72..08d0f600a 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/arch/Geometry_SSE.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/arch/Geometry_SSE.h @@ -26,12 +26,14 @@ #ifndef EIGEN_GEOMETRY_SSE_H #define EIGEN_GEOMETRY_SSE_H +namespace Eigen { + namespace internal { template struct quat_product { - inline static Quaternion run(const QuaternionBase& _a, const QuaternionBase& _b) + static inline Quaternion run(const QuaternionBase& _a, const QuaternionBase& _b) { const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0,0,0,0x80000000)); Quaternion res; @@ -53,7 +55,7 @@ struct quat_product template struct cross3_impl { - inline static 
typename plain_matrix_type::type + static inline typename plain_matrix_type::type run(const VectorLhs& lhs, const VectorRhs& rhs) { __m128 a = lhs.template packet(0); @@ -72,7 +74,7 @@ struct cross3_impl template struct quat_product { - inline static Quaternion run(const QuaternionBase& _a, const QuaternionBase& _b) + static inline Quaternion run(const QuaternionBase& _a, const QuaternionBase& _b) { const Packet2d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0)); @@ -96,7 +98,7 @@ struct quat_product */ t1 = padd(pmul(a_ww, b_xy), pmul(a_yy, b_zw)); t2 = psub(pmul(a_zz, b_xy), pmul(a_xx, b_zw)); -#ifdef __SSE3__ +#ifdef EIGEN_VECTORIZE_SSE3 EIGEN_UNUSED_VARIABLE(mask) pstore(&res.x(), _mm_addsub_pd(t1, preverse(t2))); #else @@ -110,7 +112,7 @@ struct quat_product */ t1 = psub(pmul(a_ww, b_zw), pmul(a_yy, b_xy)); t2 = padd(pmul(a_zz, b_zw), pmul(a_xx, b_xy)); -#ifdef __SSE3__ +#ifdef EIGEN_VECTORIZE_SSE3 EIGEN_UNUSED_VARIABLE(mask) pstore(&res.z(), preverse(_mm_addsub_pd(preverse(t1), t2))); #else @@ -123,4 +125,6 @@ struct quat_product } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_GEOMETRY_SSE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Householder/BlockHouseholder.h b/gtsam/3rdparty/Eigen/Eigen/src/Householder/BlockHouseholder.h index 23ce1bfbd..b69fd46d5 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Householder/BlockHouseholder.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Householder/BlockHouseholder.h @@ -28,6 +28,8 @@ // This file contains some helper function to deal with block householder reflectors +namespace Eigen { + namespace internal { /** \internal */ @@ -64,7 +66,7 @@ void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vec Matrix T(nbVecs,nbVecs); make_block_householder_triangular_factor(T, vectors, hCoeffs); - const TriangularView& V(vectors); + const TriangularView& V(vectors); // A -= V T V^* A Matrix struct decrement_size { @@ -35,6 +37,22 @@ template struct decrement_size }; } +/** Computes the elementary reflector H such that: + * \f$ H *this = [ beta 0 ... 0]^T \f$ + * where the transformation H is: + * \f$ H = I - tau v v^*\f$ + * and the vector v is: + * \f$ v^T = [1 essential^T] \f$ + * + * The essential part of the vector \c v is stored in *this. + * + * On output: + * \param tau the scaling factor of the Householder transformation + * \param beta the result of H * \c *this + * + * \sa MatrixBase::makeHouseholder(), MatrixBase::applyHouseholderOnTheLeft(), + * MatrixBase::applyHouseholderOnTheRight() + */ template void MatrixBase::makeHouseholderInPlace(Scalar& tau, RealScalar& beta) { @@ -51,7 +69,7 @@ void MatrixBase::makeHouseholderInPlace(Scalar& tau, RealScalar& beta) * * On output: * \param essential the essential part of the vector \c v - * \param tau the scaling factor of the householder transformation + * \param tau the scaling factor of the Householder transformation * \param beta the result of H * \c *this * * \sa MatrixBase::makeHouseholderInPlace(), MatrixBase::applyHouseholderOnTheLeft(), @@ -86,6 +104,21 @@ void MatrixBase::makeHouseholder( } } +/** Apply the elementary reflector H given by + * \f$ H = I - tau v v^*\f$ + * with + * \f$ v^T = [1 essential^T] \f$ + * from the left to a vector or matrix. 
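// Illustrative sketch of the reflector interface documented here, under the
// assumption that <Eigen/Dense> is included (the helper name is hypothetical):
// makeHouseholder() produces the essential part of v and the scaling factor tau,
// and applyHouseholderOnTheLeft() applies H = I - tau*v*v^* to a matrix.
static inline void example_householder_left(Eigen::MatrixXd& A)
{
  using namespace Eigen;
  VectorXd v = A.col(0);                    // vector whose tail we want to annihilate
  VectorXd essential(v.size() - 1);
  double tau, beta;
  v.makeHouseholder(essential, tau, beta);  // so that H*v = [beta 0 ... 0]^T
  VectorXd workspace(A.cols());             // working space with at least A.cols() entries
  A.applyHouseholderOnTheLeft(essential, tau, workspace.data()); // A <- H*A
}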
+ * + * On input: + * \param essential the essential part of the vector \c v + * \param tau the scaling factor of the Householder transformation + * \param workspace a pointer to working space with at least + * this->cols() * essential.size() entries + * + * \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(), + * MatrixBase::applyHouseholderOnTheRight() + */ template template void MatrixBase::applyHouseholderOnTheLeft( @@ -108,6 +141,21 @@ void MatrixBase::applyHouseholderOnTheLeft( } } +/** Apply the elementary reflector H given by + * \f$ H = I - tau v v^*\f$ + * with + * \f$ v^T = [1 essential^T] \f$ + * from the right to a vector or matrix. + * + * On input: + * \param essential the essential part of the vector \c v + * \param tau the scaling factor of the Householder transformation + * \param workspace a pointer to working space with at least + * this->cols() * essential.size() entries + * + * \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(), + * MatrixBase::applyHouseholderOnTheLeft() + */ template template void MatrixBase::applyHouseholderOnTheRight( @@ -130,4 +178,6 @@ void MatrixBase::applyHouseholderOnTheRight( } } +} // end namespace Eigen + #endif // EIGEN_HOUSEHOLDER_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Householder/HouseholderSequence.h b/gtsam/3rdparty/Eigen/Eigen/src/Householder/HouseholderSequence.h index 717f29c99..1cb461b48 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Householder/HouseholderSequence.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Householder/HouseholderSequence.h @@ -26,6 +26,8 @@ #ifndef EIGEN_HOUSEHOLDER_SEQUENCE_H #define EIGEN_HOUSEHOLDER_SEQUENCE_H +namespace Eigen { + /** \ingroup Householder_Module * \householder_module * \class HouseholderSequence @@ -237,13 +239,20 @@ template class HouseholderS ConjugateReturnType inverse() const { return adjoint(); } /** \internal */ - template void evalTo(DestType& dst) const + template inline void evalTo(DestType& dst) const { - Index vecs = m_length; - // FIXME find a way to pass this temporary if the user wants to Matrix temp(rows()); - if( internal::is_same::type,DestType>::value + AutoAlign|ColMajor, DestType::MaxRowsAtCompileTime, 1> workspace(rows()); + evalTo(dst, workspace); + } + + /** \internal */ + template + void evalTo(Dest& dst, Workspace& workspace) const + { + workspace.resize(rows()); + Index vecs = m_length; + if( internal::is_same::type,Dest>::value && internal::extract_data(dst) == internal::extract_data(m_vectors)) { // in-place @@ -254,10 +263,10 @@ template class HouseholderS Index cornerSize = rows() - k - m_shift; if(m_trans) dst.bottomRightCorner(cornerSize, cornerSize) - .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &temp.coeffRef(0)); + .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), workspace.data()); else dst.bottomRightCorner(cornerSize, cornerSize) - .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), &temp.coeffRef(0)); + .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), workspace.data()); // clear the off diagonal vector dst.col(k).tail(rows()-k-1).setZero(); @@ -274,10 +283,10 @@ template class HouseholderS Index cornerSize = rows() - k - m_shift; if(m_trans) dst.bottomRightCorner(cornerSize, cornerSize) - .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &temp.coeffRef(0)); + .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0)); else dst.bottomRightCorner(cornerSize, cornerSize) - 
.applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), &temp.coeffRef(0)); + .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0)); } } } @@ -285,24 +294,40 @@ template class HouseholderS /** \internal */ template inline void applyThisOnTheRight(Dest& dst) const { - Matrix temp(dst.rows()); + Matrix workspace(dst.rows()); + applyThisOnTheRight(dst, workspace); + } + + /** \internal */ + template + inline void applyThisOnTheRight(Dest& dst, Workspace& workspace) const + { + workspace.resize(dst.rows()); for(Index k = 0; k < m_length; ++k) { Index actual_k = m_trans ? m_length-k-1 : k; dst.rightCols(rows()-m_shift-actual_k) - .applyHouseholderOnTheRight(essentialVector(actual_k), m_coeffs.coeff(actual_k), &temp.coeffRef(0)); + .applyHouseholderOnTheRight(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data()); } } /** \internal */ template inline void applyThisOnTheLeft(Dest& dst) const { - Matrix temp(dst.cols()); + Matrix workspace(dst.cols()); + applyThisOnTheLeft(dst, workspace); + } + + /** \internal */ + template + inline void applyThisOnTheLeft(Dest& dst, Workspace& workspace) const + { + workspace.resize(dst.cols()); for(Index k = 0; k < m_length; ++k) { Index actual_k = m_trans ? k : m_length-k-1; dst.bottomRows(rows()-m_shift-actual_k) - .applyHouseholderOnTheLeft(essentialVector(actual_k), m_coeffs.coeff(actual_k), &temp.coeffRef(0)); + .applyHouseholderOnTheLeft(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data()); } } @@ -426,4 +451,6 @@ HouseholderSequence rightHouseholderSequence( return HouseholderSequence(v, h); } +} // end namespace Eigen + #endif // EIGEN_HOUSEHOLDER_SEQUENCE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h b/gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h new file mode 100644 index 000000000..c9fe9c618 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h @@ -0,0 +1,163 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_BASIC_PRECONDITIONERS_H +#define EIGEN_BASIC_PRECONDITIONERS_H + +namespace Eigen { + +/** \ingroup IterativeLinearSolvers_Module + * \brief A preconditioner based on the digonal entries + * + * This class allows to approximately solve for A.x = b problems assuming A is a diagonal matrix. 
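// Illustrative sketch of the interface described above, assuming <Eigen/Sparse>
// and <Eigen/IterativeLinearSolvers> are included (the helper name is
// hypothetical): the diagonal of A is inverted once, after which solve() amounts
// to a component-wise division.
static inline Eigen::VectorXd example_jacobi_preconditioner(const Eigen::SparseMatrix<double>& A,
                                                            const Eigen::VectorXd& b)
{
  Eigen::DiagonalPreconditioner<double> precond(A); // stores the inverted diagonal of A
  return precond.solve(b);                          // x such that diag(A) * x = b
}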
+ * In other words, this preconditioner neglects all off diagonal entries and, in Eigen's language, solves for: + * \code + * A.diagonal().asDiagonal() . x = b + * \endcode + * + * \tparam _Scalar the type of the scalar. + * + * This preconditioner is suitable for both selfadjoint and general problems. + * The diagonal entries are pre-inverted and stored into a dense vector. + * + * \note A variant that has yet to be implemented would attempt to preserve the norm of each column. + * + */ +template +class DiagonalPreconditioner +{ + typedef _Scalar Scalar; + typedef Matrix Vector; + typedef typename Vector::Index Index; + + public: + typedef Matrix MatrixType; + + DiagonalPreconditioner() : m_isInitialized(false) {} + + template + DiagonalPreconditioner(const MatrixType& mat) : m_invdiag(mat.cols()) + { + compute(mat); + } + + Index rows() const { return m_invdiag.size(); } + Index cols() const { return m_invdiag.size(); } + + template + DiagonalPreconditioner& analyzePattern(const MatrixType& ) + { + return *this; + } + + template + DiagonalPreconditioner& factorize(const MatrixType& mat) + { + m_invdiag.resize(mat.cols()); + for(int j=0; j + DiagonalPreconditioner& compute(const MatrixType& mat) + { + return factorize(mat); + } + + template + void _solve(const Rhs& b, Dest& x) const + { + x = m_invdiag.array() * b.array() ; + } + + template inline const internal::solve_retval + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "DiagonalPreconditioner is not initialized."); + eigen_assert(m_invdiag.size()==b.rows() + && "DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval(*this, b.derived()); + } + + protected: + Vector m_invdiag; + bool m_isInitialized; +}; + +namespace internal { + +template +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef DiagonalPreconditioner<_MatrixType> Dec; + EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec()._solve(rhs(),dst); + } +}; + +} + +/** \ingroup IterativeLinearSolvers_Module + * \brief A naive preconditioner which approximates any matrix as the identity matrix + * + * \sa class DiagonalPreconditioner + */ +class IdentityPreconditioner +{ + public: + + IdentityPreconditioner() {} + + template + IdentityPreconditioner(const MatrixType& ) {} + + template + IdentityPreconditioner& analyzePattern(const MatrixType& ) { return *this; } + + template + IdentityPreconditioner& factorize(const MatrixType& ) { return *this; } + + template + IdentityPreconditioner& compute(const MatrixType& ) { return *this; } + + template + inline const Rhs& solve(const Rhs& b) const { return b; } +}; + +} // end namespace Eigen + +#endif // EIGEN_BASIC_PRECONDITIONERS_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h new file mode 100644 index 000000000..5f23968bc --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h @@ -0,0 +1,269 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. 
+// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_BICGSTAB_H +#define EIGEN_BICGSTAB_H + +namespace Eigen { + +namespace internal { + +/** \internal Low-level bi conjugate gradient stabilized algorithm + * \param mat The matrix A + * \param rhs The right hand side vector b + * \param x On input and initial solution, on output the computed solution. + * \param precond A preconditioner being able to efficiently solve for an + * approximation of Ax=b (regardless of b) + * \param iters On input the max number of iteration, on output the number of performed iterations. + * \param tol_error On input the tolerance error, on output an estimation of the relative error. + * \return false in the case of numerical issue, for example a break down of BiCGSTAB. + */ +template +bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x, + const Preconditioner& precond, int& iters, + typename Dest::RealScalar& tol_error) +{ + using std::sqrt; + using std::abs; + typedef typename Dest::RealScalar RealScalar; + typedef typename Dest::Scalar Scalar; + typedef Matrix VectorType; + RealScalar tol = tol_error; + int maxIters = iters; + + int n = mat.cols(); + VectorType r = rhs - mat * x; + VectorType r0 = r; + + RealScalar r0_sqnorm = r0.squaredNorm(); + Scalar rho = 1; + Scalar alpha = 1; + Scalar w = 1; + + VectorType v = VectorType::Zero(n), p = VectorType::Zero(n); + VectorType y(n), z(n); + VectorType kt(n), ks(n); + + VectorType s(n), t(n); + + RealScalar tol2 = tol*tol; + int i = 0; + + while ( r.squaredNorm()/r0_sqnorm > tol2 && i > +class BiCGSTAB; + +namespace internal { + +template< typename _MatrixType, typename _Preconditioner> +struct traits > +{ + typedef _MatrixType MatrixType; + typedef _Preconditioner Preconditioner; +}; + +} + +/** \ingroup IterativeLinearSolvers_Module + * \brief A bi conjugate gradient stabilized solver for sparse square problems + * + * This class allows to solve for A.x = b sparse linear problems using a bi conjugate gradient + * stabilized algorithm. The vectors x and b can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. + * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner + * + * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() + * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations + * and NumTraits::epsilon() for the tolerance. + * + * This class can be used as the direct solver classes. 
Here is a typical usage example: + * \code + * int n = 10000; + * VectorXd x(n), b(n); + * SparseMatrix A(n,n); + * // fill A and b + * BiCGSTAB > solver; + * solver(A); + * x = solver.solve(b); + * std::cout << "#iterations: " << solver.iterations() << std::endl; + * std::cout << "estimated error: " << solver.error() << std::endl; + * // update b, and solve again + * x = solver.solve(b); + * \endcode + * + * By default the iterations start with x=0 as an initial guess of the solution. + * One can control the start using the solveWithGuess() method. Here is a step by + * step execution example starting with a random guess and printing the evolution + * of the estimated error: + * * \code + * x = VectorXd::Random(n); + * solver.setMaxIterations(1); + * int i = 0; + * do { + * x = solver.solveWithGuess(b,x); + * std::cout << i << " : " << solver.error() << std::endl; + * ++i; + * } while (solver.info()!=Success && i<100); + * \endcode + * Note that such a step by step excution is slightly slower. + * + * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner + */ +template< typename _MatrixType, typename _Preconditioner> +class BiCGSTAB : public IterativeSolverBase > +{ + typedef IterativeSolverBase Base; + using Base::mp_matrix; + using Base::m_error; + using Base::m_iterations; + using Base::m_info; + using Base::m_isInitialized; +public: + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; + typedef typename MatrixType::RealScalar RealScalar; + typedef _Preconditioner Preconditioner; + +public: + + /** Default constructor. */ + BiCGSTAB() : Base() {} + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + BiCGSTAB(const MatrixType& A) : Base(A) {} + + ~BiCGSTAB() {} + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A + * \a x0 as an initial solution. 
+ * + * \sa compute() + */ + template + inline const internal::solve_retval_with_guess + solveWithGuess(const MatrixBase& b, const Guess& x0) const + { + eigen_assert(m_isInitialized && "BiCGSTAB is not initialized."); + eigen_assert(Base::rows()==b.rows() + && "BiCGSTAB::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval_with_guess + (*this, b.derived(), x0); + } + + /** \internal */ + template + void _solveWithGuess(const Rhs& b, Dest& x) const + { + bool failed = false; + for(int j=0; j + void _solve(const Rhs& b, Dest& x) const + { + x.setZero(); + _solveWithGuess(b,x); + } + +protected: + +}; + + +namespace internal { + + template +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef BiCGSTAB<_MatrixType, _Preconditioner> Dec; + EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec()._solve(rhs(),dst); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_BICGSTAB_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/CMakeLists.txt b/gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/CMakeLists.txt new file mode 100644 index 000000000..59ccc0072 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/CMakeLists.txt @@ -0,0 +1,6 @@ +FILE(GLOB Eigen_IterativeLinearSolvers_SRCS "*.h") + +INSTALL(FILES + ${Eigen_IterativeLinearSolvers_SRCS} + DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/IterativeLinearSolvers COMPONENT Devel + ) diff --git a/gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h new file mode 100644 index 000000000..edab2299e --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h @@ -0,0 +1,266 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_CONJUGATE_GRADIENT_H +#define EIGEN_CONJUGATE_GRADIENT_H + +namespace Eigen { + +namespace internal { + +/** \internal Low-level conjugate gradient algorithm + * \param mat The matrix A + * \param rhs The right hand side vector b + * \param x On input and initial solution, on output the computed solution. + * \param precond A preconditioner being able to efficiently solve for an + * approximation of Ax=b (regardless of b) + * \param iters On input the max number of iteration, on output the number of performed iterations. 
+ * \param tol_error On input the tolerance error, on output an estimation of the relative error. + */ +template +EIGEN_DONT_INLINE +void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x, + const Preconditioner& precond, int& iters, + typename Dest::RealScalar& tol_error) +{ + using std::sqrt; + using std::abs; + typedef typename Dest::RealScalar RealScalar; + typedef typename Dest::Scalar Scalar; + typedef Matrix VectorType; + + RealScalar tol = tol_error; + int maxIters = iters; + + int n = mat.cols(); + + VectorType residual = rhs - mat * x; //initial residual + VectorType p(n); + + p = precond.solve(residual); //initial search direction + + VectorType z(n), tmp(n); + RealScalar absNew = internal::real(residual.dot(p)); // the square of the absolute value of r scaled by invM + RealScalar rhsNorm2 = rhs.squaredNorm(); + RealScalar residualNorm2 = 0; + RealScalar threshold = tol*tol*rhsNorm2; + int i = 0; + while(i < maxIters) + { + tmp.noalias() = mat * p; // the bottleneck of the algorithm + + Scalar alpha = absNew / p.dot(tmp); // the amount we travel on dir + x += alpha * p; // update solution + residual -= alpha * tmp; // update residue + + residualNorm2 = residual.squaredNorm(); + if(residualNorm2 < threshold) + break; + + z = precond.solve(residual); // approximately solve for "A z = residual" + + RealScalar absOld = absNew; + absNew = internal::real(residual.dot(z)); // update the absolute value of r + RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction + p = z + beta * p; // update search direction + i++; + } + tol_error = sqrt(residualNorm2 / rhsNorm2); + iters = i; +} + +} + +template< typename _MatrixType, int _UpLo=Lower, + typename _Preconditioner = DiagonalPreconditioner > +class ConjugateGradient; + +namespace internal { + +template< typename _MatrixType, int _UpLo, typename _Preconditioner> +struct traits > +{ + typedef _MatrixType MatrixType; + typedef _Preconditioner Preconditioner; +}; + +} + +/** \ingroup IterativeLinearSolvers_Module + * \brief A conjugate gradient solver for sparse self-adjoint problems + * + * This class allows to solve for A.x = b sparse linear problems using a conjugate gradient algorithm. + * The sparse matrix A must be selfadjoint. The vectors x and b can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner + * + * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() + * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations + * and NumTraits::epsilon() for the tolerance. + * + * This class can be used as the direct solver classes. Here is a typical usage example: + * \code + * int n = 10000; + * VectorXd x(n), b(n); + * SparseMatrix A(n,n); + * // fill A and b + * ConjugateGradient > cg; + * cg.compute(A); + * x = cg.solve(b); + * std::cout << "#iterations: " << cg.iterations() << std::endl; + * std::cout << "estimated error: " << cg.error() << std::endl; + * // update b, and solve again + * x = cg.solve(b); + * \endcode + * + * By default the iterations start with x=0 as an initial guess of the solution. + * One can control the start using the solveWithGuess() method. 
Here is a step by + * step execution example starting with a random guess and printing the evolution + * of the estimated error: + * * \code + * x = VectorXd::Random(n); + * cg.setMaxIterations(1); + * int i = 0; + * do { + * x = cg.solveWithGuess(b,x); + * std::cout << i << " : " << cg.error() << std::endl; + * ++i; + * } while (cg.info()!=Success && i<100); + * \endcode + * Note that such a step by step excution is slightly slower. + * + * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner + */ +template< typename _MatrixType, int _UpLo, typename _Preconditioner> +class ConjugateGradient : public IterativeSolverBase > +{ + typedef IterativeSolverBase Base; + using Base::mp_matrix; + using Base::m_error; + using Base::m_iterations; + using Base::m_info; + using Base::m_isInitialized; +public: + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; + typedef typename MatrixType::RealScalar RealScalar; + typedef _Preconditioner Preconditioner; + + enum { + UpLo = _UpLo + }; + +public: + + /** Default constructor. */ + ConjugateGradient() : Base() {} + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + ConjugateGradient(const MatrixType& A) : Base(A) {} + + ~ConjugateGradient() {} + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A + * \a x0 as an initial solution. + * + * \sa compute() + */ + template + inline const internal::solve_retval_with_guess + solveWithGuess(const MatrixBase& b, const Guess& x0) const + { + eigen_assert(m_isInitialized && "ConjugateGradient is not initialized."); + eigen_assert(Base::rows()==b.rows() + && "ConjugateGradient::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval_with_guess + (*this, b.derived(), x0); + } + + /** \internal */ + template + void _solveWithGuess(const Rhs& b, Dest& x) const + { + m_iterations = Base::maxIterations(); + m_error = Base::m_tolerance; + + for(int j=0; jtemplate selfadjointView(), b.col(j), xj, + Base::m_preconditioner, m_iterations, m_error); + } + + m_isInitialized = true; + m_info = m_error <= Base::m_tolerance ? Success : NoConvergence; + } + + /** \internal */ + template + void _solve(const Rhs& b, Dest& x) const + { + x.setOnes(); + _solveWithGuess(b,x); + } + +protected: + +}; + + +namespace internal { + +template +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> Dec; + EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec()._solve(rhs(),dst); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_CONJUGATE_GRADIENT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h new file mode 100644 index 000000000..32f152634 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h @@ -0,0 +1,476 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_INCOMPLETE_LUT_H +#define EIGEN_INCOMPLETE_LUT_H + +namespace Eigen { + +/** + * \brief Incomplete LU factorization with dual-threshold strategy + * During the numerical factorization, two dropping rules are used : + * 1) any element whose magnitude is less than some tolerance is dropped. + * This tolerance is obtained by multiplying the input tolerance @p droptol + * by the average magnitude of all the original elements in the current row. + * 2) After the elimination of the row, only the @p fill largest elements in + * the L part and the @p fill largest elements in the U part are kept + * (in addition to the diagonal element ). Note that @p fill is computed from + * the input parameter @p fillfactor which is used the ratio to control the fill_in + * relatively to the initial number of nonzero elements. + * + * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements) + * and when @p fill=n/2 with @p droptol being different to zero. + * + * References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization, + * Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994. + * + * NOTE : The following implementation is derived from the ILUT implementation + * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota + * released under the terms of the GNU LGPL; + * see http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README for more details. + */ +template +class IncompleteLUT : internal::noncopyable +{ + typedef _Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Matrix Vector; + typedef SparseMatrix FactorType; + typedef SparseMatrix PermutType; + typedef typename FactorType::Index Index; + + public: + typedef Matrix MatrixType; + + IncompleteLUT() + : m_droptol(NumTraits::dummy_precision()), m_fillfactor(10), + m_analysisIsOk(false), m_factorizationIsOk(false), m_isInitialized(false) + {} + + template + IncompleteLUT(const MatrixType& mat, RealScalar droptol=NumTraits::dummy_precision(), int fillfactor = 10) + : m_droptol(droptol),m_fillfactor(fillfactor), + m_analysisIsOk(false),m_factorizationIsOk(false),m_isInitialized(false) + { + eigen_assert(fillfactor != 0); + compute(mat); + } + + Index rows() const { return m_lu.rows(); } + + Index cols() const { return m_lu.cols(); } + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was succesful, + * \c NumericalIssue if the matrix.appears to be negative. 
+ */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "IncompleteLUT is not initialized."); + return m_info; + } + + template + void analyzePattern(const MatrixType& amat); + + template + void factorize(const MatrixType& amat); + + /** + * Compute an incomplete LU factorization with dual threshold on the matrix mat + * No pivoting is done in this version + * + **/ + template + IncompleteLUT& compute(const MatrixType& amat) + { + analyzePattern(amat); + factorize(amat); + eigen_assert(m_factorizationIsOk == true); + m_isInitialized = true; + return *this; + } + + void setDroptol(RealScalar droptol); + void setFillfactor(int fillfactor); + + template + void _solve(const Rhs& b, Dest& x) const + { + x = m_Pinv * b; + x = m_lu.template triangularView().solve(x); + x = m_lu.template triangularView().solve(x); + x = m_P * x; + } + + template inline const internal::solve_retval + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "IncompleteLUT is not initialized."); + eigen_assert(cols()==b.rows() + && "IncompleteLUT::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval(*this, b.derived()); + } + +protected: + + template + int QuickSplit(VectorV &row, VectorI &ind, int ncut); + + + /** keeps off-diagonal entries; drops diagonal entries */ + struct keep_diag { + inline bool operator() (const Index& row, const Index& col, const Scalar&) const + { + return row!=col; + } + }; + +protected: + + FactorType m_lu; + RealScalar m_droptol; + int m_fillfactor; + bool m_analysisIsOk; + bool m_factorizationIsOk; + bool m_isInitialized; + ComputationInfo m_info; + PermutationMatrix m_P; // Fill-reducing permutation + PermutationMatrix m_Pinv; // Inverse permutation +}; + +/** + * Set control parameter droptol + * \param droptol Drop any element whose magnitude is less than this tolerance + **/ +template +void IncompleteLUT::setDroptol(RealScalar droptol) +{ + this->m_droptol = droptol; +} + +/** + * Set control parameter fillfactor + * \param fillfactor This is used to compute the number @p fill_in of largest elements to keep on each row. 
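+ * (Editor's note, mirroring the computation done in factorize(): for an n-by-n
+ * matrix holding nnz nonzeros, the factorization derives
+ * fill_in = (nnz * fillfactor) / n + 1, capped at n, and keeps roughly fill_in/2
+ * entries in the L part and fill_in/2 entries in the U part of each row.)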
+ **/ +template +void IncompleteLUT::setFillfactor(int fillfactor) +{ + this->m_fillfactor = fillfactor; +} + + +/** + * Compute a quick-sort split of a vector + * On output, the vector row is permuted such that its elements satisfy + * abs(row(i)) >= abs(row(ncut)) if incut + * \param row The vector of values + * \param ind The array of index for the elements in @p row + * \param ncut The number of largest elements to keep + **/ +template +template +int IncompleteLUT::QuickSplit(VectorV &row, VectorI &ind, int ncut) +{ + using std::swap; + int mid; + int n = row.size(); /* length of the vector */ + int first, last ; + + ncut--; /* to fit the zero-based indices */ + first = 0; + last = n-1; + if (ncut < first || ncut > last ) return 0; + + do { + mid = first; + RealScalar abskey = std::abs(row(mid)); + for (int j = first + 1; j <= last; j++) { + if ( std::abs(row(j)) > abskey) { + ++mid; + swap(row(mid), row(j)); + swap(ind(mid), ind(j)); + } + } + /* Interchange for the pivot element */ + swap(row(mid), row(first)); + swap(ind(mid), ind(first)); + + if (mid > ncut) last = mid - 1; + else if (mid < ncut ) first = mid + 1; + } while (mid != ncut ); + + return 0; /* mid is equal to ncut */ +} + +template +template +void IncompleteLUT::analyzePattern(const _MatrixType& amat) +{ + // Compute the Fill-reducing permutation + SparseMatrix mat1 = amat; + SparseMatrix mat2 = amat.transpose(); + // Symmetrize the pattern + // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice. + // on the other hand for a really non-symmetric pattern, mat2*mat1 should be prefered... + SparseMatrix AtA = mat2 + mat1; + AtA.prune(keep_diag()); + internal::minimum_degree_ordering(AtA, m_P); // Then compute the AMD ordering... + + m_Pinv = m_P.inverse(); // ... 
and the inverse permutation + + m_analysisIsOk = true; +} + +template +template +void IncompleteLUT::factorize(const _MatrixType& amat) +{ + using std::sqrt; + using std::swap; + using std::abs; + + eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix"); + int n = amat.cols(); // Size of the matrix + m_lu.resize(n,n); + // Declare Working vectors and variables + Vector u(n) ; // real values of the row -- maximum size is n -- + VectorXi ju(n); // column position of the values in u -- maximum size is n + VectorXi jr(n); // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1 + + // Apply the fill-reducing permutation + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + SparseMatrix mat; + mat = amat.twistedBy(m_Pinv); + + // Initialization + jr.fill(-1); + ju.fill(0); + u.fill(0); + + // number of largest elements to keep in each row: + int fill_in = static_cast (amat.nonZeros()*m_fillfactor)/n+1; + if (fill_in > n) fill_in = n; + + // number of largest nonzero elements to keep in the L and the U part of the current row: + int nnzL = fill_in/2; + int nnzU = nnzL; + m_lu.reserve(n * (nnzL + nnzU + 1)); + + // global loop over the rows of the sparse matrix + for (int ii = 0; ii < n; ii++) + { + // 1 - copy the lower and the upper part of the row i of mat in the working vector u + + int sizeu = 1; // number of nonzero elements in the upper part of the current row + int sizel = 0; // number of nonzero elements in the lower part of the current row + ju(ii) = ii; + u(ii) = 0; + jr(ii) = ii; + RealScalar rownorm = 0; + + typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii + for (; j_it; ++j_it) + { + int k = j_it.index(); + if (k < ii) + { + // copy the lower part + ju(sizel) = k; + u(sizel) = j_it.value(); + jr(k) = sizel; + ++sizel; + } + else if (k == ii) + { + u(ii) = j_it.value(); + } + else + { + // copy the upper part + int jpos = ii + sizeu; + ju(jpos) = k; + u(jpos) = j_it.value(); + jr(k) = jpos; + ++sizeu; + } + rownorm += internal::abs2(j_it.value()); + } + + // 2 - detect possible zero row + if(rownorm==0) + { + m_info = NumericalIssue; + return; + } + // Take the 2-norm of the current row as a relative tolerance + rownorm = sqrt(rownorm); + + // 3 - eliminate the previous nonzero rows + int jj = 0; + int len = 0; + while (jj < sizel) + { + // In order to eliminate in the correct order, + // we must select first the smallest column index among ju(jj:sizel) + int k; + int minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment + k += jj; + if (minrow != ju(jj)) + { + // swap the two locations + int j = ju(jj); + swap(ju(jj), ju(k)); + jr(minrow) = jj; jr(j) = k; + swap(u(jj), u(k)); + } + // Reset this location + jr(minrow) = -1; + + // Start elimination + typename FactorType::InnerIterator ki_it(m_lu, minrow); + while (ki_it && ki_it.index() < minrow) ++ki_it; + eigen_internal_assert(ki_it && ki_it.col()==minrow); + Scalar fact = u(jj) / ki_it.value(); + + // drop too small elements + if(abs(fact) <= m_droptol) + { + jj++; + continue; + } + + // linear combination of the current row ii and the row minrow + ++ki_it; + for (; ki_it; ++ki_it) + { + Scalar prod = fact * ki_it.value(); + int j = ki_it.index(); + int jpos = jr(j); + if (jpos == -1) // fill-in element + { + int newpos; + if (j >= ii) // dealing with the upper part + { + newpos = ii + sizeu; + sizeu++; + eigen_internal_assert(sizeu<=n); + } + else // 
dealing with the lower part + { + newpos = sizel; + sizel++; + eigen_internal_assert(sizel<=ii); + } + ju(newpos) = j; + u(newpos) = -prod; + jr(j) = newpos; + } + else + u(jpos) -= prod; + } + // store the pivot element + u(len) = fact; + ju(len) = minrow; + ++len; + + jj++; + } // end of the elimination on the row ii + + // reset the upper part of the pointer jr to zero + for(int k = 0; k m_droptol * rownorm ) + { + ++len; + u(ii + len) = u(ii + k); + ju(ii + len) = ju(ii + k); + } + } + sizeu = len + 1; // +1 to take into account the diagonal element + len = (std::min)(sizeu, nnzU); + typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1)); + typename VectorXi::SegmentReturnType juu(ju.segment(ii+1, sizeu-1)); + QuickSplit(uu, juu, len); + + // store the largest elements of the U part + for(int k = ii + 1; k < ii + len; k++) + m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k); + } + + m_lu.finalize(); + m_lu.makeCompressed(); + + m_factorizationIsOk = true; + m_info = Success; +} + +namespace internal { + +template +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef IncompleteLUT<_MatrixType> Dec; + EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec()._solve(rhs(),dst); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_INCOMPLETE_LUT_H + diff --git a/gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h new file mode 100644 index 000000000..b27ad82ec --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h @@ -0,0 +1,269 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
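+
+// Editor's note (illustrative sketch, not part of the original file): concrete
+// solvers such as ConjugateGradient derive from IterativeSolverBase via CRTP.
+// Besides an internal::traits<> specialization exposing MatrixType and
+// Preconditioner, a derived solver essentially supplies the iteration itself,
+// along the lines of:
+//
+//   template<typename MatrixType>
+//   class MySolver : public IterativeSolverBase<MySolver<MatrixType> >   // hypothetical name
+//   {
+//     typedef IterativeSolverBase<MySolver> Base;
+//     using Base::mp_matrix; using Base::m_error; using Base::m_iterations;
+//     using Base::m_info;    using Base::m_isInitialized;
+//   public:
+//     template<typename Rhs, typename Dest>
+//     void _solve(const Rhs& b, Dest& x) const;   // run the actual iterations here
+//   };
+//
+// plus an internal::solve_retval<> specialization forwarding to _solve(), as done
+// for ConjugateGradient earlier in this patch.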
+ +#ifndef EIGEN_ITERATIVE_SOLVER_BASE_H +#define EIGEN_ITERATIVE_SOLVER_BASE_H + +namespace Eigen { + +/** \ingroup IterativeLinearSolvers_Module + * \brief Base class for linear iterative solvers + * + * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner + */ +template< typename Derived> +class IterativeSolverBase : internal::noncopyable +{ +public: + typedef typename internal::traits::MatrixType MatrixType; + typedef typename internal::traits::Preconditioner Preconditioner; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; + typedef typename MatrixType::RealScalar RealScalar; + +public: + + Derived& derived() { return *static_cast(this); } + const Derived& derived() const { return *static_cast(this); } + + /** Default constructor. */ + IterativeSolverBase() + : mp_matrix(0) + { + init(); + } + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + IterativeSolverBase(const MatrixType& A) + { + init(); + compute(A); + } + + ~IterativeSolverBase() {} + + /** Initializes the iterative solver for the sparcity pattern of the matrix \a A for further solving \c Ax=b problems. + * + * Currently, this function mostly call analyzePattern on the preconditioner. In the future + * we might, for instance, implement column reodering for faster matrix vector products. + */ + Derived& analyzePattern(const MatrixType& A) + { + m_preconditioner.analyzePattern(A); + m_isInitialized = true; + m_analysisIsOk = true; + m_info = Success; + return derived(); + } + + /** Initializes the iterative solver with the numerical values of the matrix \a A for further solving \c Ax=b problems. + * + * Currently, this function mostly call factorize on the preconditioner. + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + Derived& factorize(const MatrixType& A) + { + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + mp_matrix = &A; + m_preconditioner.factorize(A); + m_factorizationIsOk = true; + m_info = Success; + return derived(); + } + + /** Initializes the iterative solver with the matrix \a A for further solving \c Ax=b problems. + * + * Currently, this function mostly initialized/compute the preconditioner. In the future + * we might, for instance, implement column reodering for faster matrix vector products. + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + Derived& compute(const MatrixType& A) + { + mp_matrix = &A; + m_preconditioner.compute(A); + m_isInitialized = true; + m_analysisIsOk = true; + m_factorizationIsOk = true; + m_info = Success; + return derived(); + } + + /** \internal */ + Index rows() const { return mp_matrix ? mp_matrix->rows() : 0; } + /** \internal */ + Index cols() const { return mp_matrix ? 
mp_matrix->cols() : 0; } + + /** \returns the tolerance threshold used by the stopping criteria */ + RealScalar tolerance() const { return m_tolerance; } + + /** Sets the tolerance threshold used by the stopping criteria */ + Derived& setTolerance(RealScalar tolerance) + { + m_tolerance = tolerance; + return derived(); + } + + /** \returns a read-write reference to the preconditioner for custom configuration. */ + Preconditioner& preconditioner() { return m_preconditioner; } + + /** \returns a read-only reference to the preconditioner. */ + const Preconditioner& preconditioner() const { return m_preconditioner; } + + /** \returns the max number of iterations */ + int maxIterations() const + { + return (mp_matrix && m_maxIterations<0) ? mp_matrix->cols() : m_maxIterations; + } + + /** Sets the max number of iterations */ + Derived& setMaxIterations(int maxIters) + { + m_maxIterations = maxIters; + return derived(); + } + + /** \returns the number of iterations performed during the last solve */ + int iterations() const + { + eigen_assert(m_isInitialized && "ConjugateGradient is not initialized."); + return m_iterations; + } + + /** \returns the tolerance error reached during the last solve */ + RealScalar error() const + { + eigen_assert(m_isInitialized && "ConjugateGradient is not initialized."); + return m_error; + } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * \sa compute() + */ + template inline const internal::solve_retval + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized."); + eigen_assert(rows()==b.rows() + && "IterativeSolverBase::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval(derived(), b.derived()); + } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * \sa compute() + */ + template + inline const internal::sparse_solve_retval + solve(const SparseMatrixBase& b) const + { + eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized."); + eigen_assert(rows()==b.rows() + && "IterativeSolverBase::solve(): invalid number of rows of the right hand side matrix b"); + return internal::sparse_solve_retval(*this, b.derived()); + } + + /** \returns Success if the iterations converged, and NoConvergence otherwise. 
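+   *
+   * Illustrative sketch of the workflow shared by solvers built on this base
+   * class (editor's addition; default template arguments, a filled
+   * SparseMatrix<double> A and a VectorXd b are assumed):
+   * \code
+   * ConjugateGradient<SparseMatrix<double> > cg;
+   * cg.setMaxIterations(500);
+   * cg.setTolerance(1e-8);
+   * cg.compute(A);
+   * VectorXd x = cg.solve(b);
+   * if(cg.info()==Success)
+   *   std::cout << cg.iterations() << " iterations, error: " << cg.error() << std::endl;
+   * \endcode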
*/ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized."); + return m_info; + } + + /** \internal */ + template + void _solve_sparse(const Rhs& b, SparseMatrix &dest) const + { + eigen_assert(rows()==b.rows()); + + int rhsCols = b.cols(); + int size = b.rows(); + Eigen::Matrix tb(size); + Eigen::Matrix tx(size); + for(int k=0; k::epsilon(); + } + const MatrixType* mp_matrix; + Preconditioner m_preconditioner; + + int m_maxIterations; + RealScalar m_tolerance; + + mutable RealScalar m_error; + mutable int m_iterations; + mutable ComputationInfo m_info; + mutable bool m_isInitialized, m_analysisIsOk, m_factorizationIsOk; +}; + +namespace internal { + +template +struct sparse_solve_retval, Rhs> + : sparse_solve_retval_base, Rhs> +{ + typedef IterativeSolverBase Dec; + EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec().derived()._solve_sparse(rhs(),dst); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_ITERATIVE_SOLVER_BASE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Jacobi/Jacobi.h b/gtsam/3rdparty/Eigen/Eigen/src/Jacobi/Jacobi.h index 98dea6800..691f5f22b 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Jacobi/Jacobi.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Jacobi/Jacobi.h @@ -26,6 +26,8 @@ #ifndef EIGEN_JACOBI_H #define EIGEN_JACOBI_H +namespace Eigen { + /** \ingroup Jacobi_Module * \jacobi_module * \class JacobiRotation @@ -326,7 +328,7 @@ void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, // both vectors are sequentially stored in memory => vectorization enum { Peeling = 2 }; - Index alignedStart = first_aligned(y, size); + Index alignedStart = internal::first_aligned(y, size); Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize; const Packet pc = pset1(j.c()); @@ -344,7 +346,7 @@ void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, Scalar* EIGEN_RESTRICT px = x + alignedStart; Scalar* EIGEN_RESTRICT py = y + alignedStart; - if(first_aligned(x, size)==alignedStart) + if(internal::first_aligned(x, size)==alignedStart) { for(Index i=alignedStart; i @@ -109,4 +111,6 @@ inline typename internal::traits::Scalar MatrixBase::determina return internal::determinant_impl::type>::run(derived()); } +} // end namespace Eigen + #endif // EIGEN_DETERMINANT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/LU/FullPivLU.h b/gtsam/3rdparty/Eigen/Eigen/src/LU/FullPivLU.h index 46ae7d651..c342bc470 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/LU/FullPivLU.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/LU/FullPivLU.h @@ -25,6 +25,8 @@ #ifndef EIGEN_LU_H #define EIGEN_LU_H +namespace Eigen { + /** \ingroup LU_Module * * \class FullPivLU @@ -282,6 +284,7 @@ template class FullPivLU FullPivLU& setThreshold(Default_t) { m_usePrescribedThreshold = false; + return *this; } /** Returns the threshold that will be used by certain methods such as rank(). 
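Editor's note on the hunk above: the added "return *this;" fixes the missing return
statement in setThreshold(Default_t), so the overload now actually returns the
decomposition object as declared and can be chained like the prescribed-value
overload. Illustrative sketch, assuming a filled MatrixXd A:

    FullPivLU<MatrixXd> lu(A);
    lu.setThreshold(1e-8);     // prescribe a custom threshold used by rank() and friends
    lu.setThreshold(Default);  // revert to the default heuristic; now correctly returns lu itself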
@@ -743,4 +746,6 @@ MatrixBase::fullPivLu() const return FullPivLU(eval()); } +} // end namespace Eigen + #endif // EIGEN_LU_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/LU/Inverse.h b/gtsam/3rdparty/Eigen/Eigen/src/LU/Inverse.h index 2d3e6d105..aa90dc8ad 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/LU/Inverse.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/LU/Inverse.h @@ -25,6 +25,8 @@ #ifndef EIGEN_INVERSE_H #define EIGEN_INVERSE_H +namespace Eigen { + namespace internal { /********************************** @@ -286,7 +288,7 @@ struct inverse_impl : public ReturnByValue > typedef typename MatrixType::Index Index; typedef typename internal::eval::type MatrixTypeNested; typedef typename remove_all::type MatrixTypeNestedCleaned; - const MatrixTypeNested m_matrix; + MatrixTypeNested m_matrix; inverse_impl(const MatrixType& matrix) : m_matrix(matrix) @@ -404,4 +406,6 @@ inline void MatrixBase::computeInverseWithCheck( computeInverseAndDetWithCheck(inverse,determinant,invertible,absDeterminantThreshold); } +} // end namespace Eigen + #endif // EIGEN_INVERSE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/LU/PartialPivLU.h b/gtsam/3rdparty/Eigen/Eigen/src/LU/PartialPivLU.h index 09394b01f..8ae556eb6 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/LU/PartialPivLU.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/LU/PartialPivLU.h @@ -26,6 +26,8 @@ #ifndef EIGEN_PARTIALLU_H #define EIGEN_PARTIALLU_H +namespace Eigen { + /** \ingroup LU_Module * * \class PartialPivLU @@ -506,4 +508,6 @@ MatrixBase::lu() const } #endif +} // end namespace Eigen + #endif // EIGEN_PARTIALLU_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/LU/PartialPivLU_MKL.h b/gtsam/3rdparty/Eigen/Eigen/src/LU/PartialPivLU_MKL.h new file mode 100644 index 000000000..9035953c8 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/LU/PartialPivLU_MKL.h @@ -0,0 +1,85 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * LU decomposition with partial pivoting based on LAPACKE_?getrf function. + ******************************************************************************** +*/ + +#ifndef EIGEN_PARTIALLU_LAPACK_H +#define EIGEN_PARTIALLU_LAPACK_H + +#include "Eigen/src/Core/util/MKL_support.h" + +namespace Eigen { + +namespace internal { + +/** \internal Specialization for the data types supported by MKL */ + +#define EIGEN_MKL_LU_PARTPIV(EIGTYPE, MKLTYPE, MKLPREFIX) \ +template \ +struct partial_lu_impl \ +{ \ + /* \internal performs the LU decomposition in-place of the matrix represented */ \ + static lapack_int blocked_lu(lapack_int rows, lapack_int cols, EIGTYPE* lu_data, lapack_int luStride, lapack_int* row_transpositions, lapack_int& nb_transpositions, lapack_int maxBlockSize=256) \ + { \ + EIGEN_UNUSED_VARIABLE(maxBlockSize);\ + lapack_int matrix_order, first_zero_pivot; \ + lapack_int m, n, lda, *ipiv, info; \ + EIGTYPE* a; \ +/* Set up parameters for ?getrf */ \ + matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \ + lda = luStride; \ + a = lu_data; \ + ipiv = row_transpositions; \ + m = rows; \ + n = cols; \ + nb_transpositions = 0; \ +\ + info = LAPACKE_##MKLPREFIX##getrf( matrix_order, m, n, (MKLTYPE*)a, lda, ipiv ); \ +\ + for(int i=0;i= 0); \ +/* something should be done with nb_transpositions */ \ +\ + first_zero_pivot = info; \ + return first_zero_pivot; \ + } \ +}; + +EIGEN_MKL_LU_PARTPIV(double, double, d) +EIGEN_MKL_LU_PARTPIV(float, float, s) +EIGEN_MKL_LU_PARTPIV(dcomplex, MKL_Complex16, z) +EIGEN_MKL_LU_PARTPIV(scomplex, MKL_Complex8, c) + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PARTIALLU_LAPACK_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/LU/arch/Inverse_SSE.h b/gtsam/3rdparty/Eigen/Eigen/src/LU/arch/Inverse_SSE.h index 4c6153f0a..afb8e4a1d 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/LU/arch/Inverse_SSE.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/LU/arch/Inverse_SSE.h @@ -42,6 +42,8 @@ #ifndef EIGEN_INVERSE_SSE_H #define EIGEN_INVERSE_SSE_H +namespace Eigen { + namespace internal { template @@ -335,6 +337,8 @@ struct compute_inverse_size4 } }; -} +} // end namespace internal + +} // end namespace Eigen #endif // EIGEN_INVERSE_SSE_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/Amd.h b/gtsam/3rdparty/Eigen/Eigen/src/OrderingMethods/Amd.h similarity index 91% rename from gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/Amd.h rename to gtsam/3rdparty/Eigen/Eigen/src/OrderingMethods/Amd.h index 52fd56bc4..ec13077fe 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/Amd.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/OrderingMethods/Amd.h @@ -48,13 +48,14 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #ifndef EIGEN_SPARSE_AMD_H #define EIGEN_SPARSE_AMD_H +namespace Eigen { + namespace internal { - -#define CS_FLIP(i) (-(i)-2) -#define CS_UNFLIP(i) (((i) < 0) ? CS_FLIP(i) : (i)) -#define CS_MARKED(w,j) (w[j] < 0) -#define CS_MARK(w,j) { w[j] = CS_FLIP (w[j]); } +template inline T amd_flip(const T& i) { return -i-2; } +template inline T amd_unflip(const T& i) { return i<0 ? 
amd_flip(i) : i; } +template inline bool amd_marked(const T0* w, const T1& j) { return w[j]<0; } +template inline void amd_mark(const T0* w, const T1& j) { return w[j] = amd_flip(w[j]); } /* clear w */ template @@ -103,8 +104,9 @@ Index cs_tdfs(Index j, Index k, Index *head, const Index *next, Index *post, Ind * The input matrix \a C must be a selfadjoint compressed column major SparseMatrix object. Both the upper and lower parts have to be stored, but the diagonal entries are optional. * On exit the values of C are destroyed */ template -void minimum_degree_ordering(SparseMatrix& C, PermutationMatrix& perm) +void minimum_degree_ordering(SparseMatrix& C, PermutationMatrix& perm) { + using std::sqrt; typedef SparseMatrix CCS; int d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1, @@ -113,7 +115,7 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation unsigned int h; Index n = C.cols(); - dense = std::max (16, 10 * sqrt ((double) n)); /* find dense threshold */ + dense = std::max (16, Index(10 * sqrt(double(n)))); /* find dense threshold */ dense = std::min (n-2, dense); Index cnz = C.nonZeros(); @@ -133,8 +135,8 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation Index* last = perm.indices().data(); /* use P as workspace for last */ /* --- Initialize quotient graph ---------------------------------------- */ - Index* Cp = C._outerIndexPtr(); - Index* Ci = C._innerIndexPtr(); + Index* Cp = C.outerIndexPtr(); + Index* Ci = C.innerIndexPtr(); for(k = 0; k < n; k++) len[k] = Cp[k+1] - Cp[k]; len[n] = 0; @@ -151,7 +153,7 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation elen[i] = 0; // Ek of node i is empty degree[i] = len[i]; // degree of node i } - mark = cs_wclear (0, 0, w, n); /* clear w */ + mark = internal::cs_wclear(0, 0, w, n); /* clear w */ elen[n] = -2; /* n is a dead element */ Cp[n] = -1; /* n is a root of assembly tree */ w[n] = 0; /* n is a dead element */ @@ -172,7 +174,7 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation nv[i] = 0; /* absorb i into element n */ elen[i] = -1; /* node i is dead */ nel++; - Cp[i] = CS_FLIP (n); + Cp[i] = amd_flip (n); nv[n]++; } else @@ -201,12 +203,12 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation if((p = Cp[j]) >= 0) /* j is a live node or element */ { Cp[j] = Ci[p]; /* save first entry of object */ - Ci[p] = CS_FLIP (j); /* first entry is now CS_FLIP(j) */ + Ci[p] = amd_flip (j); /* first entry is now amd_flip(j) */ } } for(q = 0, p = 0; p < cnz; ) /* scan all of memory */ { - if((j = CS_FLIP (Ci[p++])) >= 0) /* found object j */ + if((j = amd_flip (Ci[p++])) >= 0) /* found object j */ { Ci[q] = Cp[j]; /* restore first entry of object */ Cp[j] = q++; /* new pointer to object j */ @@ -255,7 +257,7 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation } if(e != k) { - Cp[e] = CS_FLIP (k); /* absorb e into k */ + Cp[e] = amd_flip (k); /* absorb e into k */ w[e] = 0; /* e is now a dead element */ } } @@ -266,7 +268,7 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation elen[k] = -2; /* k is now an element */ /* --- Find set differences ----------------------------------------- */ - mark = cs_wclear (mark, lemax, w, n); /* clear w if necessary */ + mark = internal::cs_wclear(mark, lemax, w, n); /* clear w if necessary */ for(pk = pk1; pk < pk2; pk++) /* scan 1: find |Le\Lk| */ { i = Ci[pk]; @@ -308,7 +310,7 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation } else { - Cp[e] = CS_FLIP (k); /* aggressive absorb. e->k */ + Cp[e] = amd_flip (k); /* aggressive absorb. 
e->k */ w[e] = 0; /* e is a dead element */ } } @@ -326,7 +328,7 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation } if(d == 0) /* check for mass elimination */ { - Cp[i] = CS_FLIP (k); /* absorb i into k */ + Cp[i] = amd_flip (k); /* absorb i into k */ nvi = -nv[i]; dk -= nvi; /* |Lk| -= |i| */ nvk += nvi; /* |k| += nv[i] */ @@ -349,7 +351,7 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation } /* scan2 is done */ degree[k] = dk; /* finalize |Lk| */ lemax = std::max(lemax, dk); - mark = cs_wclear (mark+lemax, lemax, w, n); /* clear w */ + mark = internal::cs_wclear(mark+lemax, lemax, w, n); /* clear w */ /* --- Supernode detection ------------------------------------------ */ for(pk = pk1; pk < pk2; pk++) @@ -374,7 +376,7 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation } if(ok) /* i and j are identical */ { - Cp[j] = CS_FLIP (i); /* absorb j into i */ + Cp[j] = amd_flip (i); /* absorb j into i */ nv[i] += nv[j]; nv[j] = 0; elen[j] = -1; /* node j is dead */ @@ -416,7 +418,7 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation } /* --- Postordering ----------------------------------------------------- */ - for(i = 0; i < n; i++) Cp[i] = CS_FLIP (Cp[i]);/* fix assembly tree */ + for(i = 0; i < n; i++) Cp[i] = amd_flip (Cp[i]);/* fix assembly tree */ for(j = 0; j <= n; j++) head[j] = -1; for(j = n; j >= 0; j--) /* place unordered nodes in lists */ { @@ -435,7 +437,7 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation } for(k = 0, i = 0; i <= n; i++) /* postorder the assembly tree */ { - if(Cp[i] == -1) k = cs_tdfs (i, k, head, next, perm.indices().data(), w); + if(Cp[i] == -1) k = internal::cs_tdfs(i, k, head, next, perm.indices().data(), w); } perm.indices().conservativeResize(n); @@ -445,4 +447,6 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation } // namespace internal +} // end namespace Eigen + #endif // EIGEN_SPARSE_AMD_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/OrderingMethods/CMakeLists.txt b/gtsam/3rdparty/Eigen/Eigen/src/OrderingMethods/CMakeLists.txt new file mode 100644 index 000000000..9f4bb2758 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/OrderingMethods/CMakeLists.txt @@ -0,0 +1,6 @@ +FILE(GLOB Eigen_OrderingMethods_SRCS "*.h") + +INSTALL(FILES + ${Eigen_OrderingMethods_SRCS} + DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/OrderingMethods COMPONENT Devel + ) diff --git a/gtsam/3rdparty/Eigen/Eigen/src/PaStiXSupport/CMakeLists.txt b/gtsam/3rdparty/Eigen/Eigen/src/PaStiXSupport/CMakeLists.txt new file mode 100644 index 000000000..28c657e9b --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/PaStiXSupport/CMakeLists.txt @@ -0,0 +1,6 @@ +FILE(GLOB Eigen_PastixSupport_SRCS "*.h") + +INSTALL(FILES + ${Eigen_PastixSupport_SRCS} + DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/PaStiXSupport COMPONENT Devel + ) diff --git a/gtsam/3rdparty/Eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h b/gtsam/3rdparty/Eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h new file mode 100644 index 000000000..f687a5e6b --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h @@ -0,0 +1,757 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. 
+// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_PASTIXSUPPORT_H +#define EIGEN_PASTIXSUPPORT_H + +namespace Eigen { + +/** \ingroup PaStiXSupport_Module + * \brief Interface to the PaStix solver + * + * This class is used to solve the linear systems A.X = B via the PaStix library. + * The matrix can be either real or complex, symmetric or not. + * + * \sa TutorialSparseDirectSolvers + */ +template class PastixLU; +template class PastixLLT; +template class PastixLDLT; + +namespace internal +{ + + template struct pastix_traits; + + template + struct pastix_traits< PastixLU<_MatrixType> > + { + typedef _MatrixType MatrixType; + typedef typename _MatrixType::Scalar Scalar; + typedef typename _MatrixType::RealScalar RealScalar; + typedef typename _MatrixType::Index Index; + }; + + template + struct pastix_traits< PastixLLT<_MatrixType,Options> > + { + typedef _MatrixType MatrixType; + typedef typename _MatrixType::Scalar Scalar; + typedef typename _MatrixType::RealScalar RealScalar; + typedef typename _MatrixType::Index Index; + }; + + template + struct pastix_traits< PastixLDLT<_MatrixType,Options> > + { + typedef _MatrixType MatrixType; + typedef typename _MatrixType::Scalar Scalar; + typedef typename _MatrixType::RealScalar RealScalar; + typedef typename _MatrixType::Index Index; + }; + + void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, float *vals, int *perm, int * invp, float *x, int nbrhs, int *iparm, double *dparm) + { + if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } + if (nbrhs == 0) {x = NULL; nbrhs=1;} + s_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm); + } + + void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, double *vals, int *perm, int * invp, double *x, int nbrhs, int *iparm, double *dparm) + { + if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } + if (nbrhs == 0) {x = NULL; nbrhs=1;} + d_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm); + } + + void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex *vals, int *perm, int * invp, std::complex *x, int nbrhs, int *iparm, double *dparm) + { + if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } + if (nbrhs == 0) {x = NULL; nbrhs=1;} + c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast(vals), perm, invp, reinterpret_cast(x), nbrhs, iparm, dparm); + } + + void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex *vals, int *perm, int * invp, std::complex *x, int nbrhs, int *iparm, double *dparm) + { + if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } + if (nbrhs == 0) {x = NULL; nbrhs=1;} + z_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast(vals), perm, invp, reinterpret_cast(x), nbrhs, 
iparm, dparm); + } + + // Convert the matrix to Fortran-style Numbering + template + void c_to_fortran_numbering (MatrixType& mat) + { + if ( !(mat.outerIndexPtr()[0]) ) + { + int i; + for(i = 0; i <= mat.rows(); ++i) + ++mat.outerIndexPtr()[i]; + for(i = 0; i < mat.nonZeros(); ++i) + ++mat.innerIndexPtr()[i]; + } + } + + // Convert to C-style Numbering + template + void fortran_to_c_numbering (MatrixType& mat) + { + // Check the Numbering + if ( mat.outerIndexPtr()[0] == 1 ) + { // Convert to C-style numbering + int i; + for(i = 0; i <= mat.rows(); ++i) + --mat.outerIndexPtr()[i]; + for(i = 0; i < mat.nonZeros(); ++i) + --mat.innerIndexPtr()[i]; + } + } +} + +// This is the base class to interface with PaStiX functions. +// Users should not used this class directly. +template +class PastixBase : internal::noncopyable +{ + public: + typedef typename internal::pastix_traits::MatrixType _MatrixType; + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; + typedef Matrix Vector; + typedef SparseMatrix ColSpMatrix; + + public: + + PastixBase() : m_initisOk(false), m_analysisIsOk(false), m_factorizationIsOk(false), m_isInitialized(false), m_pastixdata(0), m_size(0) + { + init(); + } + + ~PastixBase() + { + clean(); + } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * \sa compute() + */ + template + inline const internal::solve_retval + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "Pastix solver is not initialized."); + eigen_assert(rows()==b.rows() + && "PastixBase::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval(*this, b.derived()); + } + + template + bool _solve (const MatrixBase &b, MatrixBase &x) const; + + /** \internal */ + template + void _solve_sparse(const Rhs& b, SparseMatrix &dest) const + { + eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); + eigen_assert(rows()==b.rows()); + + // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix. + static const int NbColsAtOnce = 1; + int rhsCols = b.cols(); + int size = b.rows(); + Eigen::Matrix tmp(size,rhsCols); + for(int k=0; k(rhsCols-k, NbColsAtOnce); + tmp.leftCols(actualCols) = b.middleCols(k,actualCols); + tmp.leftCols(actualCols) = derived().solve(tmp.leftCols(actualCols)); + dest.middleCols(k,actualCols) = tmp.leftCols(actualCols).sparseView(); + } + } + + Derived& derived() + { + return *static_cast(this); + } + const Derived& derived() const + { + return *static_cast(this); + } + + /** Returns a reference to the integer vector IPARM of PaStiX parameters + * to modify the default parameters. 
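+     * For instance (illustrative editor's sketch; A is an assumed, already filled
+     * SparseMatrix<double>), individual entries can be tuned before the factorization:
+     * \code
+     * PastixLU<SparseMatrix<double> > solver;
+     * solver.iparm(IPARM_VERBOSE) = 0;   // quieter output than the default set in init()
+     * solver.compute(A);
+     * \endcode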
+ * The statistics related to the different phases of factorization and solve are saved here as well + * \sa analyzePattern() factorize() + */ + Array& iparm() + { + return m_iparm; + } + + /** Return a reference to a particular index parameter of the IPARM vector + * \sa iparm() + */ + + int& iparm(int idxparam) + { + return m_iparm(idxparam); + } + + /** Returns a reference to the double vector DPARM of PaStiX parameters + * The statistics related to the different phases of factorization and solve are saved here as well + * \sa analyzePattern() factorize() + */ + Array& dparm() + { + return m_dparm; + } + + + /** Return a reference to a particular index parameter of the DPARM vector + * \sa dparm() + */ + double& dparm(int idxparam) + { + return m_dparm(idxparam); + } + + inline Index cols() const { return m_size; } + inline Index rows() const { return m_size; } + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was succesful, + * \c NumericalIssue if the PaStiX reports a problem + * \c InvalidInput if the input matrix is invalid + * + * \sa iparm() + */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "Decomposition is not initialized."); + return m_info; + } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * \sa compute() + */ + template + inline const internal::sparse_solve_retval + solve(const SparseMatrixBase& b) const + { + eigen_assert(m_isInitialized && "Pastix LU, LLT or LDLT is not initialized."); + eigen_assert(rows()==b.rows() + && "PastixBase::solve(): invalid number of rows of the right hand side matrix b"); + return internal::sparse_solve_retval(*this, b.derived()); + } + + protected: + + // Initialize the Pastix data structure, check the matrix + void init(); + + // Compute the ordering and the symbolic factorization + void analyzePattern(ColSpMatrix& mat); + + // Compute the numerical factorization + void factorize(ColSpMatrix& mat); + + // Free all the data allocated by Pastix + void clean() + { + eigen_assert(m_initisOk && "The Pastix structure should be allocated first"); + m_iparm(IPARM_START_TASK) = API_TASK_CLEAN; + m_iparm(IPARM_END_TASK) = API_TASK_CLEAN; + internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0, + m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data()); + } + + void compute(ColSpMatrix& mat); + + int m_initisOk; + int m_analysisIsOk; + int m_factorizationIsOk; + bool m_isInitialized; + mutable ComputationInfo m_info; + mutable pastix_data_t *m_pastixdata; // Data structure for pastix + mutable int m_comm; // The MPI communicator identifier + mutable Matrix m_iparm; // integer vector for the input parameters + mutable Matrix m_dparm; // Scalar vector for the input parameters + mutable Matrix m_perm; // Permutation vector + mutable Matrix m_invp; // Inverse permutation vector + mutable int m_size; // Size of the matrix +}; + + /** Initialize the PaStiX data structure. 
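+   * (Editor's note: this function is called automatically by the PastixBase
+   * constructor, so end users normally never invoke it directly.)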
+ *A first call to this function fills iparm and dparm with the default PaStiX parameters + * \sa iparm() dparm() + */ +template +void PastixBase::init() +{ + m_size = 0; + m_iparm.setZero(IPARM_SIZE); + m_dparm.setZero(DPARM_SIZE); + + m_iparm(IPARM_MODIFY_PARAMETER) = API_NO; + pastix(&m_pastixdata, MPI_COMM_WORLD, + 0, 0, 0, 0, + 0, 0, 0, 1, m_iparm.data(), m_dparm.data()); + + m_iparm[IPARM_MATRIX_VERIFICATION] = API_NO; + m_iparm[IPARM_VERBOSE] = 2; + m_iparm[IPARM_ORDERING] = API_ORDER_SCOTCH; + m_iparm[IPARM_INCOMPLETE] = API_NO; + m_iparm[IPARM_OOC_LIMIT] = 2000; + m_iparm[IPARM_RHS_MAKING] = API_RHS_B; + m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO; + + m_iparm(IPARM_START_TASK) = API_TASK_INIT; + m_iparm(IPARM_END_TASK) = API_TASK_INIT; + internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0, + 0, 0, 0, 0, m_iparm.data(), m_dparm.data()); + + // Check the returned error + if(m_iparm(IPARM_ERROR_NUMBER)) { + m_info = InvalidInput; + m_initisOk = false; + } + else { + m_info = Success; + m_initisOk = true; + } +} + +template +void PastixBase::compute(ColSpMatrix& mat) +{ + eigen_assert(mat.rows() == mat.cols() && "The input matrix should be squared"); + + analyzePattern(mat); + factorize(mat); + + m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO; + m_isInitialized = m_factorizationIsOk; +} + + +template +void PastixBase::analyzePattern(ColSpMatrix& mat) +{ + eigen_assert(m_initisOk && "The initialization of PaSTiX failed"); + + // clean previous calls + if(m_size>0) + clean(); + + m_size = mat.rows(); + m_perm.resize(m_size); + m_invp.resize(m_size); + + m_iparm(IPARM_START_TASK) = API_TASK_ORDERING; + m_iparm(IPARM_END_TASK) = API_TASK_ANALYSE; + internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(), + mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data()); + + // Check the returned error + if(m_iparm(IPARM_ERROR_NUMBER)) + { + m_info = NumericalIssue; + m_analysisIsOk = false; + } + else + { + m_info = Success; + m_analysisIsOk = true; + } +} + +template +void PastixBase::factorize(ColSpMatrix& mat) +{ +// if(&m_cpyMat != &mat) m_cpyMat = mat; + eigen_assert(m_analysisIsOk && "The analysis phase should be called before the factorization phase"); + m_iparm(IPARM_START_TASK) = API_TASK_NUMFACT; + m_iparm(IPARM_END_TASK) = API_TASK_NUMFACT; + m_size = mat.rows(); + + internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(), + mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data()); + + // Check the returned error + if(m_iparm(IPARM_ERROR_NUMBER)) + { + m_info = NumericalIssue; + m_factorizationIsOk = false; + m_isInitialized = false; + } + else + { + m_info = Success; + m_factorizationIsOk = true; + m_isInitialized = true; + } +} + +/* Solve the system */ +template +template +bool PastixBase::_solve (const MatrixBase &b, MatrixBase &x) const +{ + eigen_assert(m_isInitialized && "The matrix should be factorized first"); + EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0, + THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); + int rhs = 1; + + x = b; /* on return, x is overwritten by the computed solution */ + + for (int i = 0; i < b.cols(); i++){ + m_iparm[IPARM_START_TASK] = API_TASK_SOLVE; + m_iparm[IPARM_END_TASK] = API_TASK_REFINE; + + internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, x.rows(), 0, 0, 0, + m_perm.data(), m_invp.data(), &x(0, i), rhs, m_iparm.data(), m_dparm.data()); + } + + // Check the returned error 
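+  // (IPARM_ERROR_NUMBER is zero when PaStiX succeeded; any nonzero value is
+  //  mapped to NumericalIssue below.)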
+ m_info = m_iparm(IPARM_ERROR_NUMBER)==0 ? Success : NumericalIssue; + + return m_iparm(IPARM_ERROR_NUMBER)==0; +} + +/** \ingroup PaStiXSupport_Module + * \class PastixLU + * \brief Sparse direct LU solver based on PaStiX library + * + * This class is used to solve the linear systems A.X = B with a supernodal LU + * factorization in the PaStiX library. The matrix A should be squared and nonsingular + * PaStiX requires that the matrix A has a symmetric structural pattern. + * This interface can symmetrize the input matrix otherwise. + * The vectors or matrices X and B can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam IsStrSym Indicates if the input matrix has a symmetric pattern, default is false + * NOTE : Note that if the analysis and factorization phase are called separately, + * the input matrix will be symmetrized at each call, hence it is advised to + * symmetrize the matrix in a end-user program and set \p IsStrSym to true + * + * \sa \ref TutorialSparseDirectSolvers + * + */ +template +class PastixLU : public PastixBase< PastixLU<_MatrixType> > +{ + public: + typedef _MatrixType MatrixType; + typedef PastixBase > Base; + typedef typename Base::ColSpMatrix ColSpMatrix; + typedef typename MatrixType::Index Index; + + public: + PastixLU() : Base() + { + init(); + } + + PastixLU(const MatrixType& matrix):Base() + { + init(); + compute(matrix); + } + /** Compute the LU supernodal factorization of \p matrix. + * iparm and dparm can be used to tune the PaStiX parameters. + * see the PaStiX user's manual + * \sa analyzePattern() factorize() + */ + void compute (const MatrixType& matrix) + { + m_structureIsUptodate = false; + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::compute(temp); + } + /** Compute the LU symbolic factorization of \p matrix using its sparsity pattern. + * Several ordering methods can be used at this step. See the PaStiX user's manual. + * The result of this operation can be used with successive matrices having the same pattern as \p matrix + * \sa factorize() + */ + void analyzePattern(const MatrixType& matrix) + { + m_structureIsUptodate = false; + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::analyzePattern(temp); + } + + /** Compute the LU supernodal factorization of \p matrix + * WARNING The matrix \p matrix should have the same structural pattern + * as the same used in the analysis phase. + * \sa analyzePattern() + */ + void factorize(const MatrixType& matrix) + { + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::factorize(temp); + } + protected: + + void init() + { + m_structureIsUptodate = false; + m_iparm(IPARM_SYM) = API_SYM_NO; + m_iparm(IPARM_FACTORIZATION) = API_FACT_LU; + } + + void grabMatrix(const MatrixType& matrix, ColSpMatrix& out) + { + if(IsStrSym) + out = matrix; + else + { + if(!m_structureIsUptodate) + { + // update the transposed structure + m_transposedStructure = matrix.transpose(); + + // Set the elements of the matrix to zero + for (Index j=0; j + * \tparam UpLo The part of the matrix to use : Lower or Upper. 
The default is Lower as required by PaStiX + * + * \sa \ref TutorialSparseDirectSolvers + */ +template +class PastixLLT : public PastixBase< PastixLLT<_MatrixType, _UpLo> > +{ + public: + typedef _MatrixType MatrixType; + typedef PastixBase > Base; + typedef typename Base::ColSpMatrix ColSpMatrix; + + public: + enum { UpLo = _UpLo }; + PastixLLT() : Base() + { + init(); + } + + PastixLLT(const MatrixType& matrix):Base() + { + init(); + compute(matrix); + } + + /** Compute the L factor of the LL^T supernodal factorization of \p matrix + * \sa analyzePattern() factorize() + */ + void compute (const MatrixType& matrix) + { + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::compute(temp); + } + + /** Compute the LL^T symbolic factorization of \p matrix using its sparsity pattern + * The result of this operation can be used with successive matrices having the same pattern as \p matrix + * \sa factorize() + */ + void analyzePattern(const MatrixType& matrix) + { + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::analyzePattern(temp); + } + /** Compute the LL^T supernodal numerical factorization of \p matrix + * \sa analyzePattern() + */ + void factorize(const MatrixType& matrix) + { + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::factorize(temp); + } + protected: + using Base::m_iparm; + + void init() + { + m_iparm(IPARM_SYM) = API_SYM_YES; + m_iparm(IPARM_FACTORIZATION) = API_FACT_LLT; + } + + void grabMatrix(const MatrixType& matrix, ColSpMatrix& out) + { + // Pastix supports only lower, column-major matrices + out.template selfadjointView() = matrix.template selfadjointView(); + internal::c_to_fortran_numbering(out); + } +}; + +/** \ingroup PaStiXSupport_Module + * \class PastixLDLT + * \brief A sparse direct supernodal Cholesky (LLT) factorization and solver based on the PaStiX library + * + * This class is used to solve the linear systems A.X = B via a LDL^T supernodal Cholesky factorization + * available in the PaStiX library. The matrix A should be symmetric and positive definite + * WARNING Selfadjoint complex matrices are not supported in the current version of PaStiX + * The vectors or matrices X and B can be either dense or sparse + * + * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam UpLo The part of the matrix to use : Lower or Upper. 
The default is Lower as required by PaStiX + * + * \sa \ref TutorialSparseDirectSolvers + */ +template +class PastixLDLT : public PastixBase< PastixLDLT<_MatrixType, _UpLo> > +{ + public: + typedef _MatrixType MatrixType; + typedef PastixBase > Base; + typedef typename Base::ColSpMatrix ColSpMatrix; + + public: + enum { UpLo = _UpLo }; + PastixLDLT():Base() + { + init(); + } + + PastixLDLT(const MatrixType& matrix):Base() + { + init(); + compute(matrix); + } + + /** Compute the L and D factors of the LDL^T factorization of \p matrix + * \sa analyzePattern() factorize() + */ + void compute (const MatrixType& matrix) + { + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::compute(temp); + } + + /** Compute the LDL^T symbolic factorization of \p matrix using its sparsity pattern + * The result of this operation can be used with successive matrices having the same pattern as \p matrix + * \sa factorize() + */ + void analyzePattern(const MatrixType& matrix) + { + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::analyzePattern(temp); + } + /** Compute the LDL^T supernodal numerical factorization of \p matrix + * + */ + void factorize(const MatrixType& matrix) + { + ColSpMatrix temp; + grabMatrix(matrix, temp); + Base::factorize(temp); + } + + protected: + using Base::m_iparm; + + void init() + { + m_iparm(IPARM_SYM) = API_SYM_YES; + m_iparm(IPARM_FACTORIZATION) = API_FACT_LDLT; + } + + void grabMatrix(const MatrixType& matrix, ColSpMatrix& out) + { + // Pastix supports only lower, column-major matrices + out.template selfadjointView() = matrix.template selfadjointView(); + internal::c_to_fortran_numbering(out); + } +}; + +namespace internal { + +template +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef PastixBase<_MatrixType> Dec; + EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec()._solve(rhs(),dst); + } +}; + +template +struct sparse_solve_retval, Rhs> + : sparse_solve_retval_base, Rhs> +{ + typedef PastixBase<_MatrixType> Dec; + EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec()._solve_sparse(rhs(),dst); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif diff --git a/gtsam/3rdparty/Eigen/Eigen/src/PardisoSupport/CMakeLists.txt b/gtsam/3rdparty/Eigen/Eigen/src/PardisoSupport/CMakeLists.txt new file mode 100644 index 000000000..a097ab401 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/PardisoSupport/CMakeLists.txt @@ -0,0 +1,6 @@ +FILE(GLOB Eigen_PardisoSupport_SRCS "*.h") + +INSTALL(FILES + ${Eigen_PardisoSupport_SRCS} + DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/PardisoSupport COMPONENT Devel + ) diff --git a/gtsam/3rdparty/Eigen/Eigen/src/PardisoSupport/PardisoSupport.h b/gtsam/3rdparty/Eigen/Eigen/src/PardisoSupport/PardisoSupport.h new file mode 100644 index 000000000..e6defc8c3 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/PardisoSupport/PardisoSupport.h @@ -0,0 +1,614 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL PARDISO + ******************************************************************************** +*/ + +#ifndef EIGEN_PARDISOSUPPORT_H +#define EIGEN_PARDISOSUPPORT_H + +namespace Eigen { + +template class PardisoLU; +template class PardisoLLT; +template class PardisoLDLT; + +namespace internal +{ + template + struct pardiso_run_selector + { + static Index run( _MKL_DSS_HANDLE_t pt, Index maxfct, Index mnum, Index type, Index phase, Index n, void *a, + Index *ia, Index *ja, Index *perm, Index nrhs, Index *iparm, Index msglvl, void *b, void *x) + { + Index error = 0; + ::pardiso(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error); + return error; + } + }; + template<> + struct pardiso_run_selector + { + typedef long long int Index; + static Index run( _MKL_DSS_HANDLE_t pt, Index maxfct, Index mnum, Index type, Index phase, Index n, void *a, + Index *ia, Index *ja, Index *perm, Index nrhs, Index *iparm, Index msglvl, void *b, void *x) + { + Index error = 0; + ::pardiso_64(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error); + return error; + } + }; + + template struct pardiso_traits; + + template + struct pardiso_traits< PardisoLU<_MatrixType> > + { + typedef _MatrixType MatrixType; + typedef typename _MatrixType::Scalar Scalar; + typedef typename _MatrixType::RealScalar RealScalar; + typedef typename _MatrixType::Index Index; + }; + + template + struct pardiso_traits< PardisoLLT<_MatrixType, Options> > + { + typedef _MatrixType MatrixType; + typedef typename _MatrixType::Scalar Scalar; + typedef typename _MatrixType::RealScalar RealScalar; + typedef typename _MatrixType::Index Index; + }; + + template + struct pardiso_traits< PardisoLDLT<_MatrixType, Options> > + { + typedef _MatrixType MatrixType; + typedef typename _MatrixType::Scalar Scalar; + typedef typename _MatrixType::RealScalar RealScalar; + typedef typename _MatrixType::Index Index; + }; + +} + +template +class PardisoImpl +{ + typedef internal::pardiso_traits Traits; + public: + typedef typename Traits::MatrixType MatrixType; + typedef typename Traits::Scalar Scalar; + typedef typename Traits::RealScalar RealScalar; + typedef typename Traits::Index Index; + typedef SparseMatrix SparseMatrixType; + typedef Matrix VectorType; + typedef Matrix IntRowVectorType; + typedef Matrix IntColVectorType; + enum { + ScalarIsComplex = NumTraits::IsComplex + }; + + PardisoImpl() + { + 
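+      // No matrix is taken here: after checking that the index type is supported,
+      // the constructor only zeroes the PARDISO parameter array; the matrix itself
+      // is supplied later through compute(), analyzePattern() or factorize().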
eigen_assert((sizeof(Index) >= sizeof(_INTEGER_t) && sizeof(Index) <= 8) && "Non-supported index type"); + m_iparm.setZero(); + m_msglvl = 0; // No output + m_initialized = false; + } + + ~PardisoImpl() + { + pardisoRelease(); + } + + inline Index cols() const { return m_size; } + inline Index rows() const { return m_size; } + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was succesful, + * \c NumericalIssue if the matrix appears to be negative. + */ + ComputationInfo info() const + { + eigen_assert(m_initialized && "Decomposition is not initialized."); + return m_info; + } + + /** \warning for advanced usage only. + * \returns a reference to the parameter array controlling PARDISO. + * See the PARDISO manual to know how to use it. */ + Array& pardisoParameterArray() + { + return m_iparm; + } + + /** Performs a symbolic decomposition on the sparcity of \a matrix. + * + * This function is particularly useful when solving for several problems having the same structure. + * + * \sa factorize() + */ + Derived& analyzePattern(const MatrixType& matrix); + + /** Performs a numeric decomposition of \a matrix + * + * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. + * + * \sa analyzePattern() + */ + Derived& factorize(const MatrixType& matrix); + + Derived& compute(const MatrixType& matrix); + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * \sa compute() + */ + template + inline const internal::solve_retval + solve(const MatrixBase& b) const + { + eigen_assert(m_initialized && "Pardiso solver is not initialized."); + eigen_assert(rows()==b.rows() + && "PardisoImpl::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval(*this, b.derived()); + } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * \sa compute() + */ + template + inline const internal::sparse_solve_retval + solve(const SparseMatrixBase& b) const + { + eigen_assert(m_initialized && "Pardiso solver is not initialized."); + eigen_assert(rows()==b.rows() + && "PardisoImpl::solve(): invalid number of rows of the right hand side matrix b"); + return internal::sparse_solve_retval(*this, b.derived()); + } + + Derived& derived() + { + return *static_cast(this); + } + const Derived& derived() const + { + return *static_cast(this); + } + + template + bool _solve(const MatrixBase &b, MatrixBase& x) const; + + /** \internal */ + template + void _solve_sparse(const Rhs& b, SparseMatrix &dest) const + { + eigen_assert(m_size==b.rows()); + + // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix. 
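+      // (e.g. with 10 right-hand-side columns and NbColsAtOnce==4, the loop below runs on
+      //  column blocks of size 4, 4 and 2, and converts each dense block of the solution
+      //  back to sparse storage with sparseView())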
+ static const int NbColsAtOnce = 4; + int rhsCols = b.cols(); + int size = b.rows(); + // Pardiso cannot solve in-place, + // so we need two temporaries + Eigen::Matrix tmp_rhs(size,rhsCols); + Eigen::Matrix tmp_res(size,rhsCols); + for(int k=0; k(rhsCols-k, NbColsAtOnce); + tmp_rhs.leftCols(actualCols) = b.middleCols(k,actualCols); + tmp_res.leftCols(actualCols) = derived().solve(tmp_rhs.leftCols(actualCols)); + dest.middleCols(k,actualCols) = tmp_res.leftCols(actualCols).sparseView(); + } + } + + protected: + void pardisoRelease() + { + if(m_initialized) // Factorization ran at least once + { + internal::pardiso_run_selector::run(m_pt, 1, 1, m_type, -1, m_size, 0, 0, 0, m_perm.data(), 0, + m_iparm.data(), m_msglvl, 0, 0); + } + } + + void pardisoInit(int type) + { + m_type = type; + bool symmetric = abs(m_type) < 10; + m_iparm[0] = 1; // No solver default + m_iparm[1] = 3; // use Metis for the ordering + m_iparm[2] = 1; // Numbers of processors, value of OMP_NUM_THREADS + m_iparm[3] = 0; // No iterative-direct algorithm + m_iparm[4] = 0; // No user fill-in reducing permutation + m_iparm[5] = 0; // Write solution into x + m_iparm[6] = 0; // Not in use + m_iparm[7] = 2; // Max numbers of iterative refinement steps + m_iparm[8] = 0; // Not in use + m_iparm[9] = 13; // Perturb the pivot elements with 1E-13 + m_iparm[10] = symmetric ? 0 : 1; // Use nonsymmetric permutation and scaling MPS + m_iparm[11] = 0; // Not in use + m_iparm[12] = symmetric ? 0 : 1; // Maximum weighted matching algorithm is switched-off (default for symmetric). + // Try m_iparm[12] = 1 in case of inappropriate accuracy + m_iparm[13] = 0; // Output: Number of perturbed pivots + m_iparm[14] = 0; // Not in use + m_iparm[15] = 0; // Not in use + m_iparm[16] = 0; // Not in use + m_iparm[17] = -1; // Output: Number of nonzeros in the factor LU + m_iparm[18] = -1; // Output: Mflops for LU factorization + m_iparm[19] = 0; // Output: Numbers of CG Iterations + + m_iparm[20] = 0; // 1x1 pivoting + m_iparm[26] = 0; // No matrix checker + m_iparm[27] = (sizeof(RealScalar) == 4) ? 1 : 0; + m_iparm[34] = 1; // C indexing + m_iparm[59] = 1; // Automatic switch between In-Core and Out-of-Core modes + } + + protected: + // cached data to reduce reallocation, etc. 
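+    // A minimal usage sketch (illustrative, assuming an MKL-enabled build), given a square,
+    // invertible SparseMatrix<double> A and a VectorXd b; the defaults chosen in pardisoInit()
+    // can be tuned through pardisoParameterArray() before calling compute():
+    //   PardisoLU<SparseMatrix<double> > lu;
+    //   lu.pardisoParameterArray()[7] = 10;   // e.g. allow up to 10 iterative refinement steps
+    //   lu.compute(A);
+    //   VectorXd x = lu.solve(b);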
+ + void manageErrorCode(Index error) + { + switch(error) + { + case 0: + m_info = Success; + break; + case -4: + case -7: + m_info = NumericalIssue; + break; + default: + m_info = InvalidInput; + } + } + + mutable SparseMatrixType m_matrix; + ComputationInfo m_info; + bool m_initialized, m_analysisIsOk, m_factorizationIsOk; + Index m_type, m_msglvl; + mutable void *m_pt[64]; + mutable Array m_iparm; + mutable IntColVectorType m_perm; + Index m_size; + + private: + PardisoImpl(PardisoImpl &) {} +}; + +template +Derived& PardisoImpl::compute(const MatrixType& a) +{ + m_size = a.rows(); + eigen_assert(a.rows() == a.cols()); + + pardisoRelease(); + memset(m_pt, 0, sizeof(m_pt)); + m_perm.setZero(m_size); + derived().getMatrix(a); + + Index error; + error = internal::pardiso_run_selector::run(m_pt, 1, 1, m_type, 12, m_size, + m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), + m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL); + + manageErrorCode(error); + m_analysisIsOk = true; + m_factorizationIsOk = true; + m_initialized = true; + return derived(); +} + +template +Derived& PardisoImpl::analyzePattern(const MatrixType& a) +{ + m_size = a.rows(); + eigen_assert(m_size == a.cols()); + + pardisoRelease(); + memset(m_pt, 0, sizeof(m_pt)); + m_perm.setZero(m_size); + derived().getMatrix(a); + + Index error; + error = internal::pardiso_run_selector::run(m_pt, 1, 1, m_type, 11, m_size, + m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), + m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL); + + manageErrorCode(error); + m_analysisIsOk = true; + m_factorizationIsOk = false; + m_initialized = true; + return derived(); +} + +template +Derived& PardisoImpl::factorize(const MatrixType& a) +{ + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + eigen_assert(m_size == a.rows() && m_size == a.cols()); + + derived().getMatrix(a); + + Index error; + error = internal::pardiso_run_selector::run(m_pt, 1, 1, m_type, 22, m_size, + m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), + m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL); + + manageErrorCode(error); + m_factorizationIsOk = true; + return derived(); +} + +template +template +bool PardisoImpl::_solve(const MatrixBase &b, MatrixBase& x) const +{ + if(m_iparm[0] == 0) // Factorization was not computed + return false; + + //Index n = m_matrix.rows(); + Index nrhs = Index(b.cols()); + eigen_assert(m_size==b.rows()); + eigen_assert(((MatrixBase::Flags & RowMajorBit) == 0 || nrhs == 1) && "Row-major right hand sides are not supported"); + eigen_assert(((MatrixBase::Flags & RowMajorBit) == 0 || nrhs == 1) && "Row-major matrices of unknowns are not supported"); + eigen_assert(((nrhs == 1) || b.outerStride() == b.rows())); + + +// switch (transposed) { +// case SvNoTrans : m_iparm[11] = 0 ; break; +// case SvTranspose : m_iparm[11] = 2 ; break; +// case SvAdjoint : m_iparm[11] = 1 ; break; +// default: +// //std::cerr << "Eigen: transposition option \"" << transposed << "\" not supported by the PARDISO backend\n"; +// m_iparm[11] = 0; +// } + + Scalar* rhs_ptr = const_cast(b.derived().data()); + Matrix tmp; + + // Pardiso cannot solve in-place + if(rhs_ptr == x.derived().data()) + { + tmp = b; + rhs_ptr = tmp.data(); + } + + Index error; + error = internal::pardiso_run_selector::run(m_pt, 1, 1, m_type, 33, m_size, + m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), + m_perm.data(), nrhs, m_iparm.data(), m_msglvl, + rhs_ptr, 
x.derived().data()); + + return error==0; +} + + +/** \ingroup PardisoSupport_Module + * \class PardisoLU + * \brief A sparse direct LU factorization and solver based on the PARDISO library + * + * This class allows to solve for A.X = B sparse linear problems via a direct LU factorization + * using the Intel MKL PARDISO library. The sparse matrix A must be squared and invertible. + * The vectors or matrices X and B can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * + * \sa \ref TutorialSparseDirectSolvers + */ +template +class PardisoLU : public PardisoImpl< PardisoLU > +{ + protected: + typedef PardisoImpl< PardisoLU > Base; + typedef typename Base::Scalar Scalar; + typedef typename Base::RealScalar RealScalar; + using Base::pardisoInit; + using Base::m_matrix; + friend class PardisoImpl< PardisoLU >; + + public: + + using Base::compute; + using Base::solve; + + PardisoLU() + : Base() + { + pardisoInit(Base::ScalarIsComplex ? 13 : 11); + } + + PardisoLU(const MatrixType& matrix) + : Base() + { + pardisoInit(Base::ScalarIsComplex ? 13 : 11); + compute(matrix); + } + protected: + void getMatrix(const MatrixType& matrix) + { + m_matrix = matrix; + } + + private: + PardisoLU(PardisoLU& ) {} +}; + +/** \ingroup PardisoSupport_Module + * \class PardisoLLT + * \brief A sparse direct Cholesky (LLT) factorization and solver based on the PARDISO library + * + * This class allows to solve for A.X = B sparse linear problems via a LL^T Cholesky factorization + * using the Intel MKL PARDISO library. The sparse matrix A must be selfajoint and positive definite. + * The vectors or matrices X and B can be either dense or sparse. + * + * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam UpLo can be any bitwise combination of Upper, Lower. The default is Upper, meaning only the upper triangular part has to be used. + * Upper|Lower can be used to tell both triangular parts can be used as input. + * + * \sa \ref TutorialSparseDirectSolvers + */ +template +class PardisoLLT : public PardisoImpl< PardisoLLT > +{ + protected: + typedef PardisoImpl< PardisoLLT > Base; + typedef typename Base::Scalar Scalar; + typedef typename Base::Index Index; + typedef typename Base::RealScalar RealScalar; + using Base::pardisoInit; + using Base::m_matrix; + friend class PardisoImpl< PardisoLLT >; + + public: + + enum { UpLo = _UpLo }; + using Base::compute; + using Base::solve; + + PardisoLLT() + : Base() + { + pardisoInit(Base::ScalarIsComplex ? 4 : 2); + } + + PardisoLLT(const MatrixType& matrix) + : Base() + { + pardisoInit(Base::ScalarIsComplex ? 4 : 2); + compute(matrix); + } + + protected: + + void getMatrix(const MatrixType& matrix) + { + // PARDISO supports only upper, row-major matrices + PermutationMatrix p_null; + m_matrix.resize(matrix.rows(), matrix.cols()); + m_matrix.template selfadjointView() = matrix.template selfadjointView().twistedBy(p_null); + } + + private: + PardisoLLT(PardisoLLT& ) {} +}; + +/** \ingroup PardisoSupport_Module + * \class PardisoLDLT + * \brief A sparse direct Cholesky (LDLT) factorization and solver based on the PARDISO library + * + * This class allows to solve for A.X = B sparse linear problems via a LDL^T Cholesky factorization + * using the Intel MKL PARDISO library. The sparse matrix A is assumed to be selfajoint and positive definite. + * For complex matrices, A can also be symmetric only, see the \a Options template parameter. 
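+ *
+ * A minimal usage sketch (assuming an MKL-enabled build):
+ * \code
+ * typedef SparseMatrix<double> SpMat;
+ * SpMat A(100,100);                  // fill the upper triangular part of the selfadjoint matrix A
+ * VectorXd b(100);                   // fill the right-hand side
+ * PardisoLDLT<SpMat, Upper> ldlt(A);
+ * VectorXd x = ldlt.solve(b);
+ * \endcode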
+ * The vectors or matrices X and B can be either dense or sparse. + * + * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam Options can be any bitwise combination of Upper, Lower, and Symmetric. The default is Upper, meaning only the upper triangular part has to be used. + * Symmetric can be used for symmetric, non-selfadjoint complex matrices, the default being to assume a selfadjoint matrix. + * Upper|Lower can be used to tell both triangular parts can be used as input. + * + * \sa \ref TutorialSparseDirectSolvers + */ +template +class PardisoLDLT : public PardisoImpl< PardisoLDLT > +{ + protected: + typedef PardisoImpl< PardisoLDLT > Base; + typedef typename Base::Scalar Scalar; + typedef typename Base::Index Index; + typedef typename Base::RealScalar RealScalar; + using Base::pardisoInit; + using Base::m_matrix; + friend class PardisoImpl< PardisoLDLT >; + + public: + + using Base::compute; + using Base::solve; + enum { UpLo = Options&(Upper|Lower) }; + + PardisoLDLT() + : Base() + { + pardisoInit(Base::ScalarIsComplex ? ( bool(Options&Symmetric) ? 6 : -4 ) : -2); + } + + PardisoLDLT(const MatrixType& matrix) + : Base() + { + pardisoInit(Base::ScalarIsComplex ? ( bool(Options&Symmetric) ? 6 : -4 ) : -2); + compute(matrix); + } + + void getMatrix(const MatrixType& matrix) + { + // PARDISO supports only upper, row-major matrices + PermutationMatrix p_null; + m_matrix.resize(matrix.rows(), matrix.cols()); + m_matrix.template selfadjointView() = matrix.template selfadjointView().twistedBy(p_null); + } + + private: + PardisoLDLT(PardisoLDLT& ) {} +}; + +namespace internal { + +template +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef PardisoImpl<_Derived> Dec; + EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec()._solve(rhs(),dst); + } +}; + +template +struct sparse_solve_retval, Rhs> + : sparse_solve_retval_base, Rhs> +{ + typedef PardisoImpl Dec; + EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec().derived()._solve_sparse(rhs(),dst); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PARDISOSUPPORT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/QR/ColPivHouseholderQR.h b/gtsam/3rdparty/Eigen/Eigen/src/QR/ColPivHouseholderQR.h index f04c6038d..9550b6bf6 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/QR/ColPivHouseholderQR.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/QR/ColPivHouseholderQR.h @@ -26,6 +26,8 @@ #ifndef EIGEN_COLPIVOTINGHOUSEHOLDERQR_H #define EIGEN_COLPIVOTINGHOUSEHOLDERQR_H +namespace Eigen { + /** \ingroup QR_Module * * \class ColPivHouseholderQR @@ -528,5 +530,6 @@ MatrixBase::colPivHouseholderQr() const return ColPivHouseholderQR(eval()); } +} // end namespace Eigen #endif // EIGEN_COLPIVOTINGHOUSEHOLDERQR_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/QR/ColPivHouseholderQR_MKL.h b/gtsam/3rdparty/Eigen/Eigen/src/QR/ColPivHouseholderQR_MKL.h new file mode 100644 index 000000000..0ad66d3f8 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/QR/ColPivHouseholderQR_MKL.h @@ -0,0 +1,98 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * Householder QR decomposition of a matrix with column pivoting based on + * LAPACKE_?geqp3 function. + ******************************************************************************** +*/ + +#ifndef EIGEN_COLPIVOTINGHOUSEHOLDERQR_MKL_H +#define EIGEN_COLPIVOTINGHOUSEHOLDERQR_MKL_H + +#include "Eigen/src/Core/util/MKL_support.h" + +namespace Eigen { + +/** \internal Specialization for the data types supported by MKL */ + +#define EIGEN_MKL_QR_COLPIV(EIGTYPE, MKLTYPE, MKLPREFIX, EIGCOLROW, MKLCOLROW) \ +template<> \ +ColPivHouseholderQR >& \ +ColPivHouseholderQR >::compute( \ + const Matrix& matrix) \ +\ +{ \ + typedef Matrix MatrixType; \ + typedef MatrixType::Scalar Scalar; \ + typedef MatrixType::RealScalar RealScalar; \ + Index rows = matrix.rows();\ + Index cols = matrix.cols();\ + Index size = matrix.diagonalSize();\ +\ + m_qr = matrix;\ + m_hCoeffs.resize(size);\ +\ + m_colsTranspositions.resize(cols);\ + /*Index number_of_transpositions = 0;*/ \ +\ + m_nonzero_pivots = 0; \ + m_maxpivot = RealScalar(0);\ + m_colsPermutation.resize(cols); \ + m_colsPermutation.indices().setZero(); \ +\ + lapack_int lda = m_qr.outerStride(), i; \ + lapack_int matrix_order = MKLCOLROW; \ + LAPACKE_##MKLPREFIX##geqp3( matrix_order, rows, cols, (MKLTYPE*)m_qr.data(), lda, (lapack_int*)m_colsPermutation.indices().data(), (MKLTYPE*)m_hCoeffs.data()); \ + m_isInitialized = true; \ + m_maxpivot=m_qr.diagonal().cwiseAbs().maxCoeff(); \ + m_hCoeffs.adjointInPlace(); \ + RealScalar premultiplied_threshold = internal::abs(m_maxpivot) * threshold(); \ + lapack_int *perm = m_colsPermutation.indices().data(); \ + for(i=0;i premultiplied_threshold);\ + } \ + for(i=0;i struct FullPivHouseholderQRMatrixQReturnType; + +template +struct traits > +{ + typedef typename MatrixType::PlainObject ReturnType; +}; + +} + /** \ingroup QR_Module * * \class FullPivHouseholderQR @@ -62,7 +76,7 @@ template class FullPivHouseholderQR typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; - typedef Matrix MatrixQType; + typedef internal::FullPivHouseholderQRMatrixQReturnType MatrixQReturnType; typedef typename internal::plain_diag_type::type HCoeffsType; typedef 
Matrix IntRowVectorType; typedef PermutationMatrix PermutationType; @@ -139,7 +153,9 @@ template class FullPivHouseholderQR return internal::solve_retval(*this, b.derived()); } - MatrixQType matrixQ(void) const; + /** \returns Expression object representing the matrix Q + */ + MatrixQReturnType matrixQ(void) const; /** \returns a reference to the matrix where the Householder QR decomposition is stored */ @@ -508,28 +524,73 @@ struct solve_retval, Rhs> } }; +/** \ingroup QR_Module + * + * \brief Expression type for return value of FullPivHouseholderQR::matrixQ() + * + * \tparam MatrixType type of underlying dense matrix + */ +template struct FullPivHouseholderQRMatrixQReturnType + : public ReturnByValue > +{ +public: + typedef typename MatrixType::Index Index; + typedef typename internal::plain_col_type::type IntColVectorType; + typedef typename internal::plain_diag_type::type HCoeffsType; + typedef Matrix WorkVectorType; + + FullPivHouseholderQRMatrixQReturnType(const MatrixType& qr, + const HCoeffsType& hCoeffs, + const IntColVectorType& rowsTranspositions) + : m_qr(qr), + m_hCoeffs(hCoeffs), + m_rowsTranspositions(rowsTranspositions) + {} + + template + void evalTo(ResultType& result) const + { + const Index rows = m_qr.rows(); + WorkVectorType workspace(rows); + evalTo(result, workspace); + } + + template + void evalTo(ResultType& result, WorkVectorType& workspace) const + { + // compute the product H'_0 H'_1 ... H'_n-1, + // where H_k is the k-th Householder transformation I - h_k v_k v_k' + // and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...] + const Index rows = m_qr.rows(); + const Index cols = m_qr.cols(); + const Index size = (std::min)(rows, cols); + workspace.resize(rows); + result.setIdentity(rows, rows); + for (Index k = size-1; k >= 0; k--) + { + result.block(k, k, rows-k, rows-k) + .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), internal::conj(m_hCoeffs.coeff(k)), &workspace.coeffRef(k)); + result.row(k).swap(result.row(m_rowsTranspositions.coeff(k))); + } + } + + Index rows() const { return m_qr.rows(); } + Index cols() const { return m_qr.rows(); } + +protected: + typename MatrixType::Nested m_qr; + typename HCoeffsType::Nested m_hCoeffs; + typename IntColVectorType::Nested m_rowsTranspositions; +}; + } // end namespace internal -/** \returns the matrix Q */ template -typename FullPivHouseholderQR::MatrixQType FullPivHouseholderQR::matrixQ() const +inline typename FullPivHouseholderQR::MatrixQReturnType FullPivHouseholderQR::matrixQ() const { eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); - // compute the product H'_0 H'_1 ... H'_n-1, - // where H_k is the k-th Householder transformation I - h_k v_k v_k' - // and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...] - Index rows = m_qr.rows(); - Index cols = m_qr.cols(); - Index size = (std::min)(rows,cols); - MatrixQType res = MatrixQType::Identity(rows, rows); - Matrix temp(rows); - for (Index k = size-1; k >= 0; k--) - { - res.block(k, k, rows-k, rows-k) - .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), internal::conj(m_hCoeffs.coeff(k)), &temp.coeffRef(k)); - res.row(k).swap(res.row(m_rows_transpositions.coeff(k))); - } - return res; + return MatrixQReturnType(m_qr, m_hCoeffs, m_rows_transpositions); } /** \return the full-pivoting Householder QR decomposition of \c *this. 
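+ *
+ * A usage sketch (illustrative):
+ * \code
+ * MatrixXd A = MatrixXd::Random(5,3);
+ * FullPivHouseholderQR<MatrixXd> qr = A.fullPivHouseholderQr();
+ * MatrixXd Q = qr.matrixQ();         // evaluates the ReturnByValue expression defined above
+ * \endcode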
@@ -543,4 +604,6 @@ MatrixBase::fullPivHouseholderQr() const return FullPivHouseholderQR(eval()); } +} // end namespace Eigen + #endif // EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/QR/HouseholderQR.h b/gtsam/3rdparty/Eigen/Eigen/src/QR/HouseholderQR.h index 9ee96de26..59f6fcaa2 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/QR/HouseholderQR.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/QR/HouseholderQR.h @@ -27,6 +27,8 @@ #ifndef EIGEN_QR_H #define EIGEN_QR_H +namespace Eigen { + /** \ingroup QR_Module * * @@ -351,5 +353,6 @@ MatrixBase::householderQr() const return HouseholderQR(eval()); } +} // end namespace Eigen #endif // EIGEN_QR_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/QR/HouseholderQR_MKL.h b/gtsam/3rdparty/Eigen/Eigen/src/QR/HouseholderQR_MKL.h new file mode 100644 index 000000000..5313de604 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/QR/HouseholderQR_MKL.h @@ -0,0 +1,69 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * Householder QR decomposition of a matrix w/o pivoting based on + * LAPACKE_?geqrf function. + ******************************************************************************** +*/ + +#ifndef EIGEN_QR_MKL_H +#define EIGEN_QR_MKL_H + +#include "Eigen/src/Core/util/MKL_support.h" + +namespace Eigen { + +namespace internal { + +/** \internal Specialization for the data types supported by MKL */ + +#define EIGEN_MKL_QR_NOPIV(EIGTYPE, MKLTYPE, MKLPREFIX) \ +template \ +void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs, \ + typename MatrixQR::Index maxBlockSize=32, \ + EIGTYPE* tempData = 0) \ +{ \ + lapack_int m = mat.rows(); \ + lapack_int n = mat.cols(); \ + lapack_int lda = mat.outerStride(); \ + lapack_int matrix_order = (MatrixQR::IsRowMajor) ? 
LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \ + LAPACKE_##MKLPREFIX##geqrf( matrix_order, m, n, (MKLTYPE*)mat.data(), lda, (MKLTYPE*)hCoeffs.data()); \ + hCoeffs.adjointInPlace(); \ +\ +} + +EIGEN_MKL_QR_NOPIV(double, double, d) +EIGEN_MKL_QR_NOPIV(float, float, s) +EIGEN_MKL_QR_NOPIV(dcomplex, MKL_Complex16, z) +EIGEN_MKL_QR_NOPIV(scomplex, MKL_Complex8, c) + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_QR_MKL_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SVD/JacobiSVD.h b/gtsam/3rdparty/Eigen/Eigen/src/SVD/JacobiSVD.h index 3c423095c..9eadaa9fc 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/SVD/JacobiSVD.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SVD/JacobiSVD.h @@ -25,6 +25,8 @@ #ifndef EIGEN_JACOBISVD_H #define EIGEN_JACOBISVD_H +namespace Eigen { + namespace internal { // forward declaration (needed by ICC) // the empty body is required by MSVC @@ -61,9 +63,12 @@ template struct qr_preconditioner_impl {}; template -struct qr_preconditioner_impl +class qr_preconditioner_impl { - static bool run(JacobiSVD&, const MatrixType&) +public: + typedef typename MatrixType::Index Index; + void allocate(const JacobiSVD&) {} + bool run(JacobiSVD&, const MatrixType&) { return false; } @@ -72,134 +77,279 @@ struct qr_preconditioner_impl /*** preconditioner using FullPivHouseholderQR ***/ template -struct qr_preconditioner_impl +class qr_preconditioner_impl { - static bool run(JacobiSVD& svd, const MatrixType& matrix) +public: + typedef typename MatrixType::Index Index; + typedef typename MatrixType::Scalar Scalar; + enum + { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime + }; + typedef Matrix WorkspaceType; + + void allocate(const JacobiSVD& svd) + { + if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) + { + m_qr = FullPivHouseholderQR(svd.rows(), svd.cols()); + } + if (svd.m_computeFullU) m_workspace.resize(svd.rows()); + } + + bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.rows() > matrix.cols()) { - FullPivHouseholderQR qr(matrix); - svd.m_workMatrix = qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); - if(svd.m_computeFullU) svd.m_matrixU = qr.matrixQ(); - if(svd.computeV()) svd.m_matrixV = qr.colsPermutation(); + m_qr.compute(matrix); + svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); + if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace); + if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation(); return true; } return false; } +private: + FullPivHouseholderQR m_qr; + WorkspaceType m_workspace; }; template -struct qr_preconditioner_impl +class qr_preconditioner_impl { - static bool run(JacobiSVD& svd, const MatrixType& matrix) +public: + typedef typename MatrixType::Index Index; + typedef typename MatrixType::Scalar Scalar; + enum + { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + Options = MatrixType::Options + }; + typedef Matrix + TransposeTypeWithSameStorageOrder; + + void allocate(const JacobiSVD& svd) + { + if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) + { + m_qr = FullPivHouseholderQR(svd.cols(), svd.rows()); + } + m_adjoint.resize(svd.cols(), svd.rows()); + if (svd.m_computeFullV) m_workspace.resize(svd.cols()); + } + + bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.cols() > 
matrix.rows()) { - typedef Matrix - TransposeTypeWithSameStorageOrder; - FullPivHouseholderQR qr(matrix.adjoint()); - svd.m_workMatrix = qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); - if(svd.m_computeFullV) svd.m_matrixV = qr.matrixQ(); - if(svd.computeU()) svd.m_matrixU = qr.colsPermutation(); + m_adjoint = matrix.adjoint(); + m_qr.compute(m_adjoint); + svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); + if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace); + if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation(); return true; } else return false; } +private: + FullPivHouseholderQR m_qr; + TransposeTypeWithSameStorageOrder m_adjoint; + typename internal::plain_row_type::type m_workspace; }; /*** preconditioner using ColPivHouseholderQR ***/ template -struct qr_preconditioner_impl +class qr_preconditioner_impl { - static bool run(JacobiSVD& svd, const MatrixType& matrix) +public: + typedef typename MatrixType::Index Index; + + void allocate(const JacobiSVD& svd) + { + if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) + { + m_qr = ColPivHouseholderQR(svd.rows(), svd.cols()); + } + if (svd.m_computeFullU) m_workspace.resize(svd.rows()); + else if (svd.m_computeThinU) m_workspace.resize(svd.cols()); + } + + bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.rows() > matrix.cols()) { - ColPivHouseholderQR qr(matrix); - svd.m_workMatrix = qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); - if(svd.m_computeFullU) svd.m_matrixU = qr.householderQ(); - else if(svd.m_computeThinU) { + m_qr.compute(matrix); + svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); + if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace); + else if(svd.m_computeThinU) + { svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols()); - qr.householderQ().applyThisOnTheLeft(svd.m_matrixU); + m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace); } - if(svd.computeV()) svd.m_matrixV = qr.colsPermutation(); + if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation(); return true; } return false; } + +private: + ColPivHouseholderQR m_qr; + typename internal::plain_col_type::type m_workspace; }; template -struct qr_preconditioner_impl +class qr_preconditioner_impl { - static bool run(JacobiSVD& svd, const MatrixType& matrix) +public: + typedef typename MatrixType::Index Index; + typedef typename MatrixType::Scalar Scalar; + enum + { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + Options = MatrixType::Options + }; + + typedef Matrix + TransposeTypeWithSameStorageOrder; + + void allocate(const JacobiSVD& svd) + { + if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) + { + m_qr = ColPivHouseholderQR(svd.cols(), svd.rows()); + } + if (svd.m_computeFullV) m_workspace.resize(svd.cols()); + else if (svd.m_computeThinV) m_workspace.resize(svd.rows()); + m_adjoint.resize(svd.cols(), svd.rows()); + } + + bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.cols() > matrix.rows()) { - typedef Matrix - TransposeTypeWithSameStorageOrder; - ColPivHouseholderQR qr(matrix.adjoint()); - svd.m_workMatrix = qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); - 
if(svd.m_computeFullV) svd.m_matrixV = qr.householderQ(); - else if(svd.m_computeThinV) { + m_adjoint = matrix.adjoint(); + m_qr.compute(m_adjoint); + + svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); + if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace); + else if(svd.m_computeThinV) + { svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows()); - qr.householderQ().applyThisOnTheLeft(svd.m_matrixV); + m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace); } - if(svd.computeU()) svd.m_matrixU = qr.colsPermutation(); + if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation(); return true; } else return false; } + +private: + ColPivHouseholderQR m_qr; + TransposeTypeWithSameStorageOrder m_adjoint; + typename internal::plain_row_type::type m_workspace; }; /*** preconditioner using HouseholderQR ***/ template -struct qr_preconditioner_impl +class qr_preconditioner_impl { - static bool run(JacobiSVD& svd, const MatrixType& matrix) +public: + typedef typename MatrixType::Index Index; + + void allocate(const JacobiSVD& svd) + { + if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) + { + m_qr = HouseholderQR(svd.rows(), svd.cols()); + } + if (svd.m_computeFullU) m_workspace.resize(svd.rows()); + else if (svd.m_computeThinU) m_workspace.resize(svd.cols()); + } + + bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.rows() > matrix.cols()) { - HouseholderQR qr(matrix); - svd.m_workMatrix = qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); - if(svd.m_computeFullU) svd.m_matrixU = qr.householderQ(); - else if(svd.m_computeThinU) { + m_qr.compute(matrix); + svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); + if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace); + else if(svd.m_computeThinU) + { svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols()); - qr.householderQ().applyThisOnTheLeft(svd.m_matrixU); + m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace); } if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols()); return true; } return false; } +private: + HouseholderQR m_qr; + typename internal::plain_col_type::type m_workspace; }; template -struct qr_preconditioner_impl +class qr_preconditioner_impl { - static bool run(JacobiSVD& svd, const MatrixType& matrix) +public: + typedef typename MatrixType::Index Index; + typedef typename MatrixType::Scalar Scalar; + enum + { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + Options = MatrixType::Options + }; + + typedef Matrix + TransposeTypeWithSameStorageOrder; + + void allocate(const JacobiSVD& svd) + { + if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) + { + m_qr = HouseholderQR(svd.cols(), svd.rows()); + } + if (svd.m_computeFullV) m_workspace.resize(svd.cols()); + else if (svd.m_computeThinV) m_workspace.resize(svd.rows()); + m_adjoint.resize(svd.cols(), svd.rows()); + } + + bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.cols() > matrix.rows()) { - typedef Matrix - TransposeTypeWithSameStorageOrder; - HouseholderQR qr(matrix.adjoint()); - svd.m_workMatrix = qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); - if(svd.m_computeFullV) svd.m_matrixV = 
qr.householderQ(); - else if(svd.m_computeThinV) { + m_adjoint = matrix.adjoint(); + m_qr.compute(m_adjoint); + + svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); + if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace); + else if(svd.m_computeThinV) + { svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows()); - qr.householderQ().applyThisOnTheLeft(svd.m_matrixV); + m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace); } if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows()); return true; } else return false; } + +private: + HouseholderQR m_qr; + TransposeTypeWithSameStorageOrder m_adjoint; + typename internal::plain_row_type::type m_workspace; }; /*** 2x2 SVD implementation @@ -316,7 +466,7 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q, * Here's an example demonstrating basic usage: * \include JacobiSVD_basic.cpp * Output: \verbinclude JacobiSVD_basic.out - * + * * This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than * bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and * \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms. @@ -324,7 +474,7 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q, * * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to * terminate in finite (and reasonable) time. - * + * * The possible values for QRPreconditioner are: * \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR. * \li FullPivHouseholderQRPreconditioner, is the safest and slowest. It uses full-pivoting QR. @@ -494,7 +644,7 @@ template class JacobiSVD * \param b the right-hand-side of the equation to solve. * * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V. - * + * * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving. * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$. */ @@ -535,6 +685,9 @@ template class JacobiSVD friend struct internal::svd_precondition_2x2_block_to_be_real; template friend struct internal::qr_preconditioner_impl; + + internal::qr_preconditioner_impl m_qr_precond_morecols; + internal::qr_preconditioner_impl m_qr_precond_morerows; }; template @@ -578,6 +731,9 @@ void JacobiSVD::allocate(Index rows, Index cols, u : m_computeThinV ? m_diagSize : 0); m_workMatrix.resize(m_diagSize, m_diagSize); + + if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this); + if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this); } template @@ -595,8 +751,7 @@ JacobiSVD::compute(const MatrixType& matrix, unsig /*** step 1. 
The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */ - if(!internal::qr_preconditioner_impl::run(*this, matrix) - && !internal::qr_preconditioner_impl::run(*this, matrix)) + if(!m_qr_precond_morecols.run(*this, matrix) && !m_qr_precond_morerows.run(*this, matrix)) { m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize); if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows); @@ -722,6 +877,6 @@ MatrixBase::jacobiSvd(unsigned int computationOptions) const return JacobiSVD(*this, computationOptions); } - +} // end namespace Eigen #endif // EIGEN_JACOBISVD_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SVD/JacobiSVD_MKL.h b/gtsam/3rdparty/Eigen/Eigen/src/SVD/JacobiSVD_MKL.h new file mode 100644 index 000000000..1eeafca49 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/SVD/JacobiSVD_MKL.h @@ -0,0 +1,92 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * Singular Value Decomposition - SVD. 
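+ *    based on the LAPACKE_?gesvd function.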
+ ******************************************************************************** +*/ + +#ifndef EIGEN_JACOBISVD_MKL_H +#define EIGEN_JACOBISVD_MKL_H + +#include "Eigen/src/Core/util/MKL_support.h" + +namespace Eigen { + +/** \internal Specialization for the data types supported by MKL */ + +#define EIGEN_MKL_SVD(EIGTYPE, MKLTYPE, MKLRTYPE, MKLPREFIX, EIGCOLROW, MKLCOLROW) \ +template<> \ +JacobiSVD, ColPivHouseholderQRPreconditioner>& \ +JacobiSVD, ColPivHouseholderQRPreconditioner>::compute(const Matrix& matrix, unsigned int computationOptions) \ +{ \ + typedef Matrix MatrixType; \ + typedef MatrixType::Scalar Scalar; \ + typedef MatrixType::RealScalar RealScalar; \ + allocate(matrix.rows(), matrix.cols(), computationOptions); \ +\ + /*const RealScalar precision = RealScalar(2) * NumTraits::epsilon();*/ \ + m_nonzeroSingularValues = m_diagSize; \ +\ + lapack_int lda = matrix.outerStride(), ldu, ldvt; \ + lapack_int matrix_order = MKLCOLROW; \ + char jobu, jobvt; \ + MKLTYPE *u, *vt, dummy; \ + jobu = (m_computeFullU) ? 'A' : (m_computeThinU) ? 'S' : 'N'; \ + jobvt = (m_computeFullV) ? 'A' : (m_computeThinV) ? 'S' : 'N'; \ + if (computeU()) { \ + ldu = m_matrixU.outerStride(); \ + u = (MKLTYPE*)m_matrixU.data(); \ + } else { ldu=1; u=&dummy; }\ + MatrixType localV; \ + ldvt = (m_computeFullV) ? m_cols : (m_computeThinV) ? m_diagSize : 1; \ + if (computeV()) { \ + localV.resize(ldvt, m_cols); \ + vt = (MKLTYPE*)localV.data(); \ + } else { ldvt=1; vt=&dummy; }\ + Matrix superb; superb.resize(m_diagSize, 1); \ + MatrixType m_temp; m_temp = matrix; \ + LAPACKE_##MKLPREFIX##gesvd( matrix_order, jobu, jobvt, m_rows, m_cols, (MKLTYPE*)m_temp.data(), lda, (MKLRTYPE*)m_singularValues.data(), u, ldu, vt, ldvt, superb.data()); \ + if (computeV()) m_matrixV = localV.adjoint(); \ + /* for(int i=0;i::bidiagonalization() const } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_BIDIAGONALIZATION_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/CMakeLists.txt b/gtsam/3rdparty/Eigen/Eigen/src/Sparse/CMakeLists.txt deleted file mode 100644 index aa1468812..000000000 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/CMakeLists.txt +++ /dev/null @@ -1,6 +0,0 @@ -FILE(GLOB Eigen_Sparse_SRCS "*.h") - -INSTALL(FILES - ${Eigen_Sparse_SRCS} - DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Sparse COMPONENT Devel - ) diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseMatrix.h b/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseMatrix.h deleted file mode 100644 index 0e175ec6e..000000000 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseMatrix.h +++ /dev/null @@ -1,651 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2008-2010 Gael Guennebaud -// -// Eigen is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 3 of the License, or (at your option) any later version. -// -// Alternatively, you can redistribute it and/or -// modify it under the terms of the GNU General Public License as -// published by the Free Software Foundation; either version 2 of -// the License, or (at your option) any later version. -// -// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY -// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the -// GNU General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public -// License and a copy of the GNU General Public License along with -// Eigen. If not, see . - -#ifndef EIGEN_SPARSEMATRIX_H -#define EIGEN_SPARSEMATRIX_H - -/** \ingroup Sparse_Module - * - * \class SparseMatrix - * - * \brief The main sparse matrix class - * - * This class implements a sparse matrix using the very common compressed row/column storage - * scheme. - * - * \tparam _Scalar the scalar type, i.e. the type of the coefficients - * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility - * is RowMajor. The default is 0 which means column-major. - * \tparam _Index the type of the indices. Default is \c int. - * - * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme. - * - * This class can be extended with the help of the plugin mechanism described on the page - * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN. - */ - -namespace internal { -template -struct traits > -{ - typedef _Scalar Scalar; - typedef _Index Index; - typedef Sparse StorageKind; - typedef MatrixXpr XprKind; - enum { - RowsAtCompileTime = Dynamic, - ColsAtCompileTime = Dynamic, - MaxRowsAtCompileTime = Dynamic, - MaxColsAtCompileTime = Dynamic, - Flags = _Options | NestByRefBit | LvalueBit, - CoeffReadCost = NumTraits::ReadCost, - SupportedAccessPatterns = InnerRandomAccessPattern - }; -}; - -} // end namespace internal - -template -class SparseMatrix - : public SparseMatrixBase > -{ - public: - EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix) -// using Base::operator=; - EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=) - EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=) - // FIXME: why are these operator already alvailable ??? - // EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(SparseMatrix, *=) - // EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(SparseMatrix, /=) - - typedef MappedSparseMatrix Map; - using Base::IsRowMajor; - typedef CompressedStorage Storage; - enum { - Options = _Options - }; - - protected: - - typedef SparseMatrix TransposedSparseMatrix; - - Index m_outerSize; - Index m_innerSize; - Index* m_outerIndex; - CompressedStorage m_data; - - public: - - inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } - inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } - - inline Index innerSize() const { return m_innerSize; } - inline Index outerSize() const { return m_outerSize; } - inline Index innerNonZeros(Index j) const { return m_outerIndex[j+1]-m_outerIndex[j]; } - - inline const Scalar* _valuePtr() const { return &m_data.value(0); } - inline Scalar* _valuePtr() { return &m_data.value(0); } - - inline const Index* _innerIndexPtr() const { return &m_data.index(0); } - inline Index* _innerIndexPtr() { return &m_data.index(0); } - - inline const Index* _outerIndexPtr() const { return m_outerIndex; } - inline Index* _outerIndexPtr() { return m_outerIndex; } - - inline Storage& data() { return m_data; } - inline const Storage& data() const { return m_data; } - - inline Scalar coeff(Index row, Index col) const - { - const Index outer = IsRowMajor ? row : col; - const Index inner = IsRowMajor ? col : row; - return m_data.atInRange(m_outerIndex[outer], m_outerIndex[outer+1], inner); - } - - inline Scalar& coeffRef(Index row, Index col) - { - const Index outer = IsRowMajor ? row : col; - const Index inner = IsRowMajor ? 
col : row; - - Index start = m_outerIndex[outer]; - Index end = m_outerIndex[outer+1]; - eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix"); - eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient"); - const Index p = m_data.searchLowerIndex(start,end-1,inner); - eigen_assert((p(m_data.size()); } - - /** Preallocates \a reserveSize non zeros */ - inline void reserve(Index reserveSize) - { - m_data.reserve(reserveSize); - } - - //--- low level purely coherent filling --- - - /** \returns a reference to the non zero coefficient at position \a row, \a col assuming that: - * - the nonzero does not already exist - * - the new coefficient is the last one according to the storage order - * - * Before filling a given inner vector you must call the statVec(Index) function. - * - * After an insertion session, you should call the finalize() function. - * - * \sa insert, insertBackByOuterInner, startVec */ - inline Scalar& insertBack(Index row, Index col) - { - return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row); - } - - /** \sa insertBack, startVec */ - inline Scalar& insertBackByOuterInner(Index outer, Index inner) - { - eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)"); - eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)=0 && m_outerIndex[previousOuter]==0) - { - m_outerIndex[previousOuter] = static_cast(m_data.size()); - --previousOuter; - } - m_outerIndex[outer+1] = m_outerIndex[outer]; - } - - // here we have to handle the tricky case where the outerIndex array - // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g., - // the 2nd inner vector... - bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0)) - && (size_t(m_outerIndex[outer+1]) == m_data.size()); - - size_t startId = m_outerIndex[outer]; - // FIXME let's make sure sizeof(long int) == sizeof(size_t) - size_t p = m_outerIndex[outer+1]; - ++m_outerIndex[outer+1]; - - float reallocRatio = 1; - if (m_data.allocatedSize()<=m_data.size()) - { - // if there is no preallocated memory, let's reserve a minimum of 32 elements - if (m_data.size()==0) - { - m_data.reserve(32); - } - else - { - // we need to reallocate the data, to reduce multiple reallocations - // we use a smart resize algorithm based on the current filling ratio - // in addition, we use float to avoid integers overflows - float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1); - reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size()); - // furthermore we bound the realloc ratio to: - // 1) reduce multiple minor realloc when the matrix is almost filled - // 2) avoid to allocate too much memory when the matrix is almost empty - reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f); - } - } - m_data.resize(m_data.size()+1,reallocRatio); - - if (!isLastVec) - { - if (previousOuter==-1) - { - // oops wrong guess. 
- // let's correct the outer offsets - for (Index k=0; k<=(outer+1); ++k) - m_outerIndex[k] = 0; - Index k=outer+1; - while(m_outerIndex[k]==0) - m_outerIndex[k++] = 1; - while (k<=m_outerSize && m_outerIndex[k]!=0) - m_outerIndex[k++]++; - p = 0; - --k; - k = m_outerIndex[k]-1; - while (k>0) - { - m_data.index(k) = m_data.index(k-1); - m_data.value(k) = m_data.value(k-1); - k--; - } - } - else - { - // we are not inserting into the last inner vec - // update outer indices: - Index j = outer+2; - while (j<=m_outerSize && m_outerIndex[j]!=0) - m_outerIndex[j++]++; - --j; - // shift data of last vecs: - Index k = m_outerIndex[j]-1; - while (k>=Index(p)) - { - m_data.index(k) = m_data.index(k-1); - m_data.value(k) = m_data.value(k-1); - k--; - } - } - } - - while ( (p > startId) && (m_data.index(p-1) > inner) ) - { - m_data.index(p) = m_data.index(p-1); - m_data.value(p) = m_data.value(p-1); - --p; - } - - m_data.index(p) = inner; - return (m_data.value(p) = 0); - } - - - - - /** Must be called after inserting a set of non zero entries. - */ - inline void finalize() - { - Index size = static_cast(m_data.size()); - Index i = m_outerSize; - // find the last filled column - while (i>=0 && m_outerIndex[i]==0) - --i; - ++i; - while (i<=m_outerSize) - { - m_outerIndex[i] = size; - ++i; - } - } - - /** Suppress all nonzeros which are smaller than \a reference under the tolerence \a epsilon */ - void prune(Scalar reference, RealScalar epsilon = NumTraits::dummy_precision()) - { - prune(default_prunning_func(reference,epsilon)); - } - - /** Suppress all nonzeros which do not satisfy the predicate \a keep. - * The functor type \a KeepFunc must implement the following function: - * \code - * bool operator() (const Index& row, const Index& col, const Scalar& value) const; - * \endcode - * \sa prune(Scalar,RealScalar) - */ - template - void prune(const KeepFunc& keep = KeepFunc()) - { - Index k = 0; - for(Index j=0; j - inline SparseMatrix(const SparseMatrixBase& other) - : m_outerSize(0), m_innerSize(0), m_outerIndex(0) - { - *this = other.derived(); - } - - /** Copy constructor */ - inline SparseMatrix(const SparseMatrix& other) - : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0) - { - *this = other.derived(); - } - - /** Swap the content of two sparse matrices of same type (optimization) */ - inline void swap(SparseMatrix& other) - { - //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n"); - std::swap(m_outerIndex, other.m_outerIndex); - std::swap(m_innerSize, other.m_innerSize); - std::swap(m_outerSize, other.m_outerSize); - m_data.swap(other.m_data); - } - - inline SparseMatrix& operator=(const SparseMatrix& other) - { -// std::cout << "SparseMatrix& operator=(const SparseMatrix& other)\n"; - if (other.isRValue()) - { - swap(other.const_cast_derived()); - } - else - { - resize(other.rows(), other.cols()); - memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index)); - m_data = other.m_data; - } - return *this; - } - - #ifndef EIGEN_PARSED_BY_DOXYGEN - template - inline SparseMatrix& operator=(const SparseSparseProduct& product) - { return Base::operator=(product); } - - template - inline SparseMatrix& operator=(const ReturnByValue& other) - { return Base::operator=(other); } - - template - inline SparseMatrix& operator=(const EigenBase& other) - { return Base::operator=(other); } - #endif - - template - EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase& other) - { - const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); - if 
(needToTranspose) - { - // two passes algorithm: - // 1 - compute the number of coeffs per dest inner vector - // 2 - do the actual copy/eval - // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed - typedef typename internal::nested::type OtherCopy; - typedef typename internal::remove_all::type _OtherCopy; - OtherCopy otherCopy(other.derived()); - - resize(other.rows(), other.cols()); - Eigen::Map > (m_outerIndex,outerSize()).setZero(); - // pass 1 - // FIXME the above copy could be merged with that pass - for (Index j=0; j::operator=(other.derived()); - } - } - - friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m) - { - EIGEN_DBG_SPARSE( - s << "Nonzero entries:\n"; - for (Index i=0; i&>(m); - return s; - } - - /** Destructor */ - inline ~SparseMatrix() - { - delete[] m_outerIndex; - } - - /** Overloaded for performance */ - Scalar sum() const; - - public: - - /** \deprecated use setZero() and reserve() - * Initializes the filling process of \c *this. - * \param reserveSize approximate number of nonzeros - * Note that the matrix \c *this is zero-ed. - */ - EIGEN_DEPRECATED void startFill(Index reserveSize = 1000) - { - setZero(); - m_data.reserve(reserveSize); - } - - /** \deprecated use insert() - * Like fill() but with random inner coordinates. - */ - EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col) - { - return insert(row,col); - } - - /** \deprecated use insert() - */ - EIGEN_DEPRECATED Scalar& fill(Index row, Index col) - { - const Index outer = IsRowMajor ? row : col; - const Index inner = IsRowMajor ? col : row; - - if (m_outerIndex[outer+1]==0) - { - // we start a new inner vector - Index i = outer; - while (i>=0 && m_outerIndex[i]==0) - { - m_outerIndex[i] = m_data.size(); - --i; - } - m_outerIndex[outer+1] = m_outerIndex[outer]; - } - else - { - eigen_assert(m_data.index(m_data.size()-1) -class SparseMatrix::InnerIterator -{ - public: - InnerIterator(const SparseMatrix& mat, Index outer) - : m_values(mat._valuePtr()), m_indices(mat._innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer]), m_end(mat.m_outerIndex[outer+1]) - {} - - inline InnerIterator& operator++() { m_id++; return *this; } - - inline const Scalar& value() const { return m_values[m_id]; } - inline Scalar& valueRef() { return const_cast(m_values[m_id]); } - - inline Index index() const { return m_indices[m_id]; } - inline Index outer() const { return m_outer; } - inline Index row() const { return IsRowMajor ? m_outer : index(); } - inline Index col() const { return IsRowMajor ? index() : m_outer; } - - inline operator bool() const { return (m_id < m_end); } - - protected: - const Scalar* m_values; - const Index* m_indices; - const Index m_outer; - Index m_id; - const Index m_end; -}; - -#endif // EIGEN_SPARSEMATRIX_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseSparseProduct.h b/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseSparseProduct.h deleted file mode 100644 index 19abcd1f8..000000000 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseSparseProduct.h +++ /dev/null @@ -1,401 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2008-2010 Gael Guennebaud -// -// Eigen is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 3 of the License, or (at your option) any later version. 
-// -// Alternatively, you can redistribute it and/or -// modify it under the terms of the GNU General Public License as -// published by the Free Software Foundation; either version 2 of -// the License, or (at your option) any later version. -// -// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY -// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public -// License and a copy of the GNU General Public License along with -// Eigen. If not, see . - -#ifndef EIGEN_SPARSESPARSEPRODUCT_H -#define EIGEN_SPARSESPARSEPRODUCT_H - -namespace internal { - -template -static void sparse_product_impl2(const Lhs& lhs, const Rhs& rhs, ResultType& res) -{ - typedef typename remove_all::type::Scalar Scalar; - typedef typename remove_all::type::Index Index; - - // make sure to call innerSize/outerSize since we fake the storage order. - Index rows = lhs.innerSize(); - Index cols = rhs.outerSize(); - eigen_assert(lhs.outerSize() == rhs.innerSize()); - - std::vector mask(rows,false); - Matrix values(rows); - Matrix indices(rows); - - // estimate the number of non zero entries - float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols())); - float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols); - float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f); - -// int t200 = rows/(log2(200)*1.39); -// int t = (rows*100)/139; - - res.resize(rows, cols); - res.reserve(Index(ratioRes*rows*cols)); - // we compute each column of the result, one after the other - for (Index j=0; j use a quick sort - // otherwise => loop through the entire vector - // In order to avoid to perform an expensive log2 when the - // result is clearly very sparse we use a linear bound up to 200. -// if((nnz<200 && nnz1) std::sort(indices.data(),indices.data()+nnz); -// for(int k=0; k -static void sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res) -{ -// return sparse_product_impl2(lhs,rhs,res); - - typedef typename remove_all::type::Scalar Scalar; - typedef typename remove_all::type::Index Index; - - // make sure to call innerSize/outerSize since we fake the storage order. 
- Index rows = lhs.innerSize(); - Index cols = rhs.outerSize(); - //int size = lhs.outerSize(); - eigen_assert(lhs.outerSize() == rhs.innerSize()); - - // allocate a temporary buffer - AmbiVector tempVector(rows); - - // estimate the number of non zero entries - float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols())); - float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols); - float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f); - - // mimics a resizeByInnerOuter: - if(ResultType::IsRowMajor) - res.resize(cols, rows); - else - res.resize(rows, cols); - - res.reserve(Index(ratioRes*rows*cols)); - for (Index j=0; j::Iterator it(tempVector); it; ++it) - res.insertBackByOuterInner(j,it.index()) = it.value(); - } - res.finalize(); -} - -template::Flags&RowMajorBit, - int RhsStorageOrder = traits::Flags&RowMajorBit, - int ResStorageOrder = traits::Flags&RowMajorBit> -struct sparse_product_selector; - -template -struct sparse_product_selector -{ - typedef typename traits::type>::Scalar Scalar; - - static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) - { -// std::cerr << __LINE__ << "\n"; - typename remove_all::type _res(res.rows(), res.cols()); - sparse_product_impl(lhs, rhs, _res); - res.swap(_res); - } -}; - -template -struct sparse_product_selector -{ - static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) - { -// std::cerr << __LINE__ << "\n"; - // we need a col-major matrix to hold the result - typedef SparseMatrix SparseTemporaryType; - SparseTemporaryType _res(res.rows(), res.cols()); - sparse_product_impl(lhs, rhs, _res); - res = _res; - } -}; - -template -struct sparse_product_selector -{ - static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) - { -// std::cerr << __LINE__ << "\n"; - // let's transpose the product to get a column x column product - typename remove_all::type _res(res.rows(), res.cols()); - sparse_product_impl(rhs, lhs, _res); - res.swap(_res); - } -}; - -template -struct sparse_product_selector -{ - static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) - { -// std::cerr << "here...\n"; - typedef SparseMatrix ColMajorMatrix; - ColMajorMatrix colLhs(lhs); - ColMajorMatrix colRhs(rhs); -// std::cerr << "more...\n"; - sparse_product_impl(colLhs, colRhs, res); -// std::cerr << "OK.\n"; - - // let's transpose the product to get a column x column product - -// typedef SparseMatrix SparseTemporaryType; -// SparseTemporaryType _res(res.cols(), res.rows()); -// sparse_product_impl(rhs, lhs, _res); -// res = _res.transpose(); - } -}; - -// NOTE the 2 others cases (col row *) must never occur since they are caught -// by ProductReturnType which transforms it to (col col *) by evaluating rhs. - -} // end namespace internal - -// sparse = sparse * sparse -template -template -inline Derived& SparseMatrixBase::operator=(const SparseSparseProduct& product) -{ -// std::cerr << "there..." 
<< typeid(Lhs).name() << " " << typeid(Lhs).name() << " " << (Derived::Flags&&RowMajorBit) << "\n"; - internal::sparse_product_selector< - typename internal::remove_all::type, - typename internal::remove_all::type, - Derived>::run(product.lhs(),product.rhs(),derived()); - return derived(); -} - -namespace internal { - -template::Flags&RowMajorBit, - int RhsStorageOrder = traits::Flags&RowMajorBit, - int ResStorageOrder = traits::Flags&RowMajorBit> -struct sparse_product_selector2; - -template -struct sparse_product_selector2 -{ - typedef typename traits::type>::Scalar Scalar; - - static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) - { - sparse_product_impl2(lhs, rhs, res); - } -}; - -template -struct sparse_product_selector2 -{ - static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) - { - // prevent warnings until the code is fixed - EIGEN_UNUSED_VARIABLE(lhs); - EIGEN_UNUSED_VARIABLE(rhs); - EIGEN_UNUSED_VARIABLE(res); - -// typedef SparseMatrix RowMajorMatrix; -// RowMajorMatrix rhsRow = rhs; -// RowMajorMatrix resRow(res.rows(), res.cols()); -// sparse_product_impl2(rhsRow, lhs, resRow); -// res = resRow; - } -}; - -template -struct sparse_product_selector2 -{ - static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) - { - typedef SparseMatrix RowMajorMatrix; - RowMajorMatrix lhsRow = lhs; - RowMajorMatrix resRow(res.rows(), res.cols()); - sparse_product_impl2(rhs, lhsRow, resRow); - res = resRow; - } -}; - -template -struct sparse_product_selector2 -{ - static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) - { - typedef SparseMatrix RowMajorMatrix; - RowMajorMatrix resRow(res.rows(), res.cols()); - sparse_product_impl2(rhs, lhs, resRow); - res = resRow; - } -}; - - -template -struct sparse_product_selector2 -{ - typedef typename traits::type>::Scalar Scalar; - - static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) - { - typedef SparseMatrix ColMajorMatrix; - ColMajorMatrix resCol(res.rows(), res.cols()); - sparse_product_impl2(lhs, rhs, resCol); - res = resCol; - } -}; - -template -struct sparse_product_selector2 -{ - static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) - { - typedef SparseMatrix ColMajorMatrix; - ColMajorMatrix lhsCol = lhs; - ColMajorMatrix resCol(res.rows(), res.cols()); - sparse_product_impl2(lhsCol, rhs, resCol); - res = resCol; - } -}; - -template -struct sparse_product_selector2 -{ - static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) - { - typedef SparseMatrix ColMajorMatrix; - ColMajorMatrix rhsCol = rhs; - ColMajorMatrix resCol(res.rows(), res.cols()); - sparse_product_impl2(lhs, rhsCol, resCol); - res = resCol; - } -}; - -template -struct sparse_product_selector2 -{ - static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) - { - typedef SparseMatrix ColMajorMatrix; -// ColMajorMatrix lhsTr(lhs); -// ColMajorMatrix rhsTr(rhs); -// ColMajorMatrix aux(res.rows(), res.cols()); -// sparse_product_impl2(rhs, lhs, aux); -// // ColMajorMatrix aux2 = aux.transpose(); -// res = aux; - typedef SparseMatrix ColMajorMatrix; - ColMajorMatrix lhsCol(lhs); - ColMajorMatrix rhsCol(rhs); - ColMajorMatrix resCol(res.rows(), res.cols()); - sparse_product_impl2(lhsCol, rhsCol, resCol); - res = resCol; - } -}; - -} // end namespace internal - -template -template -inline void SparseMatrixBase::_experimentalNewProduct(const Lhs& lhs, const Rhs& rhs) -{ - //derived().resize(lhs.rows(), rhs.cols()); - internal::sparse_product_selector2< - typename internal::remove_all::type, - typename 
internal::remove_all::type, - Derived>::run(lhs,rhs,derived()); -} - -// sparse * sparse -template -template -inline const typename SparseSparseProductReturnType::Type -SparseMatrixBase::operator*(const SparseMatrixBase &other) const -{ - return typename SparseSparseProductReturnType::Type(derived(), other.derived()); -} - -#endif // EIGEN_SPARSESPARSEPRODUCT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseTriangularView.h b/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseTriangularView.h deleted file mode 100644 index 319eaf066..000000000 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseTriangularView.h +++ /dev/null @@ -1,100 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2009 Gael Guennebaud -// -// Eigen is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 3 of the License, or (at your option) any later version. -// -// Alternatively, you can redistribute it and/or -// modify it under the terms of the GNU General Public License as -// published by the Free Software Foundation; either version 2 of -// the License, or (at your option) any later version. -// -// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY -// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public -// License and a copy of the GNU General Public License along with -// Eigen. If not, see . - -#ifndef EIGEN_SPARSE_TRIANGULARVIEW_H -#define EIGEN_SPARSE_TRIANGULARVIEW_H - -namespace internal { - -template -struct traits > -: public traits -{}; - -} // namespace internal - -template class SparseTriangularView - : public SparseMatrixBase > -{ - enum { SkipFirst = (Mode==Lower && !(MatrixType::Flags&RowMajorBit)) - || (Mode==Upper && (MatrixType::Flags&RowMajorBit)) }; - public: - - EIGEN_SPARSE_PUBLIC_INTERFACE(SparseTriangularView) - - class InnerIterator; - - inline Index rows() const { return m_matrix.rows(); } - inline Index cols() const { return m_matrix.cols(); } - - typedef typename internal::conditional::ret, - MatrixType, const MatrixType&>::type MatrixTypeNested; - - inline SparseTriangularView(const MatrixType& matrix) : m_matrix(matrix) {} - - /** \internal */ - inline const MatrixType& nestedExpression() const { return m_matrix; } - - template - typename internal::plain_matrix_type_column_major::type - solve(const MatrixBase& other) const; - - template void solveInPlace(MatrixBase& other) const; - template void solveInPlace(SparseMatrixBase& other) const; - - protected: - MatrixTypeNested m_matrix; -}; - -template -class SparseTriangularView::InnerIterator : public MatrixType::InnerIterator -{ - typedef typename MatrixType::InnerIterator Base; - public: - - EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, Index outer) - : Base(view.nestedExpression(), outer) - { - if(SkipFirst) - while((*this) && this->index()index() <= this->outer()); - } -}; - -template -template -inline const SparseTriangularView -SparseMatrixBase::triangularView() const -{ - return derived(); -} - -#endif // EIGEN_SPARSE_TRIANGULARVIEW_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SparseCholesky/CMakeLists.txt 
b/gtsam/3rdparty/Eigen/Eigen/src/SparseCholesky/CMakeLists.txt new file mode 100644 index 000000000..375a59d7a --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCholesky/CMakeLists.txt @@ -0,0 +1,6 @@ +FILE(GLOB Eigen_SparseCholesky_SRCS "*.h") + +INSTALL(FILES + ${Eigen_SparseCholesky_SRCS} + DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SparseCholesky COMPONENT Devel + ) diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h new file mode 100644 index 000000000..e5d98933f --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h @@ -0,0 +1,886 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +/* + +NOTE: the _symbolic, and _numeric functions has been adapted from + the LDL library: + +LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved. + +LDL License: + + Your use or distribution of LDL or any modified version of + LDL implies that you agree to this License. + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 + USA + + Permission is hereby granted to use or copy this program under the + terms of the GNU LGPL, provided that the Copyright, this License, + and the Availability of the original version is retained on all copies. + User documentation of any code that uses this code or any modified + version of this code must cite the Copyright, this License, the + Availability note, and "Used by permission." Permission to modify + the code and to distribute modified code is granted, provided the + Copyright, this License, and the Availability note are retained, + and a notice that the code was modified is included. 
+ */ + +#ifndef EIGEN_SIMPLICIAL_CHOLESKY_H +#define EIGEN_SIMPLICIAL_CHOLESKY_H + +namespace Eigen { + +enum SimplicialCholeskyMode { + SimplicialCholeskyLLT, + SimplicialCholeskyLDLT +}; + +/** \ingroup SparseCholesky_Module + * \brief A direct sparse Cholesky factorizations + * + * These classes provide LL^T and LDL^T Cholesky factorizations of sparse matrices that are + * selfadjoint and positive definite. The factorization allows for solving A.X = B where + * X and B can be either dense or sparse. + * + * In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization + * such that the factorized matrix is P A P^-1. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * + */ +template +class SimplicialCholeskyBase : internal::noncopyable +{ + public: + typedef typename internal::traits::MatrixType MatrixType; + enum { UpLo = internal::traits::UpLo }; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; + typedef SparseMatrix CholMatrixType; + typedef Matrix VectorType; + + public: + + /** Default constructor */ + SimplicialCholeskyBase() + : m_info(Success), m_isInitialized(false), m_shiftOffset(0), m_shiftScale(1) + {} + + SimplicialCholeskyBase(const MatrixType& matrix) + : m_info(Success), m_isInitialized(false), m_shiftOffset(0), m_shiftScale(1) + { + derived().compute(matrix); + } + + ~SimplicialCholeskyBase() + { + } + + Derived& derived() { return *static_cast(this); } + const Derived& derived() const { return *static_cast(this); } + + inline Index cols() const { return m_matrix.cols(); } + inline Index rows() const { return m_matrix.rows(); } + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was succesful, + * \c NumericalIssue if the matrix.appears to be negative. + */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "Decomposition is not initialized."); + return m_info; + } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * \sa compute() + */ + template + inline const internal::solve_retval + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "Simplicial LLT or LDLT is not initialized."); + eigen_assert(rows()==b.rows() + && "SimplicialCholeskyBase::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval(*this, b.derived()); + } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * \sa compute() + */ + template + inline const internal::sparse_solve_retval + solve(const SparseMatrixBase& b) const + { + eigen_assert(m_isInitialized && "Simplicial LLT or LDLT is not initialized."); + eigen_assert(rows()==b.rows() + && "SimplicialCholesky::solve(): invalid number of rows of the right hand side matrix b"); + return internal::sparse_solve_retval(*this, b.derived()); + } + + /** \returns the permutation P + * \sa permutationPinv() */ + const PermutationMatrix& permutationP() const + { return m_P; } + + /** \returns the inverse P^-1 of the permutation P + * \sa permutationP() */ + const PermutationMatrix& permutationPinv() const + { return m_Pinv; } + + /** Sets the shift parameters that will be used to adjust the diagonal coefficients during the numerical factorization. 
+ * + * During the numerical factorization, the diagonal coefficients are transformed by the following linear model:\n + * \c d_ii = \a offset + \a scale * \c d_ii + * + * The default is the identity transformation with \a offset=0, and \a scale=1. + * + * \returns a reference to \c *this. + */ + Derived& setShift(const RealScalar& offset, const RealScalar& scale = 1) + { + m_shiftOffset = offset; + m_shiftScale = scale; + return derived(); + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal */ + template + void dumpMemory(Stream& s) + { + int total = 0; + s << " L: " << ((total+=(m_matrix.cols()+1) * sizeof(int) + m_matrix.nonZeros()*(sizeof(int)+sizeof(Scalar))) >> 20) << "Mb" << "\n"; + s << " diag: " << ((total+=m_diag.size() * sizeof(Scalar)) >> 20) << "Mb" << "\n"; + s << " tree: " << ((total+=m_parent.size() * sizeof(int)) >> 20) << "Mb" << "\n"; + s << " nonzeros: " << ((total+=m_nonZerosPerCol.size() * sizeof(int)) >> 20) << "Mb" << "\n"; + s << " perm: " << ((total+=m_P.size() * sizeof(int)) >> 20) << "Mb" << "\n"; + s << " perm^-1: " << ((total+=m_Pinv.size() * sizeof(int)) >> 20) << "Mb" << "\n"; + s << " TOTAL: " << (total>> 20) << "Mb" << "\n"; + } + + /** \internal */ + template + void _solve(const MatrixBase &b, MatrixBase &dest) const + { + eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); + eigen_assert(m_matrix.rows()==b.rows()); + + if(m_info!=Success) + return; + + if(m_P.size()>0) + dest = m_P * b; + else + dest = b; + + if(m_matrix.nonZeros()>0) // otherwise L==I + derived().matrixL().solveInPlace(dest); + + if(m_diag.size()>0) + dest = m_diag.asDiagonal().inverse() * dest; + + if (m_matrix.nonZeros()>0) // otherwise U==I + derived().matrixU().solveInPlace(dest); + + if(m_P.size()>0) + dest = m_Pinv * dest; + } + + /** \internal */ + template + void _solve_sparse(const Rhs& b, SparseMatrix &dest) const + { + eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); + eigen_assert(m_matrix.rows()==b.rows()); + + // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix. 
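For illustration, the setShift() hook above and the sparse right-hand-side path below are used from client code roughly as follows; a minimal sketch assuming a selfadjoint SparseMatrix<double> A (lower triangle stored) and a sparse right-hand side B with A.rows() rows:

  Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > chol;
  chol.setShift(1e-10);   // optional regularization: d_ii := 1e-10 + 1.0 * d_ii during the factorization
  chol.compute(A);        // ordering + symbolic + numeric factorization of P A P^-1
  if (chol.info() == Eigen::Success)
  {
    // Sparse right-hand side: processed internally in dense blocks of NbColsAtOnce columns.
    Eigen::SparseMatrix<double> X = chol.solve(B);
  }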
+ static const int NbColsAtOnce = 4; + int rhsCols = b.cols(); + int size = b.rows(); + Eigen::Matrix tmp(size,rhsCols); + for(int k=0; k(rhsCols-k, NbColsAtOnce); + tmp.leftCols(actualCols) = b.middleCols(k,actualCols); + tmp.leftCols(actualCols) = derived().solve(tmp.leftCols(actualCols)); + dest.middleCols(k,actualCols) = tmp.leftCols(actualCols).sparseView(); + } + } + +#endif // EIGEN_PARSED_BY_DOXYGEN + + protected: + + /** Computes the sparse Cholesky decomposition of \a matrix */ + template + void compute(const MatrixType& matrix) + { + eigen_assert(matrix.rows()==matrix.cols()); + Index size = matrix.cols(); + CholMatrixType ap(size,size); + ordering(matrix, ap); + analyzePattern_preordered(ap, DoLDLT); + factorize_preordered(ap); + } + + template + void factorize(const MatrixType& a) + { + eigen_assert(a.rows()==a.cols()); + int size = a.cols(); + CholMatrixType ap(size,size); + ap.template selfadjointView() = a.template selfadjointView().twistedBy(m_P); + factorize_preordered(ap); + } + + template + void factorize_preordered(const CholMatrixType& a); + + void analyzePattern(const MatrixType& a, bool doLDLT) + { + eigen_assert(a.rows()==a.cols()); + int size = a.cols(); + CholMatrixType ap(size,size); + ordering(a, ap); + analyzePattern_preordered(ap,doLDLT); + } + void analyzePattern_preordered(const CholMatrixType& a, bool doLDLT); + + void ordering(const MatrixType& a, CholMatrixType& ap); + + /** keeps off-diagonal entries; drops diagonal entries */ + struct keep_diag { + inline bool operator() (const Index& row, const Index& col, const Scalar&) const + { + return row!=col; + } + }; + + mutable ComputationInfo m_info; + bool m_isInitialized; + bool m_factorizationIsOk; + bool m_analysisIsOk; + + CholMatrixType m_matrix; + VectorType m_diag; // the diagonal coefficients (LDLT mode) + VectorXi m_parent; // elimination tree + VectorXi m_nonZerosPerCol; + PermutationMatrix m_P; // the permutation + PermutationMatrix m_Pinv; // the inverse permutation + + RealScalar m_shiftOffset; + RealScalar m_shiftScale; +}; + +template class SimplicialLLT; +template class SimplicialLDLT; +template class SimplicialCholesky; + +namespace internal { + +template struct traits > +{ + typedef _MatrixType MatrixType; + enum { UpLo = _UpLo }; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; + typedef SparseMatrix CholMatrixType; + typedef SparseTriangularView MatrixL; + typedef SparseTriangularView MatrixU; + static inline MatrixL getL(const MatrixType& m) { return m; } + static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); } +}; + +template struct traits > +{ + typedef _MatrixType MatrixType; + enum { UpLo = _UpLo }; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; + typedef SparseMatrix CholMatrixType; + typedef SparseTriangularView MatrixL; + typedef SparseTriangularView MatrixU; + static inline MatrixL getL(const MatrixType& m) { return m; } + static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); } +}; + +template struct traits > +{ + typedef _MatrixType MatrixType; + enum { UpLo = _UpLo }; +}; + +} + +/** \ingroup SparseCholesky_Module + * \class SimplicialLLT + * \brief A direct sparse LLT Cholesky factorizations + * + * This class provides a LL^T Cholesky factorizations of sparse matrices that are + * selfadjoint and positive definite. The factorization allows for solving A.X = B where + * X and B can be either dense or sparse. 
+ * + * In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization + * such that the factorized matrix is P A P^-1. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * + * \sa class SimplicialLDLT + */ +template + class SimplicialLLT : public SimplicialCholeskyBase > +{ +public: + typedef _MatrixType MatrixType; + enum { UpLo = _UpLo }; + typedef SimplicialCholeskyBase Base; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; + typedef SparseMatrix CholMatrixType; + typedef Matrix VectorType; + typedef internal::traits Traits; + typedef typename Traits::MatrixL MatrixL; + typedef typename Traits::MatrixU MatrixU; +public: + /** Default constructor */ + SimplicialLLT() : Base() {} + /** Constructs and performs the LLT factorization of \a matrix */ + SimplicialLLT(const MatrixType& matrix) + : Base(matrix) {} + + /** \returns an expression of the factor L */ + inline const MatrixL matrixL() const { + eigen_assert(Base::m_factorizationIsOk && "Simplicial LLT not factorized"); + return Traits::getL(Base::m_matrix); + } + + /** \returns an expression of the factor U (= L^*) */ + inline const MatrixU matrixU() const { + eigen_assert(Base::m_factorizationIsOk && "Simplicial LLT not factorized"); + return Traits::getU(Base::m_matrix); + } + + /** Computes the sparse Cholesky decomposition of \a matrix */ + SimplicialLLT& compute(const MatrixType& matrix) + { + Base::template compute(matrix); + return *this; + } + + /** Performs a symbolic decomposition on the sparcity of \a matrix. + * + * This function is particularly useful when solving for several problems having the same structure. + * + * \sa factorize() + */ + void analyzePattern(const MatrixType& a) + { + Base::analyzePattern(a, false); + } + + /** Performs a numeric decomposition of \a matrix + * + * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. + * + * \sa analyzePattern() + */ + void factorize(const MatrixType& a) + { + Base::template factorize(a); + } + + /** \returns the determinant of the underlying matrix from the current factorization */ + Scalar determinant() const + { + Scalar detL = Base::m_matrix.diagonal().prod(); + return internal::abs2(detL); + } +}; + +/** \ingroup SparseCholesky_Module + * \class SimplicialLDLT + * \brief A direct sparse LDLT Cholesky factorizations without square root. + * + * This class provides a LDL^T Cholesky factorizations without square root of sparse matrices that are + * selfadjoint and positive definite. The factorization allows for solving A.X = B where + * X and B can be either dense or sparse. + * + * In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization + * such that the factorized matrix is P A P^-1. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. 
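The _UpLo template argument documented just above matters when only one triangular part of the selfadjoint matrix is actually stored. A minimal sketch, assuming a SparseMatrix<double> A in which only the upper triangle has been filled and a dense right-hand side b of matching size:

  // Tell the solver to read the upper triangle of A instead of the default lower one.
  Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>, Eigen::Upper> ldlt(A);
  Eigen::VectorXd x;
  if (ldlt.info() == Eigen::Success)
    x = ldlt.solve(b);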
+ * + * \sa class SimplicialLLT + */ +template + class SimplicialLDLT : public SimplicialCholeskyBase > +{ +public: + typedef _MatrixType MatrixType; + enum { UpLo = _UpLo }; + typedef SimplicialCholeskyBase Base; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; + typedef SparseMatrix CholMatrixType; + typedef Matrix VectorType; + typedef internal::traits Traits; + typedef typename Traits::MatrixL MatrixL; + typedef typename Traits::MatrixU MatrixU; +public: + /** Default constructor */ + SimplicialLDLT() : Base() {} + + /** Constructs and performs the LLT factorization of \a matrix */ + SimplicialLDLT(const MatrixType& matrix) + : Base(matrix) {} + + /** \returns a vector expression of the diagonal D */ + inline const VectorType vectorD() const { + eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized"); + return Base::m_diag; + } + /** \returns an expression of the factor L */ + inline const MatrixL matrixL() const { + eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized"); + return Traits::getL(Base::m_matrix); + } + + /** \returns an expression of the factor U (= L^*) */ + inline const MatrixU matrixU() const { + eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized"); + return Traits::getU(Base::m_matrix); + } + + /** Computes the sparse Cholesky decomposition of \a matrix */ + SimplicialLDLT& compute(const MatrixType& matrix) + { + Base::template compute(matrix); + return *this; + } + + /** Performs a symbolic decomposition on the sparcity of \a matrix. + * + * This function is particularly useful when solving for several problems having the same structure. + * + * \sa factorize() + */ + void analyzePattern(const MatrixType& a) + { + Base::analyzePattern(a, true); + } + + /** Performs a numeric decomposition of \a matrix + * + * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. 
+ * + * \sa analyzePattern() + */ + void factorize(const MatrixType& a) + { + Base::template factorize(a); + } + + /** \returns the determinant of the underlying matrix from the current factorization */ + Scalar determinant() const + { + return Base::m_diag.prod(); + } +}; + +/** \deprecated use SimplicialLDLT or class SimplicialLLT + * \ingroup SparseCholesky_Module + * \class SimplicialCholesky + * + * \sa class SimplicialLDLT, class SimplicialLLT + */ +template + class SimplicialCholesky : public SimplicialCholeskyBase > +{ +public: + typedef _MatrixType MatrixType; + enum { UpLo = _UpLo }; + typedef SimplicialCholeskyBase Base; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; + typedef SparseMatrix CholMatrixType; + typedef Matrix VectorType; + typedef internal::traits Traits; + typedef internal::traits > LDLTTraits; + typedef internal::traits > LLTTraits; + public: + SimplicialCholesky() : Base(), m_LDLT(true) {} + + SimplicialCholesky(const MatrixType& matrix) + : Base(), m_LDLT(true) + { + compute(matrix); + } + + SimplicialCholesky& setMode(SimplicialCholeskyMode mode) + { + switch(mode) + { + case SimplicialCholeskyLLT: + m_LDLT = false; + break; + case SimplicialCholeskyLDLT: + m_LDLT = true; + break; + default: + break; + } + + return *this; + } + + inline const VectorType vectorD() const { + eigen_assert(Base::m_factorizationIsOk && "Simplicial Cholesky not factorized"); + return Base::m_diag; + } + inline const CholMatrixType rawMatrix() const { + eigen_assert(Base::m_factorizationIsOk && "Simplicial Cholesky not factorized"); + return Base::m_matrix; + } + + /** Computes the sparse Cholesky decomposition of \a matrix */ + SimplicialCholesky& compute(const MatrixType& matrix) + { + if(m_LDLT) + Base::template compute(matrix); + else + Base::template compute(matrix); + return *this; + } + + /** Performs a symbolic decomposition on the sparcity of \a matrix. + * + * This function is particularly useful when solving for several problems having the same structure. + * + * \sa factorize() + */ + void analyzePattern(const MatrixType& a) + { + Base::analyzePattern(a, m_LDLT); + } + + /** Performs a numeric decomposition of \a matrix + * + * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. 
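For illustration, the analyzePattern()/factorize() split documented above is what allows the symbolic work to be reused across several matrices sharing one sparsity pattern. A self-contained sketch; the small SPD tridiagonal test matrix and right-hand side are assumptions made only for the example:

  #include <Eigen/Sparse>
  #include <iostream>
  #include <vector>

  int main()
  {
    typedef Eigen::SparseMatrix<double> SpMat;
    const int n = 5;

    // Assemble a simple SPD tridiagonal matrix, storing the lower triangle only
    // (the default _UpLo of the Simplicial* solvers is Lower).
    std::vector<Eigen::Triplet<double> > coeffs;
    for (int i = 0; i < n; ++i)
    {
      coeffs.push_back(Eigen::Triplet<double>(i, i, 2.0));
      if (i + 1 < n) coeffs.push_back(Eigen::Triplet<double>(i + 1, i, -1.0));
    }
    SpMat A(n, n);
    A.setFromTriplets(coeffs.begin(), coeffs.end());

    Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

    Eigen::SimplicialLDLT<SpMat> ldlt;
    ldlt.analyzePattern(A);   // symbolic step: reusable for any matrix with the same sparsity pattern
    ldlt.factorize(A);        // numeric step: repeat only this call when the values change
    if (ldlt.info() != Eigen::Success) return 1;

    Eigen::VectorXd x = ldlt.solve(b);
    std::cout << x.transpose() << std::endl;
    return 0;
  }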
+ * + * \sa analyzePattern() + */ + void factorize(const MatrixType& a) + { + if(m_LDLT) + Base::template factorize(a); + else + Base::template factorize(a); + } + + /** \internal */ + template + void _solve(const MatrixBase &b, MatrixBase &dest) const + { + eigen_assert(Base::m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); + eigen_assert(Base::m_matrix.rows()==b.rows()); + + if(Base::m_info!=Success) + return; + + if(Base::m_P.size()>0) + dest = Base::m_P * b; + else + dest = b; + + if(Base::m_matrix.nonZeros()>0) // otherwise L==I + { + if(m_LDLT) + LDLTTraits::getL(Base::m_matrix).solveInPlace(dest); + else + LLTTraits::getL(Base::m_matrix).solveInPlace(dest); + } + + if(Base::m_diag.size()>0) + dest = Base::m_diag.asDiagonal().inverse() * dest; + + if (Base::m_matrix.nonZeros()>0) // otherwise I==I + { + if(m_LDLT) + LDLTTraits::getU(Base::m_matrix).solveInPlace(dest); + else + LLTTraits::getU(Base::m_matrix).solveInPlace(dest); + } + + if(Base::m_P.size()>0) + dest = Base::m_Pinv * dest; + } + + Scalar determinant() const + { + if(m_LDLT) + { + return Base::m_diag.prod(); + } + else + { + Scalar detL = Diagonal(Base::m_matrix).prod(); + return internal::abs2(detL); + } + } + + protected: + bool m_LDLT; +}; + +template +void SimplicialCholeskyBase::ordering(const MatrixType& a, CholMatrixType& ap) +{ + eigen_assert(a.rows()==a.cols()); + const Index size = a.rows(); + // TODO allows to configure the permutation + // Note that amd compute the inverse permutation + { + CholMatrixType C; + C = a.template selfadjointView(); + // remove diagonal entries: + // seems not to be needed + // C.prune(keep_diag()); + internal::minimum_degree_ordering(C, m_Pinv); + } + + if(m_Pinv.size()>0) + m_P = m_Pinv.inverse(); + else + m_P.resize(0); + + ap.resize(size,size); + ap.template selfadjointView() = a.template selfadjointView().twistedBy(m_P); +} + +template +void SimplicialCholeskyBase::analyzePattern_preordered(const CholMatrixType& ap, bool doLDLT) +{ + const Index size = ap.rows(); + m_matrix.resize(size, size); + m_parent.resize(size); + m_nonZerosPerCol.resize(size); + + ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0); + + for(Index k = 0; k < size; ++k) + { + /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */ + m_parent[k] = -1; /* parent of k is not yet known */ + tags[k] = k; /* mark node k as visited */ + m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */ + for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it) + { + Index i = it.index(); + if(i < k) + { + /* follow path from i to root of etree, stop at flagged node */ + for(; tags[i] != k; i = m_parent[i]) + { + /* find parent of i if not yet determined */ + if (m_parent[i] == -1) + m_parent[i] = k; + m_nonZerosPerCol[i]++; /* L (k,i) is nonzero */ + tags[i] = k; /* mark i as visited */ + } + } + } + } + + /* construct Lp index array from m_nonZerosPerCol column counts */ + Index* Lp = m_matrix.outerIndexPtr(); + Lp[0] = 0; + for(Index k = 0; k < size; ++k) + Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 
0 : 1); + + m_matrix.resizeNonZeros(Lp[size]); + + m_isInitialized = true; + m_info = Success; + m_analysisIsOk = true; + m_factorizationIsOk = false; +} + + +template +template +void SimplicialCholeskyBase::factorize_preordered(const CholMatrixType& ap) +{ + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + eigen_assert(ap.rows()==ap.cols()); + const Index size = ap.rows(); + eigen_assert(m_parent.size()==size); + eigen_assert(m_nonZerosPerCol.size()==size); + + const Index* Lp = m_matrix.outerIndexPtr(); + Index* Li = m_matrix.innerIndexPtr(); + Scalar* Lx = m_matrix.valuePtr(); + + ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0); + ei_declare_aligned_stack_constructed_variable(Index, pattern, size, 0); + ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0); + + bool ok = true; + m_diag.resize(DoLDLT ? size : 0); + + for(Index k = 0; k < size; ++k) + { + // compute nonzero pattern of kth row of L, in topological order + y[k] = 0.0; // Y(0:k) is now all zero + Index top = size; // stack for pattern is empty + tags[k] = k; // mark node k as visited + m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L + for(typename MatrixType::InnerIterator it(ap,k); it; ++it) + { + Index i = it.index(); + if(i <= k) + { + y[i] += internal::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */ + Index len; + for(len = 0; tags[i] != k; i = m_parent[i]) + { + pattern[len++] = i; /* L(k,i) is nonzero */ + tags[i] = k; /* mark i as visited */ + } + while(len > 0) + pattern[--top] = pattern[--len]; + } + } + + /* compute numerical values kth row of L (a sparse triangular solve) */ + + RealScalar d = internal::real(y[k]) * m_shiftScale + m_shiftOffset; // get D(k,k), apply the shift function, and clear Y(k) + y[k] = 0.0; + for(; top < size; ++top) + { + Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */ + Scalar yi = y[i]; /* get and clear Y(i) */ + y[i] = 0.0; + + /* the nonzero entry L(k,i) */ + Scalar l_ki; + if(DoLDLT) + l_ki = yi / m_diag[i]; + else + yi = l_ki = yi / Lx[Lp[i]]; + + Index p2 = Lp[i] + m_nonZerosPerCol[i]; + Index p; + for(p = Lp[i] + (DoLDLT ? 0 : 1); p < p2; ++p) + y[Li[p]] -= internal::conj(Lx[p]) * yi; + d -= internal::real(l_ki * internal::conj(yi)); + Li[p] = k; /* store L(k,i) in column form of L */ + Lx[p] = l_ki; + ++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */ + } + if(DoLDLT) + { + m_diag[k] = d; + if(d == RealScalar(0)) + { + ok = false; /* failure, D(k,k) is zero */ + break; + } + } + else + { + Index p = Lp[k] + m_nonZerosPerCol[k]++; + Li[p] = k ; /* store L(k,k) = sqrt (d) in column k */ + if(d <= RealScalar(0)) { + ok = false; /* failure, matrix is not positive definite */ + break; + } + Lx[p] = internal::sqrt(d) ; + } + } + + m_info = ok ? 
Success : NumericalIssue; + m_factorizationIsOk = true; +} + +namespace internal { + +template +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef SimplicialCholeskyBase Dec; + EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec().derived()._solve(rhs(),dst); + } +}; + +template +struct sparse_solve_retval, Rhs> + : sparse_solve_retval_base, Rhs> +{ + typedef SimplicialCholeskyBase Dec; + EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec().derived()._solve_sparse(rhs(),dst); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SIMPLICIAL_CHOLESKY_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/AmbiVector.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/AmbiVector.h similarity index 98% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/AmbiVector.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/AmbiVector.h index 2ea8ba309..8ec63107a 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/AmbiVector.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/AmbiVector.h @@ -25,6 +25,10 @@ #ifndef EIGEN_AMBIVECTOR_H #define EIGEN_AMBIVECTOR_H +namespace Eigen { + +namespace internal { + /** \internal * Hybrid sparse/dense vector class designed for intensive read-write operations. * @@ -299,7 +303,7 @@ class AmbiVector<_Scalar,_Index>::Iterator * In practice, all coefficients having a magnitude smaller than \a epsilon * are skipped. */ - Iterator(const AmbiVector& vec, RealScalar epsilon = RealScalar(0.1)*NumTraits::dummy_precision()) + Iterator(const AmbiVector& vec, RealScalar epsilon = 0) : m_vector(vec) { m_epsilon = epsilon; @@ -315,7 +319,7 @@ class AmbiVector<_Scalar,_Index>::Iterator { ListEl* EIGEN_RESTRICT llElements = reinterpret_cast(m_vector.m_buffer); m_currentEl = m_vector.m_llStart; - while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)=0 && internal::abs(llElements[m_currentEl].value)<=m_epsilon) m_currentEl = llElements[m_currentEl].next; if (m_currentEl<0) { @@ -375,5 +379,8 @@ class AmbiVector<_Scalar,_Index>::Iterator bool m_isDense; // mode of the vector }; +} // end namespace internal + +} // end namespace Eigen #endif // EIGEN_AMBIVECTOR_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/CMakeLists.txt b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/CMakeLists.txt new file mode 100644 index 000000000..d860452a6 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/CMakeLists.txt @@ -0,0 +1,6 @@ +FILE(GLOB Eigen_SparseCore_SRCS "*.h") + +INSTALL(FILES + ${Eigen_SparseCore_SRCS} + DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SparseCore COMPONENT Devel + ) diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/CompressedStorage.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/CompressedStorage.h similarity index 95% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/CompressedStorage.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/CompressedStorage.h index b3bde272e..fa2bfd763 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/CompressedStorage.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/CompressedStorage.h @@ -25,7 +25,12 @@ #ifndef EIGEN_COMPRESSED_STORAGE_H #define EIGEN_COMPRESSED_STORAGE_H -/** Stores a sparse set of values as a list of values and a list of indices. +namespace Eigen { + +namespace internal { + +/** \internal + * Stores a sparse set of values as a list of values and a list of indices. 
* */ template @@ -218,8 +223,8 @@ class CompressedStorage Index* newIndices = new Index[size]; size_t copySize = (std::min)(size, m_size); // copy - memcpy(newValues, m_values, copySize * sizeof(Scalar)); - memcpy(newIndices, m_indices, copySize * sizeof(Index)); + internal::smart_copy(m_values, m_values+copySize, newValues); + internal::smart_copy(m_indices, m_indices+copySize, newIndices); // delete old stuff delete[] m_values; delete[] m_indices; @@ -236,4 +241,8 @@ class CompressedStorage }; +} // end namespace internal + +} // end namespace Eigen + #endif // EIGEN_COMPRESSED_STORAGE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h new file mode 100644 index 000000000..0fb4c1c97 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h @@ -0,0 +1,260 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2011 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H +#define EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H + +namespace Eigen { + +namespace internal { + +template +static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res) +{ + typedef typename remove_all::type::Scalar Scalar; + typedef typename remove_all::type::Index Index; + + // make sure to call innerSize/outerSize since we fake the storage order. + Index rows = lhs.innerSize(); + Index cols = rhs.outerSize(); + eigen_assert(lhs.outerSize() == rhs.innerSize()); + + std::vector mask(rows,false); + Matrix values(rows); + Matrix indices(rows); + + // estimate the number of non zero entries + // given a rhs column containing Y non zeros, we assume that the respective Y columns + // of the lhs differs in average of one non zeros, thus the number of non zeros for + // the product of a rhs column with the lhs is X+Y where X is the average number of non zero + // per column of the lhs. + // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs) + Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros(); + + res.setZero(); + res.reserve(Index(estimated_nnz_prod)); + // we compute each column of the result, one after the other + for (Index j=0; j use a quick sort + // otherwise => loop through the entire vector + // In order to avoid to perform an expensive log2 when the + // result is clearly very sparse we use a linear bound up to 200. 
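From the caller's point of view this kernel is reached through a plain sparse-sparse product; the reserve() above sizes the result with the nnz(lhs)+nnz(rhs) estimate before each result column is accumulated into the dense mask/values scratch. A short sketch, assuming two already-filled, size-compatible SparseMatrix<double> operands A and B (and that the product indeed evaluates through this conservative path, as the selectors below suggest):

  // The temporary result is pre-reserved with roughly A.nonZeros() + B.nonZeros() entries.
  Eigen::SparseMatrix<double> C = A * B;
  std::cout << "estimated nnz: " << A.nonZeros() + B.nonZeros()
            << "  actual nnz: "  << C.nonZeros() << std::endl;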
+ //if((nnz<200 && nnz1) std::sort(indices.data(),indices.data()+nnz); + for(int k=0; k::Flags&RowMajorBit, + int RhsStorageOrder = traits::Flags&RowMajorBit, + int ResStorageOrder = traits::Flags&RowMajorBit> +struct conservative_sparse_sparse_product_selector; + +template +struct conservative_sparse_sparse_product_selector +{ + typedef typename remove_all::type LhsCleaned; + typedef typename LhsCleaned::Scalar Scalar; + + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typedef SparseMatrix RowMajorMatrix; + typedef SparseMatrix ColMajorMatrix; + ColMajorMatrix resCol(lhs.rows(),rhs.cols()); + internal::conservative_sparse_sparse_product_impl(lhs, rhs, resCol); + // sort the non zeros: + RowMajorMatrix resRow(resCol); + res = resRow; + } +}; + +template +struct conservative_sparse_sparse_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typedef SparseMatrix RowMajorMatrix; + RowMajorMatrix rhsRow = rhs; + RowMajorMatrix resRow(lhs.rows(), rhs.cols()); + internal::conservative_sparse_sparse_product_impl(rhsRow, lhs, resRow); + res = resRow; + } +}; + +template +struct conservative_sparse_sparse_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typedef SparseMatrix RowMajorMatrix; + RowMajorMatrix lhsRow = lhs; + RowMajorMatrix resRow(lhs.rows(), rhs.cols()); + internal::conservative_sparse_sparse_product_impl(rhs, lhsRow, resRow); + res = resRow; + } +}; + +template +struct conservative_sparse_sparse_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typedef SparseMatrix RowMajorMatrix; + RowMajorMatrix resRow(lhs.rows(), rhs.cols()); + internal::conservative_sparse_sparse_product_impl(rhs, lhs, resRow); + res = resRow; + } +}; + + +template +struct conservative_sparse_sparse_product_selector +{ + typedef typename traits::type>::Scalar Scalar; + + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typedef SparseMatrix ColMajorMatrix; + ColMajorMatrix resCol(lhs.rows(), rhs.cols()); + internal::conservative_sparse_sparse_product_impl(lhs, rhs, resCol); + res = resCol; + } +}; + +template +struct conservative_sparse_sparse_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typedef SparseMatrix ColMajorMatrix; + ColMajorMatrix lhsCol = lhs; + ColMajorMatrix resCol(lhs.rows(), rhs.cols()); + internal::conservative_sparse_sparse_product_impl(lhsCol, rhs, resCol); + res = resCol; + } +}; + +template +struct conservative_sparse_sparse_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typedef SparseMatrix ColMajorMatrix; + ColMajorMatrix rhsCol = rhs; + ColMajorMatrix resCol(lhs.rows(), rhs.cols()); + internal::conservative_sparse_sparse_product_impl(lhs, rhsCol, resCol); + res = resCol; + } +}; + +template +struct conservative_sparse_sparse_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typedef SparseMatrix RowMajorMatrix; + typedef SparseMatrix ColMajorMatrix; + RowMajorMatrix resRow(lhs.rows(),rhs.cols()); + internal::conservative_sparse_sparse_product_impl(rhs, lhs, resRow); + // sort the non zeros: + ColMajorMatrix resCol(resRow); + res = resCol; + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/CoreIterators.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/CoreIterators.h 
similarity index 96% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/CoreIterators.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/CoreIterators.h index b4beaeee6..ea51e9231 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/CoreIterators.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/CoreIterators.h @@ -25,10 +25,13 @@ #ifndef EIGEN_COREITERATORS_H #define EIGEN_COREITERATORS_H +namespace Eigen { + /* This file contains the respective InnerIterator definition of the expressions defined in Eigen/Core */ -/** \class InnerIterator +/** \ingroup SparseCore_Module + * \class InnerIterator * \brief An InnerIterator allows to loop over the element of a sparse (or dense) matrix or expression * * todo @@ -68,4 +71,6 @@ template class DenseBase::InnerIterator const Index m_end; }; +} // end namespace Eigen + #endif // EIGEN_COREITERATORS_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/MappedSparseMatrix.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/MappedSparseMatrix.h similarity index 74% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/MappedSparseMatrix.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/MappedSparseMatrix.h index 31a431fb2..fc7f9d143 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/MappedSparseMatrix.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/MappedSparseMatrix.h @@ -25,6 +25,8 @@ #ifndef EIGEN_MAPPED_SPARSEMATRIX_H #define EIGEN_MAPPED_SPARSEMATRIX_H +namespace Eigen { + /** \class MappedSparseMatrix * * \brief Sparse matrix @@ -46,9 +48,9 @@ class MappedSparseMatrix { public: EIGEN_SPARSE_PUBLIC_INTERFACE(MappedSparseMatrix) + enum { IsRowMajor = Base::IsRowMajor }; protected: - enum { IsRowMajor = Base::IsRowMajor }; Index m_outerSize; Index m_innerSize; @@ -63,18 +65,17 @@ class MappedSparseMatrix inline Index cols() const { return IsRowMajor ? 
m_innerSize : m_outerSize; } inline Index innerSize() const { return m_innerSize; } inline Index outerSize() const { return m_outerSize; } - inline Index innerNonZeros(Index j) const { return m_outerIndex[j+1]-m_outerIndex[j]; } //---------------------------------------- // direct access interface - inline const Scalar* _valuePtr() const { return m_values; } - inline Scalar* _valuePtr() { return m_values; } + inline const Scalar* valuePtr() const { return m_values; } + inline Scalar* valuePtr() { return m_values; } - inline const Index* _innerIndexPtr() const { return m_innerIndices; } - inline Index* _innerIndexPtr() { return m_innerIndices; } + inline const Index* innerIndexPtr() const { return m_innerIndices; } + inline Index* innerIndexPtr() { return m_innerIndices; } - inline const Index* _outerIndexPtr() const { return m_outerIndex; } - inline Index* _outerIndexPtr() { return m_outerIndex; } + inline const Index* outerIndexPtr() const { return m_outerIndex; } + inline Index* outerIndexPtr() { return m_outerIndex; } //---------------------------------------- inline Scalar coeff(Index row, Index col) const @@ -112,6 +113,7 @@ class MappedSparseMatrix } class InnerIterator; + class ReverseInnerIterator; /** \returns the number of non zero coefficients */ inline Index nonZeros() const { return m_nnz; } @@ -132,23 +134,17 @@ class MappedSparseMatrix::InnerIterator InnerIterator(const MappedSparseMatrix& mat, Index outer) : m_matrix(mat), m_outer(outer), - m_id(mat._outerIndexPtr()[outer]), + m_id(mat.outerIndexPtr()[outer]), m_start(m_id), - m_end(mat._outerIndexPtr()[outer+1]) - {} - - template - InnerIterator(const Flagged& mat, Index outer) - : m_matrix(mat._expression()), m_id(m_matrix._outerIndexPtr()[outer]), - m_start(m_id), m_end(m_matrix._outerIndexPtr()[outer+1]) + m_end(mat.outerIndexPtr()[outer+1]) {} inline InnerIterator& operator++() { m_id++; return *this; } - inline Scalar value() const { return m_matrix._valuePtr()[m_id]; } - inline Scalar& valueRef() { return const_cast(m_matrix._valuePtr()[m_id]); } + inline Scalar value() const { return m_matrix.valuePtr()[m_id]; } + inline Scalar& valueRef() { return const_cast(m_matrix.valuePtr()[m_id]); } - inline Index index() const { return m_matrix._innerIndexPtr()[m_id]; } + inline Index index() const { return m_matrix.innerIndexPtr()[m_id]; } inline Index row() const { return IsRowMajor ? m_outer : index(); } inline Index col() const { return IsRowMajor ? index() : m_outer; } @@ -162,4 +158,37 @@ class MappedSparseMatrix::InnerIterator const Index m_end; }; +template +class MappedSparseMatrix::ReverseInnerIterator +{ + public: + ReverseInnerIterator(const MappedSparseMatrix& mat, Index outer) + : m_matrix(mat), + m_outer(outer), + m_id(mat.outerIndexPtr()[outer+1]), + m_start(mat.outerIndexPtr()[outer]), + m_end(m_id) + {} + + inline ReverseInnerIterator& operator--() { m_id--; return *this; } + + inline Scalar value() const { return m_matrix.valuePtr()[m_id-1]; } + inline Scalar& valueRef() { return const_cast(m_matrix.valuePtr()[m_id-1]); } + + inline Index index() const { return m_matrix.innerIndexPtr()[m_id-1]; } + inline Index row() const { return IsRowMajor ? m_outer : index(); } + inline Index col() const { return IsRowMajor ? 
index() : m_outer; } + + inline operator bool() const { return (m_id <= m_end) && (m_id>m_start); } + + protected: + const MappedSparseMatrix& m_matrix; + const Index m_outer; + Index m_id; + const Index m_start; + const Index m_end; +}; + +} // end namespace Eigen + #endif // EIGEN_MAPPED_SPARSEMATRIX_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseAssign.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseAssign.h similarity index 100% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseAssign.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseAssign.h diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseBlock.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseBlock.h similarity index 71% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseBlock.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseBlock.h index 8079c9999..189538f39 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseBlock.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseBlock.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SPARSE_BLOCK_H #define EIGEN_SPARSE_BLOCK_H +namespace Eigen { + namespace internal { template struct traits > @@ -65,6 +67,17 @@ class SparseInnerVectorSet : internal::no_assignment_operator, protected: Index m_outer; }; + class ReverseInnerIterator: public MatrixType::ReverseInnerIterator + { + public: + inline ReverseInnerIterator(const SparseInnerVectorSet& xpr, Index outer) + : MatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) + {} + inline Index row() const { return IsRowMajor ? m_outer : this->index(); } + inline Index col() const { return IsRowMajor ? this->index() : m_outer; } + protected: + Index m_outer; + }; inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize) : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize) @@ -101,15 +114,16 @@ class SparseInnerVectorSet : internal::no_assignment_operator, const internal::variable_if_dynamic m_outerSize; }; + /*************************************************************************** -* specialisation for DynamicSparseMatrix +* specialisation for SparseMatrix ***************************************************************************/ -template -class SparseInnerVectorSet, Size> - : public SparseMatrixBase, Size> > +template +class SparseInnerVectorSet, Size> + : public SparseMatrixBase, Size> > { - typedef DynamicSparseMatrix<_Scalar, _Options> MatrixType; + typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType; public: enum { IsRowMajor = internal::traits::IsRowMajor }; @@ -126,98 +140,11 @@ class SparseInnerVectorSet, Size> protected: Index m_outer; }; - - inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize) - : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize) - { - eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) ); - } - - inline SparseInnerVectorSet(const MatrixType& matrix, Index outer) - : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size) - { - eigen_assert(Size!=Dynamic); - eigen_assert( (outer>=0) && (outer - inline SparseInnerVectorSet& operator=(const SparseMatrixBase& other) - { - if (IsRowMajor != ((OtherDerived::Flags&RowMajorBit)==RowMajorBit)) - { - // need to transpose => perform a block evaluation followed by a big swap - DynamicSparseMatrix aux(other); - *this = aux.markAsRValue(); - } - else - { - // evaluate/copy vector per vector - for (Index j=0; j aux(other.innerVector(j)); - 
m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data()); - } - } - return *this; - } - - inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other) - { - return operator=(other); - } - - Index nonZeros() const - { - Index count = 0; - for (Index j=0; j0); - return m_matrix.data()[m_outerStart].vale(m_matrix.data()[m_outerStart].size()-1); - } - -// template -// inline SparseInnerVectorSet& operator=(const SparseMatrixBase& other) -// { -// return *this; -// } - - EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } - EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } - - protected: - - const typename MatrixType::Nested m_matrix; - Index m_outerStart; - const internal::variable_if_dynamic m_outerSize; - -}; - - -/*************************************************************************** -* specialisation for SparseMatrix -***************************************************************************/ - -template -class SparseInnerVectorSet, Size> - : public SparseMatrixBase, Size> > -{ - typedef SparseMatrix<_Scalar, _Options> MatrixType; - public: - - enum { IsRowMajor = internal::traits::IsRowMajor }; - - EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet) - class InnerIterator: public MatrixType::InnerIterator + class ReverseInnerIterator: public MatrixType::ReverseInnerIterator { public: - inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer) - : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) + inline ReverseInnerIterator(const SparseInnerVectorSet& xpr, Index outer) + : MatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) {} inline Index row() const { return IsRowMajor ? m_outer : this->index(); } inline Index col() const { return IsRowMajor ? this->index() : m_outer; } @@ -243,19 +170,19 @@ class SparseInnerVectorSet, Size> { typedef typename internal::remove_all::type _NestedMatrixType; _NestedMatrixType& matrix = const_cast<_NestedMatrixType&>(m_matrix);; - // This assignement is slow if this vector set not empty + // This assignement is slow if this vector set is not empty // and/or it is not at the end of the nonzeros of the underlying matrix. // 1 - eval to a temporary to avoid transposition and/or aliasing issues SparseMatrix tmp(other); // 2 - let's check whether there is enough allocated memory - Index nnz = tmp.nonZeros(); - Index nnz_previous = nonZeros(); - Index free_size = matrix.data().allocatedSize() - nnz_previous; - std::size_t nnz_head = m_outerStart==0 ? 0 : matrix._outerIndexPtr()[m_outerStart]; - std::size_t tail = m_matrix._outerIndexPtr()[m_outerStart+m_outerSize.value()]; - std::size_t nnz_tail = matrix.nonZeros() - tail; + Index nnz = tmp.nonZeros(); + Index nnz_previous = nonZeros(); + Index free_size = Index(matrix.data().allocatedSize()) + nnz_previous; + Index nnz_head = m_outerStart==0 ? 
0 : matrix.outerIndexPtr()[m_outerStart]; + Index tail = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; + Index nnz_tail = matrix.nonZeros() - tail; if(nnz>free_size) { @@ -298,15 +225,15 @@ class SparseInnerVectorSet, Size> // update outer index pointers Index p = nnz_head; - for(Index k=1; k, Size> return operator=(other); } - inline const Scalar* _valuePtr() const - { return m_matrix._valuePtr() + m_matrix._outerIndexPtr()[m_outerStart]; } - inline Scalar* _valuePtr() - { return m_matrix.const_cast_derived()._valuePtr() + m_matrix._outerIndexPtr()[m_outerStart]; } + inline const Scalar* valuePtr() const + { return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; } + inline Scalar* valuePtr() + { return m_matrix.const_cast_derived().valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; } - inline const Index* _innerIndexPtr() const - { return m_matrix._innerIndexPtr() + m_matrix._outerIndexPtr()[m_outerStart]; } - inline Index* _innerIndexPtr() - { return m_matrix.const_cast_derived()._innerIndexPtr() + m_matrix._outerIndexPtr()[m_outerStart]; } + inline const Index* innerIndexPtr() const + { return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; } + inline Index* innerIndexPtr() + { return m_matrix.const_cast_derived().innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; } - inline const Index* _outerIndexPtr() const - { return m_matrix._outerIndexPtr() + m_outerStart; } - inline Index* _outerIndexPtr() - { return m_matrix.const_cast_derived()._outerIndexPtr() + m_outerStart; } + inline const Index* outerIndexPtr() const + { return m_matrix.outerIndexPtr() + m_outerStart; } + inline Index* outerIndexPtr() + { return m_matrix.const_cast_derived().outerIndexPtr() + m_outerStart; } Index nonZeros() const { - return std::size_t(m_matrix._outerIndexPtr()[m_outerStart+m_outerSize.value()]) - - std::size_t(m_matrix._outerIndexPtr()[m_outerStart]); + if(m_matrix.isCompressed()) + return std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]) + - std::size_t(m_matrix.outerIndexPtr()[m_outerStart]); + else if(m_outerSize.value()==0) + return 0; + else + return Map >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum(); } const Scalar& lastCoeff() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(SparseInnerVectorSet); eigen_assert(nonZeros()>0); - return m_matrix._valuePtr()[m_matrix._outerIndexPtr()[m_outerStart+1]-1]; + if(m_matrix.isCompressed()) + return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1]; + else + return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1]; } // template @@ -356,7 +291,7 @@ class SparseInnerVectorSet, Size> protected: - const typename MatrixType::Nested m_matrix; + typename MatrixType::Nested m_matrix; Index m_outerStart; const internal::variable_if_dynamic m_outerSize; @@ -412,11 +347,9 @@ template const SparseInnerVectorSet SparseMatrixBase::innerVector(Index outer) const { return SparseInnerVectorSet(derived(), outer); } -//---------- - /** \returns the i-th row of the matrix \c *this. For row-major matrix only. */ template -SparseInnerVectorSet SparseMatrixBase::subrows(Index start, Index size) +SparseInnerVectorSet SparseMatrixBase::middleRows(Index start, Index size) { EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES); return innerVectors(start, size); @@ -425,7 +358,7 @@ SparseInnerVectorSet SparseMatrixBase::subrows(Index s /** \returns the i-th row of the matrix \c *this. 
For row-major matrix only. * (read-only version) */ template -const SparseInnerVectorSet SparseMatrixBase::subrows(Index start, Index size) const +const SparseInnerVectorSet SparseMatrixBase::middleRows(Index start, Index size) const { EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES); return innerVectors(start, size); @@ -433,7 +366,7 @@ const SparseInnerVectorSet SparseMatrixBase::subrows(I /** \returns the i-th column of the matrix \c *this. For column-major matrix only. */ template -SparseInnerVectorSet SparseMatrixBase::subcols(Index start, Index size) +SparseInnerVectorSet SparseMatrixBase::middleCols(Index start, Index size) { EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); return innerVectors(start, size); @@ -442,12 +375,14 @@ SparseInnerVectorSet SparseMatrixBase::subcols(Index s /** \returns the i-th column of the matrix \c *this. For column-major matrix only. * (read-only version) */ template -const SparseInnerVectorSet SparseMatrixBase::subcols(Index start, Index size) const +const SparseInnerVectorSet SparseMatrixBase::middleCols(Index start, Index size) const { EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); return innerVectors(start, size); } + + /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this * is col-major (resp. row-major). */ @@ -462,4 +397,6 @@ template const SparseInnerVectorSet SparseMatrixBase::innerVectors(Index outerStart, Index outerSize) const { return SparseInnerVectorSet(derived(), outerStart, outerSize); } +} // end namespace Eigen + #endif // EIGEN_SPARSE_BLOCK_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseCwiseBinaryOp.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h similarity index 84% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseCwiseBinaryOp.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h index cde5bbc03..28167066a 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseCwiseBinaryOp.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SPARSE_CWISE_BINARY_OP_H #define EIGEN_SPARSE_CWISE_BINARY_OP_H +namespace Eigen { + // Here we have to handle 3 cases: // 1 - sparse op dense // 2 - dense op sparse @@ -63,8 +65,18 @@ class CwiseBinaryOpImpl { public: class InnerIterator; + class ReverseInnerIterator; typedef CwiseBinaryOp Derived; EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) + CwiseBinaryOpImpl() + { + typedef typename internal::traits::StorageKind LhsStorageKind; + typedef typename internal::traits::StorageKind RhsStorageKind; + EIGEN_STATIC_ASSERT(( + (!internal::is_same::value) + || ((Lhs::Flags&RowMajorBit) == (Rhs::Flags&RowMajorBit))), + THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH); + } }; template @@ -76,7 +88,7 @@ class CwiseBinaryOpImpl::InnerIterator typedef internal::sparse_cwise_binary_op_inner_iterator_selector< BinaryOp,Lhs,Rhs, InnerIterator> Base; - EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, Index outer) + EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, typename CwiseBinaryOpImpl::Index outer) : Base(binOp.derived(),outer) {} }; @@ -246,7 +258,7 @@ class sparse_cwise_binary_op_inner_iterator_selector, Lhs, EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; } protected: - const RhsNested m_rhs; + RhsNested m_rhs; LhsIterator m_lhsIter; const BinaryFunc m_functor; const Index m_outer; @@ -298,16 +310,6 @@ class 
sparse_cwise_binary_op_inner_iterator_selector, Lhs, * Implementation of SparseMatrixBase and SparseCwise functions/operators ***************************************************************************/ -// template -// template -// EIGEN_STRONG_INLINE const CwiseBinaryOp::Scalar>, -// Derived, OtherDerived> -// SparseMatrixBase::operator-(const SparseMatrixBase &other) const -// { -// return CwiseBinaryOp, -// Derived, OtherDerived>(derived(), other.derived()); -// } - template template EIGEN_STRONG_INLINE Derived & @@ -316,14 +318,6 @@ SparseMatrixBase::operator-=(const SparseMatrixBase &othe return *this = derived() - other.derived(); } -// template -// template -// EIGEN_STRONG_INLINE const CwiseBinaryOp::Scalar>, Derived, OtherDerived> -// SparseMatrixBase::operator+(const SparseMatrixBase &other) const -// { -// return CwiseBinaryOp, Derived, OtherDerived>(derived(), other.derived()); -// } - template template EIGEN_STRONG_INLINE Derived & @@ -332,14 +326,6 @@ SparseMatrixBase::operator+=(const SparseMatrixBase& othe return *this = derived() + other.derived(); } -// template -// template -// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE -// SparseCwise::operator*(const SparseMatrixBase &other) const -// { -// return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(_expression(), other.derived()); -// } - template template EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE @@ -348,28 +334,6 @@ SparseMatrixBase::cwiseProduct(const MatrixBase &other) c return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(derived(), other.derived()); } -// template -// template -// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op) -// SparseCwise::operator/(const SparseMatrixBase &other) const -// { -// return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)(_expression(), other.derived()); -// } -// -// template -// template -// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op) -// SparseCwise::operator/(const MatrixBase &other) const -// { -// return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)(_expression(), other.derived()); -// } - -// template -// template -// inline ExpressionType& SparseCwise::operator*=(const SparseMatrixBase &other) -// { -// return m_matrix.const_cast_derived() = _expression() * other.derived(); -// } - +} // end namespace Eigen #endif // EIGEN_SPARSE_CWISE_BINARY_OP_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseCwiseUnaryOp.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h similarity index 50% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseCwiseUnaryOp.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h index aa068835f..2a63cf2af 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseCwiseUnaryOp.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h @@ -25,18 +25,7 @@ #ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H #define EIGEN_SPARSE_CWISE_UNARY_OP_H -// template -// struct internal::traits > : internal::traits -// { -// typedef typename internal::result_of< -// UnaryOp(typename MatrixType::Scalar) -// >::type Scalar; -// typedef typename MatrixType::Nested MatrixTypeNested; -// typedef typename internal::remove_reference::type _MatrixTypeNested; -// enum { -// CoeffReadCost = _MatrixTypeNested::CoeffReadCost + internal::functor_traits::Cost -// }; -// }; +namespace Eigen { template class CwiseUnaryOpImpl @@ -45,39 +34,61 @@ class CwiseUnaryOpImpl public: class 
InnerIterator; -// typedef typename internal::remove_reference::type _LhsNested; + class ReverseInnerIterator; typedef CwiseUnaryOp Derived; EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) + + protected: + typedef typename internal::traits::_XprTypeNested _MatrixTypeNested; + typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator; + typedef typename _MatrixTypeNested::ReverseInnerIterator MatrixTypeReverseIterator; }; template class CwiseUnaryOpImpl::InnerIterator + : public CwiseUnaryOpImpl::MatrixTypeIterator { typedef typename CwiseUnaryOpImpl::Scalar Scalar; - typedef typename internal::traits::_XprTypeNested _MatrixTypeNested; - typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator; - typedef typename MatrixType::Index Index; + typedef typename CwiseUnaryOpImpl::MatrixTypeIterator Base; public: - EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryOpImpl& unaryOp, Index outer) - : m_iter(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor()) + EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryOpImpl& unaryOp, typename CwiseUnaryOpImpl::Index outer) + : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor()) {} EIGEN_STRONG_INLINE InnerIterator& operator++() - { ++m_iter; return *this; } + { Base::operator++(); return *this; } - EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_iter.value()); } - - EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); } - EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); } - EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); } - - EIGEN_STRONG_INLINE operator bool() const { return m_iter; } + EIGEN_STRONG_INLINE typename CwiseUnaryOpImpl::Scalar value() const { return m_functor(Base::value()); } protected: - MatrixTypeIterator m_iter; const UnaryOp m_functor; + private: + typename CwiseUnaryOpImpl::Scalar& valueRef(); +}; + +template +class CwiseUnaryOpImpl::ReverseInnerIterator + : public CwiseUnaryOpImpl::MatrixTypeReverseIterator +{ + typedef typename CwiseUnaryOpImpl::Scalar Scalar; + typedef typename CwiseUnaryOpImpl::MatrixTypeReverseIterator Base; + public: + + EIGEN_STRONG_INLINE ReverseInnerIterator(const CwiseUnaryOpImpl& unaryOp, typename CwiseUnaryOpImpl::Index outer) + : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor()) + {} + + EIGEN_STRONG_INLINE ReverseInnerIterator& operator--() + { Base::operator--(); return *this; } + + EIGEN_STRONG_INLINE typename CwiseUnaryOpImpl::Scalar value() const { return m_functor(Base::value()); } + + protected: + const UnaryOp m_functor; + private: + typename CwiseUnaryOpImpl::Scalar& valueRef(); }; template @@ -87,39 +98,58 @@ class CwiseUnaryViewImpl public: class InnerIterator; -// typedef typename internal::remove_reference::type _LhsNested; + class ReverseInnerIterator; typedef CwiseUnaryView Derived; EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) + + protected: + typedef typename internal::traits::_MatrixTypeNested _MatrixTypeNested; + typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator; + typedef typename _MatrixTypeNested::ReverseInnerIterator MatrixTypeReverseIterator; }; template class CwiseUnaryViewImpl::InnerIterator + : public CwiseUnaryViewImpl::MatrixTypeIterator { typedef typename CwiseUnaryViewImpl::Scalar Scalar; - typedef typename internal::traits::_MatrixTypeNested _MatrixTypeNested; - typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator; - typedef typename MatrixType::Index Index; + typedef typename 
CwiseUnaryViewImpl::MatrixTypeIterator Base; public: - EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryViewImpl& unaryView, Index outer) - : m_iter(unaryView.derived().nestedExpression(),outer), m_functor(unaryView.derived().functor()) + EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryViewImpl& unaryOp, typename CwiseUnaryViewImpl::Index outer) + : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor()) {} EIGEN_STRONG_INLINE InnerIterator& operator++() - { ++m_iter; return *this; } + { Base::operator++(); return *this; } - EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_iter.value()); } - EIGEN_STRONG_INLINE Scalar& valueRef() { return m_functor(m_iter.valueRef()); } - - EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); } - EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); } - EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); } - - EIGEN_STRONG_INLINE operator bool() const { return m_iter; } + EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar value() const { return m_functor(Base::value()); } + EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar& valueRef() { return m_functor(Base::valueRef()); } + + protected: + const ViewOp m_functor; +}; + +template +class CwiseUnaryViewImpl::ReverseInnerIterator + : public CwiseUnaryViewImpl::MatrixTypeReverseIterator +{ + typedef typename CwiseUnaryViewImpl::Scalar Scalar; + typedef typename CwiseUnaryViewImpl::MatrixTypeReverseIterator Base; + public: + + EIGEN_STRONG_INLINE ReverseInnerIterator(const CwiseUnaryViewImpl& unaryOp, typename CwiseUnaryViewImpl::Index outer) + : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor()) + {} + + EIGEN_STRONG_INLINE ReverseInnerIterator& operator--() + { Base::operator--(); return *this; } + + EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar value() const { return m_functor(Base::value()); } + EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar& valueRef() { return m_functor(Base::valueRef()); } protected: - MatrixTypeIterator m_iter; const ViewOp m_functor; }; @@ -143,4 +173,6 @@ SparseMatrixBase::operator/=(const Scalar& other) return derived(); } +} // end namespace Eigen + #endif // EIGEN_SPARSE_CWISE_UNARY_OP_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseDenseProduct.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseDenseProduct.h similarity index 61% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseDenseProduct.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseDenseProduct.h index 0f77aa5be..00ba606be 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseDenseProduct.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseDenseProduct.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SPARSEDENSEPRODUCT_H #define EIGEN_SPARSEDENSEPRODUCT_H +namespace Eigen { + template struct SparseDenseProductReturnType { typedef SparseTimeDenseProduct Type; @@ -149,6 +151,102 @@ struct traits > typedef Dense StorageKind; typedef MatrixXpr XprKind; }; + +template +struct sparse_time_dense_product_impl; + +template +struct sparse_time_dense_product_impl +{ + typedef typename internal::remove_all::type Lhs; + typedef typename internal::remove_all::type Rhs; + typedef typename internal::remove_all::type Res; + typedef typename Lhs::Index Index; + typedef typename Lhs::InnerIterator LhsInnerIterator; + static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha) + { + for(Index c=0; c +struct 
sparse_time_dense_product_impl +{ + typedef typename internal::remove_all::type Lhs; + typedef typename internal::remove_all::type Rhs; + typedef typename internal::remove_all::type Res; + typedef typename Lhs::InnerIterator LhsInnerIterator; + typedef typename Lhs::Index Index; + static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha) + { + for(Index c=0; c +struct sparse_time_dense_product_impl +{ + typedef typename internal::remove_all::type Lhs; + typedef typename internal::remove_all::type Rhs; + typedef typename internal::remove_all::type Res; + typedef typename Lhs::InnerIterator LhsInnerIterator; + typedef typename Lhs::Index Index; + static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha) + { + for(Index j=0; j +struct sparse_time_dense_product_impl +{ + typedef typename internal::remove_all::type Lhs; + typedef typename internal::remove_all::type Rhs; + typedef typename internal::remove_all::type Res; + typedef typename Lhs::InnerIterator LhsInnerIterator; + typedef typename Lhs::Index Index; + static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha) + { + for(Index j=0; j +inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha) +{ + sparse_time_dense_product_impl::run(lhs, rhs, res, alpha); +} + } // end namespace internal template @@ -163,21 +261,7 @@ class SparseTimeDenseProduct template void scaleAndAddTo(Dest& dest, Scalar alpha) const { - typedef typename internal::remove_all::type _Lhs; - typedef typename internal::remove_all::type _Rhs; - typedef typename _Lhs::InnerIterator LhsInnerIterator; - enum { LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit }; - for(Index j=0; j void scaleAndAddTo(Dest& dest, Scalar alpha) const { - typedef typename internal::remove_all::type _Rhs; - typedef typename _Rhs::InnerIterator RhsInnerIterator; - enum { RhsIsRowMajor = (_Rhs::Flags&RowMajorBit)==RowMajorBit }; - for(Index j=0; j lhs_t(m_lhs); + Transpose rhs_t(m_rhs); + Transpose dest_t(dest); + internal::sparse_time_dense_product(rhs_t, lhs_t, dest_t, alpha); } private: @@ -228,4 +310,6 @@ SparseMatrixBase::operator*(const MatrixBase &other) cons return typename SparseDenseProductReturnType::Type(derived(), other.derived()); } +} // end namespace Eigen + #endif // EIGEN_SPARSEDENSEPRODUCT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseDiagonalProduct.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h similarity index 99% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseDiagonalProduct.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h index fb9a29c05..b09c4a715 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseDiagonalProduct.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H #define EIGEN_SPARSE_DIAGONAL_PRODUCT_H +namespace Eigen { + // The product of a diagonal matrix with a sparse matrix can be easily // implemented using expression template. 
// We have two consider very different cases: @@ -192,4 +194,6 @@ SparseMatrixBase::operator*(const DiagonalBase &other) co return SparseDiagonalProduct(this->derived(), other.derived()); } +} // end namespace Eigen + #endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseDot.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseDot.h similarity index 86% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseDot.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseDot.h index 1f10f71a4..ebb63d36b 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseDot.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseDot.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SPARSE_DOT_H #define EIGEN_SPARSE_DOT_H +namespace Eigen { + template template typename internal::traits::Scalar @@ -40,7 +42,7 @@ SparseMatrixBase::dot(const MatrixBase& other) const eigen_assert(other.size()>0 && "you are using a non initialized vector"); typename Derived::InnerIterator i(derived(),0); - Scalar res = 0; + Scalar res(0); while (i) { res += internal::conj(i.value()) * other.coeff(i.index()); @@ -62,9 +64,17 @@ SparseMatrixBase::dot(const SparseMatrixBase& other) cons eigen_assert(size() == other.size()); - typename Derived::InnerIterator i(derived(),0); - typename OtherDerived::InnerIterator j(other.derived(),0); - Scalar res = 0; + typedef typename Derived::Nested Nested; + typedef typename OtherDerived::Nested OtherNested; + typedef typename internal::remove_all::type NestedCleaned; + typedef typename internal::remove_all::type OtherNestedCleaned; + + const Nested nthis(derived()); + const OtherNested nother(other.derived()); + + typename NestedCleaned::InnerIterator i(nthis,0); + typename OtherNestedCleaned::InnerIterator j(nother,0); + Scalar res(0); while (i && j) { if (i.index()==j.index()) @@ -94,4 +104,6 @@ SparseMatrixBase::norm() const return internal::sqrt(squaredNorm()); } +} // end namespace Eigen + #endif // EIGEN_SPARSE_DOT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseFuzzy.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseFuzzy.h similarity index 100% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseFuzzy.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseFuzzy.h diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseMatrix.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseMatrix.h new file mode 100644 index 000000000..214f130f5 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseMatrix.h @@ -0,0 +1,1127 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSEMATRIX_H +#define EIGEN_SPARSEMATRIX_H + +namespace Eigen { + +/** \ingroup SparseCore_Module + * + * \class SparseMatrix + * + * \brief A versatible sparse matrix representation + * + * This class implements a more versatile variants of the common \em compressed row/column storage format. + * Each colmun's (resp. row) non zeros are stored as a pair of value with associated row (resp. colmiun) index. + * All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra + * space inbetween the nonzeros of two successive colmuns (resp. rows) such that insertion of new non-zero + * can be done with limited memory reallocation and copies. + * + * A call to the function makeCompressed() turns the matrix into the standard \em compressed format + * compatible with many library. + * + * More details on this storage sceheme are given in the \ref TutorialSparse "manual pages". + * + * \tparam _Scalar the scalar type, i.e. the type of the coefficients + * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility + * is RowMajor. The default is 0 which means column-major. + * \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN. + */ + +namespace internal { +template +struct traits > +{ + typedef _Scalar Scalar; + typedef _Index Index; + typedef Sparse StorageKind; + typedef MatrixXpr XprKind; + enum { + RowsAtCompileTime = Dynamic, + ColsAtCompileTime = Dynamic, + MaxRowsAtCompileTime = Dynamic, + MaxColsAtCompileTime = Dynamic, + Flags = _Options | NestByRefBit | LvalueBit, + CoeffReadCost = NumTraits::ReadCost, + SupportedAccessPatterns = InnerRandomAccessPattern + }; +}; + +template +struct traits, DiagIndex> > +{ + typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType; + typedef typename nested::type MatrixTypeNested; + typedef typename remove_reference::type _MatrixTypeNested; + + typedef _Scalar Scalar; + typedef Dense StorageKind; + typedef _Index Index; + typedef MatrixXpr XprKind; + + enum { + RowsAtCompileTime = Dynamic, + ColsAtCompileTime = 1, + MaxRowsAtCompileTime = Dynamic, + MaxColsAtCompileTime = 1, + Flags = 0, + CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10 + }; +}; + +} // end namespace internal + +template +class SparseMatrix + : public SparseMatrixBase > +{ + public: + EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix) + EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=) + EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=) + + typedef MappedSparseMatrix Map; + using Base::IsRowMajor; + typedef internal::CompressedStorage Storage; + enum { + Options = _Options + }; + + protected: + + typedef SparseMatrix TransposedSparseMatrix; + + Index m_outerSize; + Index m_innerSize; + Index* m_outerIndex; + Index* m_innerNonZeros; // optional, if null then the data is compressed + Storage m_data; + + Eigen::Map > innerNonZeros() { return Eigen::Map >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); } + const Eigen::Map > innerNonZeros() const { return Eigen::Map >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); } + + public: + + /** \returns 
whether \c *this is in compressed form. */ + inline bool isCompressed() const { return m_innerNonZeros==0; } + + /** \returns the number of rows of the matrix */ + inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } + /** \returns the number of columns of the matrix */ + inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } + + /** \returns the number of rows (resp. columns) of the matrix if the storage order column major (resp. row major) */ + inline Index innerSize() const { return m_innerSize; } + /** \returns the number of columns (resp. rows) of the matrix if the storage order column major (resp. row major) */ + inline Index outerSize() const { return m_outerSize; } + + /** \returns a const pointer to the array of values. + * This function is aimed at interoperability with other libraries. + * \sa innerIndexPtr(), outerIndexPtr() */ + inline const Scalar* valuePtr() const { return &m_data.value(0); } + /** \returns a non-const pointer to the array of values. + * This function is aimed at interoperability with other libraries. + * \sa innerIndexPtr(), outerIndexPtr() */ + inline Scalar* valuePtr() { return &m_data.value(0); } + + /** \returns a const pointer to the array of inner indices. + * This function is aimed at interoperability with other libraries. + * \sa valuePtr(), outerIndexPtr() */ + inline const Index* innerIndexPtr() const { return &m_data.index(0); } + /** \returns a non-const pointer to the array of inner indices. + * This function is aimed at interoperability with other libraries. + * \sa valuePtr(), outerIndexPtr() */ + inline Index* innerIndexPtr() { return &m_data.index(0); } + + /** \returns a const pointer to the array of the starting positions of the inner vectors. + * This function is aimed at interoperability with other libraries. + * \sa valuePtr(), innerIndexPtr() */ + inline const Index* outerIndexPtr() const { return m_outerIndex; } + /** \returns a non-const pointer to the array of the starting positions of the inner vectors. + * This function is aimed at interoperability with other libraries. + * \sa valuePtr(), innerIndexPtr() */ + inline Index* outerIndexPtr() { return m_outerIndex; } + + /** \returns a const pointer to the array of the number of non zeros of the inner vectors. + * This function is aimed at interoperability with other libraries. + * \warning it returns the null pointer 0 in compressed mode */ + inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; } + /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors. + * This function is aimed at interoperability with other libraries. + * \warning it returns the null pointer 0 in compressed mode */ + inline Index* innerNonZeroPtr() { return m_innerNonZeros; } + + /** \internal */ + inline Storage& data() { return m_data; } + /** \internal */ + inline const Storage& data() const { return m_data; } + + /** \returns the value of the matrix at position \a i, \a j + * This function returns Scalar(0) if the element is an explicit \em zero */ + inline Scalar coeff(Index row, Index col) const + { + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? col : row; + Index end = m_innerNonZeros ? 
m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1]; + return m_data.atInRange(m_outerIndex[outer], end, inner); + } + + /** \returns a non-const reference to the value of the matrix at position \a i, \a j + * + * If the element does not exist then it is inserted via the insert(Index,Index) function + * which itself turns the matrix into a non compressed form if that was not the case. + * + * This is a O(log(nnz_j)) operation (binary search) plus the cost of insert(Index,Index) + * function if the element does not already exist. + */ + inline Scalar& coeffRef(Index row, Index col) + { + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? col : row; + + Index start = m_outerIndex[outer]; + Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1]; + eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix"); + if(end<=start) + return insert(row,col); + const Index p = m_data.searchLowerIndex(start,end-1,inner); + if((p(m_data.size()); + } + + /** Preallocates \a reserveSize non zeros. + * + * Precondition: the matrix must be in compressed mode. */ + inline void reserve(Index reserveSize) + { + eigen_assert(isCompressed() && "This function does not make sense in non compressed mode."); + m_data.reserve(reserveSize); + } + + #ifdef EIGEN_PARSED_BY_DOXYGEN + /** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j. + * + * This function turns the matrix in non-compressed mode */ + template + inline void reserve(const SizesType& reserveSizes); + #else + template + inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type()) + { + EIGEN_UNUSED_VARIABLE(enableif); + reserveInnerVectors(reserveSizes); + } + template + inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif = typename SizesType::Scalar()) + { + EIGEN_UNUSED_VARIABLE(enableif); + reserveInnerVectors(reserveSizes); + } + #endif // EIGEN_PARSED_BY_DOXYGEN + protected: + template + inline void reserveInnerVectors(const SizesType& reserveSizes) + { + + if(isCompressed()) + { + std::size_t totalReserveSize = 0; + // turn the matrix into non-compressed mode + m_innerNonZeros = new Index[m_outerSize]; + + // temporarily use m_innerSizes to hold the new starting points. 
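+      // (the freshly allocated m_innerNonZeros array doubles as scratch space for these new starting
+      // offsets; the existing nonzeros are then shifted into place from the last inner vector back to
+      // the first so that no entry is overwritten, and each m_innerNonZeros[j] finally receives the
+      // actual nonzero count of inner vector j)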
+ Index* newOuterIndex = m_innerNonZeros; + + Index count = 0; + for(Index j=0; j=0; --j) + { + ptrdiff_t innerNNZ = previousOuterIndex - m_outerIndex[j]; + for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i) + { + m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i); + m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i); + } + previousOuterIndex = m_outerIndex[j]; + m_outerIndex[j] = newOuterIndex[j]; + m_innerNonZeros[j] = innerNNZ; + } + m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1]; + + m_data.resize(m_outerIndex[m_outerSize]); + } + else + { + Index* newOuterIndex = new Index[m_outerSize+1]; + Index count = 0; + for(Index j=0; j(reserveSizes[j], alreadyReserved); + count += toReserve + m_innerNonZeros[j]; + } + newOuterIndex[m_outerSize] = count; + + m_data.resize(count); + for(ptrdiff_t j=m_outerSize-1; j>=0; --j) + { + std::ptrdiff_t offset = newOuterIndex[j] - m_outerIndex[j]; + if(offset>0) + { + std::ptrdiff_t innerNNZ = m_innerNonZeros[j]; + for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i) + { + m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i); + m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i); + } + } + } + + std::swap(m_outerIndex, newOuterIndex); + delete[] newOuterIndex; + } + + } + public: + + //--- low level purely coherent filling --- + + /** \internal + * \returns a reference to the non zero coefficient at position \a row, \a col assuming that: + * - the nonzero does not already exist + * - the new coefficient is the last one according to the storage order + * + * Before filling a given inner vector you must call the statVec(Index) function. + * + * After an insertion session, you should call the finalize() function. + * + * \sa insert, insertBackByOuterInner, startVec */ + inline Scalar& insertBack(Index row, Index col) + { + return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row); + } + + /** \internal + * \sa insertBack, startVec */ + inline Scalar& insertBackByOuterInner(Index outer, Index inner) + { + eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)"); + eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)(m_data.size()); + Index i = m_outerSize; + // find the last filled column + while (i>=0 && m_outerIndex[i]==0) + --i; + ++i; + while (i<=m_outerSize) + { + m_outerIndex[i] = size; + ++i; + } + } + } + + //--- + + template + void setFromTriplets(const InputIterators& begin, const InputIterators& end); + + void sumupDuplicates(); + + //--- + + /** \internal + * same as insert(Index,Index) except that the indices are given relative to the storage order */ + EIGEN_DONT_INLINE Scalar& insertByOuterInner(Index j, Index i) + { + return insert(IsRowMajor ? j : i, IsRowMajor ? i : j); + } + + /** Turns the matrix into the \em compressed format. + */ + void makeCompressed() + { + if(isCompressed()) + return; + + Index oldStart = m_outerIndex[1]; + m_outerIndex[1] = m_innerNonZeros[0]; + for(Index j=1; j0) + { + for(Index k=0; k::dummy_precision()) + { + prune(default_prunning_func(reference,epsilon)); + } + + /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep. 
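+ * Entries for which \a keep returns \c false are removed. For instance, a user-supplied functor that
+ * returns \c true only for entries whose row index is greater than or equal to their column index would
+ * keep just the lower triangular part of the matrix.
+ *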
+ * The functor type \a KeepFunc must implement the following function: + * \code + * bool operator() (const Index& row, const Index& col, const Scalar& value) const; + * \endcode + * \sa prune(Scalar,RealScalar) + */ + template + void prune(const KeepFunc& keep = KeepFunc()) + { + // TODO optimize the uncompressed mode to avoid moving and allocating the data twice + // TODO also implement a unit test + makeCompressed(); + + Index k = 0; + for(Index j=0; j diagonal() const { return *this; } + + /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */ + inline SparseMatrix() + : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) + { + check_template_parameters(); + resize(0, 0); + } + + /** Constructs a \a rows \c x \a cols empty matrix */ + inline SparseMatrix(Index rows, Index cols) + : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) + { + check_template_parameters(); + resize(rows, cols); + } + + /** Constructs a sparse matrix from the sparse expression \a other */ + template + inline SparseMatrix(const SparseMatrixBase& other) + : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) + { + check_template_parameters(); + *this = other.derived(); + } + + /** Copy constructor (it performs a deep copy) */ + inline SparseMatrix(const SparseMatrix& other) + : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) + { + check_template_parameters(); + *this = other.derived(); + } + + /** Swaps the content of two sparse matrices of the same type. + * This is a fast operation that simply swaps the underlying pointers and parameters. */ + inline void swap(SparseMatrix& other) + { + //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n"); + std::swap(m_outerIndex, other.m_outerIndex); + std::swap(m_innerSize, other.m_innerSize); + std::swap(m_outerSize, other.m_outerSize); + std::swap(m_innerNonZeros, other.m_innerNonZeros); + m_data.swap(other.m_data); + } + + inline SparseMatrix& operator=(const SparseMatrix& other) + { + if (other.isRValue()) + { + swap(other.const_cast_derived()); + } + else + { + initAssignment(other); + if(other.isCompressed()) + { + memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index)); + m_data = other.m_data; + } + else + { + Base::operator=(other); + } + } + return *this; + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + inline SparseMatrix& operator=(const SparseSparseProduct& product) + { return Base::operator=(product); } + + template + inline SparseMatrix& operator=(const ReturnByValue& other) + { return Base::operator=(other.derived()); } + + template + inline SparseMatrix& operator=(const EigenBase& other) + { return Base::operator=(other.derived()); } + #endif + + template + EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase& other) + { + initAssignment(other.derived()); + const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); + if (needToTranspose) + { + // two passes algorithm: + // 1 - compute the number of coeffs per dest inner vector + // 2 - do the actual copy/eval + // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed + typedef typename internal::nested::type OtherCopy; + typedef typename internal::remove_all::type _OtherCopy; + OtherCopy otherCopy(other.derived()); + + Eigen::Map > (m_outerIndex,outerSize()).setZero(); + // pass 1 + // FIXME the above copy could be merged with that pass + for (Index j=0; j&>(m); + return s; + } + + /** Destructor */ + inline 
~SparseMatrix() + { + delete[] m_outerIndex; + delete[] m_innerNonZeros; + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** Overloaded for performance */ + Scalar sum() const; +#endif + +# ifdef EIGEN_SPARSEMATRIX_PLUGIN +# include EIGEN_SPARSEMATRIX_PLUGIN +# endif + +protected: + + template + void initAssignment(const Other& other) + { + resize(other.rows(), other.cols()); + if(m_innerNonZeros) + { + delete[] m_innerNonZeros; + m_innerNonZeros = 0; + } + } + + /** \internal + * \sa insert(Index,Index) */ + EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col) + { + eigen_assert(isCompressed()); + + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? col : row; + + Index previousOuter = outer; + if (m_outerIndex[outer+1]==0) + { + // we start a new inner vector + while (previousOuter>=0 && m_outerIndex[previousOuter]==0) + { + m_outerIndex[previousOuter] = static_cast(m_data.size()); + --previousOuter; + } + m_outerIndex[outer+1] = m_outerIndex[outer]; + } + + // here we have to handle the tricky case where the outerIndex array + // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g., + // the 2nd inner vector... + bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0)) + && (size_t(m_outerIndex[outer+1]) == m_data.size()); + + size_t startId = m_outerIndex[outer]; + // FIXME let's make sure sizeof(long int) == sizeof(size_t) + size_t p = m_outerIndex[outer+1]; + ++m_outerIndex[outer+1]; + + float reallocRatio = 1; + if (m_data.allocatedSize()<=m_data.size()) + { + // if there is no preallocated memory, let's reserve a minimum of 32 elements + if (m_data.size()==0) + { + m_data.reserve(32); + } + else + { + // we need to reallocate the data, to reduce multiple reallocations + // we use a smart resize algorithm based on the current filling ratio + // in addition, we use float to avoid integers overflows + float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1); + reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size()); + // furthermore we bound the realloc ratio to: + // 1) reduce multiple minor realloc when the matrix is almost filled + // 2) avoid to allocate too much memory when the matrix is almost empty + reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f); + } + } + m_data.resize(m_data.size()+1,reallocRatio); + + if (!isLastVec) + { + if (previousOuter==-1) + { + // oops wrong guess. 
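+        // (previousOuter==-1 means that every nonempty inner vector comes after this one, so the new
+        // coefficient has to be stored at the very beginning of the buffer instead of being appended at
+        // the end; the existing entries are shifted one position to the right below)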
+ // let's correct the outer offsets + for (Index k=0; k<=(outer+1); ++k) + m_outerIndex[k] = 0; + Index k=outer+1; + while(m_outerIndex[k]==0) + m_outerIndex[k++] = 1; + while (k<=m_outerSize && m_outerIndex[k]!=0) + m_outerIndex[k++]++; + p = 0; + --k; + k = m_outerIndex[k]-1; + while (k>0) + { + m_data.index(k) = m_data.index(k-1); + m_data.value(k) = m_data.value(k-1); + k--; + } + } + else + { + // we are not inserting into the last inner vec + // update outer indices: + Index j = outer+2; + while (j<=m_outerSize && m_outerIndex[j]!=0) + m_outerIndex[j++]++; + --j; + // shift data of last vecs: + Index k = m_outerIndex[j]-1; + while (k>=Index(p)) + { + m_data.index(k) = m_data.index(k-1); + m_data.value(k) = m_data.value(k-1); + k--; + } + } + } + + while ( (p > startId) && (m_data.index(p-1) > inner) ) + { + m_data.index(p) = m_data.index(p-1); + m_data.value(p) = m_data.value(p-1); + --p; + } + + m_data.index(p) = inner; + return (m_data.value(p) = 0); + } + + /** \internal + * A vector object that is equal to 0 everywhere but v at the position i */ + class SingletonVector + { + Index m_index; + Index m_value; + public: + typedef Index value_type; + SingletonVector(Index i, Index v) + : m_index(i), m_value(v) + {} + + Index operator[](Index i) const { return i==m_index ? m_value : 0; } + }; + + /** \internal + * \sa insert(Index,Index) */ + EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col) + { + eigen_assert(!isCompressed()); + + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? col : row; + + std::ptrdiff_t room = m_outerIndex[outer+1] - m_outerIndex[outer]; + std::ptrdiff_t innerNNZ = m_innerNonZeros[outer]; + if(innerNNZ>=room) + { + // this inner vector is full, we need to reallocate the whole buffer :( + reserve(SingletonVector(outer,std::max(2,innerNNZ))); + } + + Index startId = m_outerIndex[outer]; + Index p = startId + m_innerNonZeros[outer]; + while ( (p > startId) && (m_data.index(p-1) > inner) ) + { + m_data.index(p) = m_data.index(p-1); + m_data.value(p) = m_data.value(p-1); + --p; + } + + m_innerNonZeros[outer]++; + + m_data.index(p) = inner; + return (m_data.value(p) = 0); + } + +public: + /** \internal + * \sa insert(Index,Index) */ + inline Scalar& insertBackUncompressed(Index row, Index col) + { + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? 
col : row; + + eigen_assert(!isCompressed()); + eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer])); + + Index p = m_outerIndex[outer] + m_innerNonZeros[outer]; + m_innerNonZeros[outer]++; + m_data.index(p) = inner; + return (m_data.value(p) = 0); + } + +private: + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT(NumTraits::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); + } + + struct default_prunning_func { + default_prunning_func(Scalar ref, RealScalar eps) : reference(ref), epsilon(eps) {} + inline bool operator() (const Index&, const Index&, const Scalar& value) const + { + return !internal::isMuchSmallerThan(value, reference, epsilon); + } + Scalar reference; + RealScalar epsilon; + }; +}; + +template +class SparseMatrix::InnerIterator +{ + public: + InnerIterator(const SparseMatrix& mat, Index outer) + : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer]) + { + if(mat.isCompressed()) + m_end = mat.m_outerIndex[outer+1]; + else + m_end = m_id + mat.m_innerNonZeros[outer]; + } + + inline InnerIterator& operator++() { m_id++; return *this; } + + inline const Scalar& value() const { return m_values[m_id]; } + inline Scalar& valueRef() { return const_cast(m_values[m_id]); } + + inline Index index() const { return m_indices[m_id]; } + inline Index outer() const { return m_outer; } + inline Index row() const { return IsRowMajor ? m_outer : index(); } + inline Index col() const { return IsRowMajor ? index() : m_outer; } + + inline operator bool() const { return (m_id < m_end); } + + protected: + const Scalar* m_values; + const Index* m_indices; + const Index m_outer; + Index m_id; + Index m_end; +}; + +template +class SparseMatrix::ReverseInnerIterator +{ + public: + ReverseInnerIterator(const SparseMatrix& mat, Index outer) + : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer]) + { + if(mat.isCompressed()) + m_id = mat.m_outerIndex[outer+1]; + else + m_id = m_start + mat.m_innerNonZeros[outer]; + } + + inline ReverseInnerIterator& operator--() { --m_id; return *this; } + + inline const Scalar& value() const { return m_values[m_id-1]; } + inline Scalar& valueRef() { return const_cast(m_values[m_id-1]); } + + inline Index index() const { return m_indices[m_id-1]; } + inline Index outer() const { return m_outer; } + inline Index row() const { return IsRowMajor ? m_outer : index(); } + inline Index col() const { return IsRowMajor ? index() : m_outer; } + + inline operator bool() const { return (m_id > m_start); } + + protected: + const Scalar* m_values; + const Index* m_indices; + const Index m_outer; + Index m_id; + const Index m_start; +}; + +namespace internal { + +template +void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0) +{ + EIGEN_UNUSED_VARIABLE(Options); + enum { IsRowMajor = SparseMatrixType::IsRowMajor }; + typedef typename SparseMatrixType::Scalar Scalar; + typedef typename SparseMatrixType::Index Index; + SparseMatrix trMat(mat.rows(),mat.cols()); + + // pass 1: count the nnz per inner-vector + VectorXi wi(trMat.outerSize()); + wi.setZero(); + for(InputIterator it(begin); it!=end; ++it) + wi(IsRowMajor ? 
it->col() : it->row())++; + + // pass 2: insert all the elements into trMat + trMat.reserve(wi); + for(InputIterator it(begin); it!=end; ++it) + trMat.insertBackUncompressed(it->row(),it->col()) = it->value(); + + // pass 3: + trMat.sumupDuplicates(); + + // pass 4: transposed copy -> implicit sorting + mat = trMat; +} + +} + + +/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \b. + * + * A \em triplet is a tuple (i,j,value) defining a non-zero element. + * The input list of triplets does not have to be sorted, and can contains duplicated elements. + * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up. + * This is a \em O(n) operation, with \em n the number of triplet elements. + * The initial contents of \c *this is destroyed. + * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor, + * or the resize(Index,Index) method. The sizes are not extracted from the triplet list. + * + * The \a InputIterators value_type must provide the following interface: + * \code + * Scalar value() const; // the value + * Scalar row() const; // the row index i + * Scalar col() const; // the column index j + * \endcode + * See for instance the Eigen::Triplet template class. + * + * Here is a typical usage example: + * \code + typedef Triplet T; + std::vector tripletList; + triplets.reserve(estimation_of_entries); + for(...) + { + // ... + tripletList.push_back(T(i,j,v_ij)); + } + SparseMatrixType m(rows,cols); + m.setFromTriplets(tripletList.begin(), tripletList.end()); + // m is ready to go! + * \endcode + * + * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define + * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather + * be explicitely stored into a std::vector for instance. 
+ */ +template +template +void SparseMatrix::setFromTriplets(const InputIterators& begin, const InputIterators& end) +{ + internal::set_from_triplets(begin, end, *this); +} + +/** \internal */ +template +void SparseMatrix::sumupDuplicates() +{ + eigen_assert(!isCompressed()); + // TODO, in practice we should be able to use m_innerNonZeros for that task + VectorXi wi(innerSize()); + wi.fill(-1); + Index count = 0; + // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers + for(int j=0; j=start) + { + // we already meet this entry => accumulate it + m_data.value(wi(i)) += m_data.value(k); + } + else + { + m_data.value(count) = m_data.value(k); + m_data.index(count) = m_data.index(k); + wi(i) = count; + ++count; + } + } + m_outerIndex[j] = start; + } + m_outerIndex[m_outerSize] = count; + + // turn the matrix into compressed form + delete[] m_innerNonZeros; + m_innerNonZeros = 0; + m_data.resize(m_outerIndex[m_outerSize]); +} + +} // end namespace Eigen + +#endif // EIGEN_SPARSEMATRIX_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseMatrixBase.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseMatrixBase.h similarity index 55% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseMatrixBase.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseMatrixBase.h index c01981bc9..9d7b83577 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseMatrixBase.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseMatrixBase.h @@ -1,7 +1,7 @@ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // -// Copyright (C) 2008-2009 Gael Guennebaud +// Copyright (C) 2008-2011 Gael Guennebaud // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public @@ -25,7 +25,9 @@ #ifndef EIGEN_SPARSEMATRIXBASE_H #define EIGEN_SPARSEMATRIXBASE_H -/** \ingroup Sparse_Module +namespace Eigen { + +/** \ingroup SparseCore_Module * * \class SparseMatrixBase * @@ -44,6 +46,9 @@ template class SparseMatrixBase : public EigenBase typedef typename internal::packet_traits::type PacketScalar; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::Index Index; + typedef typename internal::add_const_on_value_type_if_arithmetic< + typename internal::packet_traits::type + >::type PacketReturnType; typedef SparseMatrixBase StorageBaseType; typedef EigenBase Base; @@ -54,8 +59,6 @@ template class SparseMatrixBase : public EigenBase other.derived().evalTo(derived()); return derived(); } - -// using Base::operator=; enum { @@ -107,15 +110,6 @@ template class SparseMatrixBase : public EigenBase #endif }; - /* \internal the return type of MatrixBase::conjugate() */ -// typedef typename internal::conditional::IsComplex, -// const SparseCwiseUnaryOp, Derived>, -// const Derived& -// >::type ConjugateReturnType; - /* \internal the return type of MatrixBase::real() */ -// typedef SparseCwiseUnaryOp, Derived> RealReturnType; - /* \internal the return type of MatrixBase::imag() */ -// typedef SparseCwiseUnaryOp, Derived> ImagReturnType; /** \internal the return type of MatrixBase::adjoint() */ typedef typename internal::conditional::IsComplex, CwiseUnaryOp, Eigen::Transpose >, @@ -125,16 +119,6 @@ template class SparseMatrixBase : public EigenBase typedef SparseMatrix PlainObject; -#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase -# include "../plugins/CommonCwiseUnaryOps.h" -# include "../plugins/CommonCwiseBinaryOps.h" -# 
include "../plugins/MatrixCwiseUnaryOps.h" -# include "../plugins/MatrixCwiseBinaryOps.h" -# ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN -# include EIGEN_SPARSEMATRIXBASE_PLUGIN -# endif -# undef EIGEN_CURRENT_STORAGE_BASE_CLASS -#undef EIGEN_CURRENT_STORAGE_BASE_CLASS #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is the "real scalar" type; if the \a Scalar type is already real numbers @@ -162,12 +146,24 @@ template class SparseMatrixBase : public EigenBase { return *static_cast(const_cast(this)); } #endif // not EIGEN_PARSED_BY_DOXYGEN - /** \returns the number of rows. \sa cols(), RowsAtCompileTime */ +#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase +# include "../plugins/CommonCwiseUnaryOps.h" +# include "../plugins/CommonCwiseBinaryOps.h" +# include "../plugins/MatrixCwiseUnaryOps.h" +# include "../plugins/MatrixCwiseBinaryOps.h" +# ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN +# include EIGEN_SPARSEMATRIXBASE_PLUGIN +# endif +# undef EIGEN_CURRENT_STORAGE_BASE_CLASS +#undef EIGEN_CURRENT_STORAGE_BASE_CLASS + + + /** \returns the number of rows. \sa cols() */ inline Index rows() const { return derived().rows(); } - /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/ + /** \returns the number of columns. \sa rows() */ inline Index cols() const { return derived().cols(); } /** \returns the number of coefficients, which is \a rows()*cols(). - * \sa rows(), cols(), SizeAtCompileTime. */ + * \sa rows(), cols(). */ inline Index size() const { return rows() * cols(); } /** \returns the number of nonzero coefficients which is in practice the number * of stored coefficients. */ @@ -188,16 +184,7 @@ template class SparseMatrixBase : public EigenBase Derived& markAsRValue() { m_isRValue = true; return derived(); } SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ } - - inline Derived& operator=(const Derived& other) - { -// std::cout << "Derived& operator=(const Derived& other)\n"; -// if (other.isRValue()) -// derived().swap(other.const_cast_derived()); -// else - this->operator=(other); - return derived(); - } + template Derived& operator=(const ReturnByValue& other) @@ -207,10 +194,54 @@ template class SparseMatrixBase : public EigenBase } + template + inline Derived& operator=(const SparseMatrixBase& other) + { + return assign(other.derived()); + } + + inline Derived& operator=(const Derived& other) + { +// if (other.isRValue()) +// derived().swap(other.const_cast_derived()); +// else + return assign(other.derived()); + } + + protected: + + template + inline Derived& assign(const OtherDerived& other) + { + const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); + const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? 
other.rows() : other.cols(); + if ((!transpose) && other.isRValue()) + { + // eval without temporary + derived().resize(other.rows(), other.cols()); + derived().setZero(); + derived().reserve((std::max)(this->rows(),this->cols())*2); + for (Index j=0; j inline void assignGeneric(const OtherDerived& other) { -// std::cout << "Derived& operator=(const MatrixBase& other)\n"; //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); eigen_assert(( ((internal::traits::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) || (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) && @@ -230,8 +261,7 @@ template class SparseMatrixBase : public EigenBase for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it) { Scalar v = it.value(); - if (v!=Scalar(0)) - temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v; + temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v; } } temp.finalize(); @@ -239,54 +269,23 @@ template class SparseMatrixBase : public EigenBase derived() = temp.markAsRValue(); } - - template - inline Derived& operator=(const SparseMatrixBase& other) - { -// std::cout << typeid(OtherDerived).name() << "\n"; -// std::cout << Flags << " " << OtherDerived::Flags << "\n"; - const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); -// std::cout << "eval transpose = " << transpose << "\n"; - const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols(); - if ((!transpose) && other.isRValue()) - { - // eval without temporary - derived().resize(other.rows(), other.cols()); - derived().setZero(); - derived().reserve((std::max)(this->rows(),this->cols())*2); - for (Index j=0; j inline Derived& operator=(const SparseSparseProduct& product); - template - inline void _experimentalNewProduct(const Lhs& lhs, const Rhs& rhs); - friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m) { + typedef typename Derived::Nested Nested; + typedef typename internal::remove_all::type NestedCleaned; + if (Flags&RowMajorBit) { - for (Index row=0; row class SparseMatrixBase : public EigenBase } else { + const Nested nm(m.derived()); if (m.cols() == 1) { Index row = 0; - for (typename Derived::InnerIterator it(m.derived(), 0); it; ++it) + for (typename NestedCleaned::InnerIterator it(nm.derived(), 0); it; ++it) { for ( ; row class SparseMatrixBase : public EigenBase } else { - SparseMatrix trans = m.derived(); - s << trans; + SparseMatrix trans = m; + s << static_cast >&>(trans); } } return s; } -// const SparseCwiseUnaryOp::Scalar>,Derived> operator-() const; - -// template -// const CwiseBinaryOp::Scalar>, Derived, OtherDerived> -// operator+(const SparseMatrixBase &other) const; - -// template -// const CwiseBinaryOp::Scalar>, Derived, OtherDerived> -// operator-(const SparseMatrixBase &other) const; - template Derived& operator+=(const SparseMatrixBase& other); template Derived& operator-=(const SparseMatrixBase& other); -// template -// Derived& operator+=(const Flagged, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other); - Derived& operator*=(const Scalar& other); Derived& operator/=(const Scalar& other); @@ -358,16 +345,6 @@ template class SparseMatrixBase : public EigenBase EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE cwiseProduct(const MatrixBase &other) const; -// const SparseCwiseUnaryOp::Scalar>, Derived> -// operator*(const Scalar& scalar) const; -// const 
SparseCwiseUnaryOp::Scalar>, Derived> -// operator/(const Scalar& scalar) const; - -// inline friend const SparseCwiseUnaryOp::Scalar>, Derived> -// operator*(const Scalar& scalar, const SparseMatrixBase& matrix) -// { return matrix*scalar; } - - // sparse * sparse template const typename SparseSparseProductReturnType::Type @@ -394,6 +371,12 @@ template class SparseMatrixBase : public EigenBase template const typename SparseDenseProductReturnType::Type operator*(const MatrixBase &other) const; + + /** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */ + SparseSymmetricPermutationProduct twistedBy(const PermutationMatrix& perm) const + { + return SparseSymmetricPermutationProduct(derived(), perm); + } template Derived& operator*=(const SparseMatrixBase& other); @@ -407,8 +390,6 @@ template class SparseMatrixBase : public EigenBase // deprecated template void solveTriangularInPlace(MatrixBase& other) const; -// template -// void solveTriangularInPlace(SparseMatrixBase& other) const; #endif // EIGEN2_SUPPORT template @@ -421,12 +402,9 @@ template class SparseMatrixBase : public EigenBase template Scalar dot(const SparseMatrixBase& other) const; RealScalar squaredNorm() const; RealScalar norm() const; -// const PlainObject normalized() const; -// void normalize(); Transpose transpose() { return derived(); } const Transpose transpose() const { return derived(); } - // void transposeInPlace(); const AdjointReturnType adjoint() const { return transpose(); } // sub-vector @@ -442,77 +420,14 @@ template class SparseMatrixBase : public EigenBase const SparseInnerVectorSet subrows(Index start, Index size) const; SparseInnerVectorSet subcols(Index start, Index size); const SparseInnerVectorSet subcols(Index start, Index size) const; + + SparseInnerVectorSet middleRows(Index start, Index size); + const SparseInnerVectorSet middleRows(Index start, Index size) const; + SparseInnerVectorSet middleCols(Index start, Index size); + const SparseInnerVectorSet middleCols(Index start, Index size) const; SparseInnerVectorSet innerVectors(Index outerStart, Index outerSize); const SparseInnerVectorSet innerVectors(Index outerStart, Index outerSize) const; -// typename BlockReturnType::Type block(int startRow, int startCol, int blockRows, int blockCols); -// const typename BlockReturnType::Type -// block(int startRow, int startCol, int blockRows, int blockCols) const; -// -// typename BlockReturnType::SubVectorType segment(int start, int size); -// const typename BlockReturnType::SubVectorType segment(int start, int size) const; -// -// typename BlockReturnType::SubVectorType start(int size); -// const typename BlockReturnType::SubVectorType start(int size) const; -// -// typename BlockReturnType::SubVectorType end(int size); -// const typename BlockReturnType::SubVectorType end(int size) const; -// -// template -// typename BlockReturnType::Type block(int startRow, int startCol); -// template -// const typename BlockReturnType::Type block(int startRow, int startCol) const; - -// template typename BlockReturnType::SubVectorType start(void); -// template const typename BlockReturnType::SubVectorType start() const; - -// template typename BlockReturnType::SubVectorType end(); -// template const typename BlockReturnType::SubVectorType end() const; - -// template typename BlockReturnType::SubVectorType segment(int start); -// template const typename BlockReturnType::SubVectorType segment(int start) const; - -// Diagonal diagonal(); -// const Diagonal diagonal() const; - -// template 
Part part(); -// template const Part part() const; - - -// static const ConstantReturnType Constant(int rows, int cols, const Scalar& value); -// static const ConstantReturnType Constant(int size, const Scalar& value); -// static const ConstantReturnType Constant(const Scalar& value); - -// template -// static const CwiseNullaryOp NullaryExpr(int rows, int cols, const CustomNullaryOp& func); -// template -// static const CwiseNullaryOp NullaryExpr(int size, const CustomNullaryOp& func); -// template -// static const CwiseNullaryOp NullaryExpr(const CustomNullaryOp& func); - -// static const ConstantReturnType Zero(int rows, int cols); -// static const ConstantReturnType Zero(int size); -// static const ConstantReturnType Zero(); -// static const ConstantReturnType Ones(int rows, int cols); -// static const ConstantReturnType Ones(int size); -// static const ConstantReturnType Ones(); -// static const IdentityReturnType Identity(); -// static const IdentityReturnType Identity(int rows, int cols); -// static const BasisReturnType Unit(int size, int i); -// static const BasisReturnType Unit(int i); -// static const BasisReturnType UnitX(); -// static const BasisReturnType UnitY(); -// static const BasisReturnType UnitZ(); -// static const BasisReturnType UnitW(); - -// const DiagonalMatrix asDiagonal() const; - -// Derived& setConstant(const Scalar& value); -// Derived& setZero(); -// Derived& setOnes(); -// Derived& setRandom(); -// Derived& setIdentity(); - /** \internal use operator= */ template void evalTo(MatrixBase& dst) const @@ -537,37 +452,6 @@ template class SparseMatrixBase : public EigenBase bool isApprox(const MatrixBase& other, RealScalar prec = NumTraits::dummy_precision()) const { return toDense().isApprox(other,prec); } -// bool isMuchSmallerThan(const RealScalar& other, -// RealScalar prec = NumTraits::dummy_precision()) const; -// template -// bool isMuchSmallerThan(const MatrixBase& other, -// RealScalar prec = NumTraits::dummy_precision()) const; - -// bool isApproxToConstant(const Scalar& value, RealScalar prec = NumTraits::dummy_precision()) const; -// bool isZero(RealScalar prec = NumTraits::dummy_precision()) const; -// bool isOnes(RealScalar prec = NumTraits::dummy_precision()) const; -// bool isIdentity(RealScalar prec = NumTraits::dummy_precision()) const; -// bool isDiagonal(RealScalar prec = NumTraits::dummy_precision()) const; - -// bool isUpper(RealScalar prec = NumTraits::dummy_precision()) const; -// bool isLower(RealScalar prec = NumTraits::dummy_precision()) const; - -// template -// bool isOrthogonal(const MatrixBase& other, -// RealScalar prec = NumTraits::dummy_precision()) const; -// bool isUnitary(RealScalar prec = NumTraits::dummy_precision()) const; - -// template -// inline bool operator==(const MatrixBase& other) const -// { return (cwise() == other).all(); } - -// template -// inline bool operator!=(const MatrixBase& other) const -// { return (cwise() != other).any(); } - - -// template -// const SparseCwiseUnaryOp::Scalar, NewType>, Derived> cast() const; /** \returns the matrix or vector obtained by evaluating this expression. * @@ -577,130 +461,13 @@ template class SparseMatrixBase : public EigenBase inline const typename internal::eval::type eval() const { return typename internal::eval::type(derived()); } -// template -// void swap(MatrixBase const & other); - -// template -// const SparseFlagged marked() const; -// const Flagged lazy() const; - - /** \returns number of elements to skip to pass from one row (resp. 
column) to another - * for a row-major (resp. column-major) matrix. - * Combined with coeffRef() and the \ref flags flags, it allows a direct access to the data - * of the underlying matrix. - */ -// inline int stride(void) const { return derived().stride(); } - -// FIXME -// ConjugateReturnType conjugate() const; -// const RealReturnType real() const; -// const ImagReturnType imag() const; - -// template -// const SparseCwiseUnaryOp unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const; - -// template -// const CwiseBinaryOp -// binaryExpr(const MatrixBase &other, const CustomBinaryOp& func = CustomBinaryOp()) const; - - Scalar sum() const; -// Scalar trace() const; - -// typename internal::traits::Scalar minCoeff() const; -// typename internal::traits::Scalar maxCoeff() const; - -// typename internal::traits::Scalar minCoeff(int* row, int* col = 0) const; -// typename internal::traits::Scalar maxCoeff(int* row, int* col = 0) const; - -// template -// typename internal::result_of::Scalar)>::type -// redux(const BinaryOp& func) const; - -// template -// void visit(Visitor& func) const; - - -// const SparseCwise cwise() const; -// SparseCwise cwise(); - -// inline const WithFormat format(const IOFormat& fmt) const; - -/////////// Array module /////////// - /* - bool all(void) const; - bool any(void) const; - - const VectorwiseOp rowwise() const; - const VectorwiseOp colwise() const; - - static const CwiseNullaryOp,Derived> Random(int rows, int cols); - static const CwiseNullaryOp,Derived> Random(int size); - static const CwiseNullaryOp,Derived> Random(); - - template - const Select - select(const MatrixBase& thenMatrix, - const MatrixBase& elseMatrix) const; - - template - inline const Select - select(const MatrixBase& thenMatrix, typename ThenDerived::Scalar elseScalar) const; - - template - inline const Select - select(typename ElseDerived::Scalar thenScalar, const MatrixBase& elseMatrix) const; - - template RealScalar lpNorm() const; - */ - - -// template -// Scalar dot(const MatrixBase& other) const -// { -// EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) -// EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) -// EIGEN_STATIC_ASSERT((internal::is_same::value), -// YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) -// -// eigen_assert(derived().size() == other.size()); -// // short version, but the assembly looks more complicated because -// // of the CwiseBinaryOp iterator complexity -// // return res = (derived().cwise() * other.derived().conjugate()).sum(); -// -// // optimized, generic version -// typename Derived::InnerIterator i(derived(),0); -// typename OtherDerived::InnerIterator j(other.derived(),0); -// Scalar res = 0; -// while (i && j) -// { -// if (i.index()==j.index()) -// { -// // std::cerr << i.value() << " * " << j.value() << "\n"; -// res += i.value() * internal::conj(j.value()); -// ++i; ++j; -// } -// else if (i.index() +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. 
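The new SparsePermutation.h introduced here implements the sparse-matrix × permutation products declared further down in this hunk. A minimal usage sketch follows; it is illustrative only, and the 3x3 matrix, the triplet values and the swapped indices are assumptions, not part of the patch:

    // Illustrative usage of the permutation * sparse products implemented below.
    #include <Eigen/Sparse>
    #include <algorithm>
    #include <vector>

    int main()
    {
      std::vector<Eigen::Triplet<double> > entries;
      entries.push_back(Eigen::Triplet<double>(0, 1,  2.0));
      entries.push_back(Eigen::Triplet<double>(2, 0, -1.0));

      Eigen::SparseMatrix<double> A(3, 3), B;
      A.setFromTriplets(entries.begin(), entries.end());

      Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic> P(3);
      P.setIdentity();
      std::swap(P.indices()(0), P.indices()(2)); // exchange indices 0 and 2

      B = P * A;             // permute the rows of A
      B = A * P;             // permute the columns of A
      B = A * P.transpose(); // apply the inverse permutation to the columns
      return 0;
    }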
+// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSE_PERMUTATION_H +#define EIGEN_SPARSE_PERMUTATION_H + +// This file implements sparse * permutation products + +namespace Eigen { + +namespace internal { + +template +struct traits > +{ + typedef typename remove_all::type MatrixTypeNestedCleaned; + typedef typename MatrixTypeNestedCleaned::Scalar Scalar; + typedef typename MatrixTypeNestedCleaned::Index Index; + enum { + SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? RowMajor : ColMajor, + MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight + }; + + typedef typename internal::conditional, + SparseMatrix >::type ReturnType; +}; + +template +struct permut_sparsematrix_product_retval + : public ReturnByValue > +{ + typedef typename remove_all::type MatrixTypeNestedCleaned; + typedef typename MatrixTypeNestedCleaned::Scalar Scalar; + typedef typename MatrixTypeNestedCleaned::Index Index; + + enum { + SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? RowMajor : ColMajor, + MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight + }; + + permut_sparsematrix_product_retval(const PermutationType& perm, const MatrixType& matrix) + : m_permutation(perm), m_matrix(matrix) + {} + + inline int rows() const { return m_matrix.rows(); } + inline int cols() const { return m_matrix.cols(); } + + template inline void evalTo(Dest& dst) const + { + if(MoveOuter) + { + SparseMatrix tmp(m_matrix.rows(), m_matrix.cols()); + VectorXi sizes(m_matrix.outerSize()); + for(Index j=0; j tmp(m_matrix.rows(), m_matrix.cols()); + VectorXi sizes(tmp.outerSize()); + sizes.setZero(); + PermutationMatrix perm; + if((Side==OnTheLeft) ^ Transposed) + perm = m_permutation; + else + perm = m_permutation.transpose(); + + for(Index j=0; j +inline const internal::permut_sparsematrix_product_retval, SparseDerived, OnTheRight, false> +operator*(const SparseMatrixBase& matrix, const PermutationBase& perm) +{ + return internal::permut_sparsematrix_product_retval, SparseDerived, OnTheRight, false>(perm, matrix.derived()); +} + +/** \returns the matrix with the permutation applied to the rows + */ +template +inline const internal::permut_sparsematrix_product_retval, SparseDerived, OnTheLeft, false> +operator*( const PermutationBase& perm, const SparseMatrixBase& matrix) +{ + return internal::permut_sparsematrix_product_retval, SparseDerived, OnTheLeft, false>(perm, matrix.derived()); +} + + + +/** \returns the matrix with the inverse permutation applied to the columns. + */ +template +inline const internal::permut_sparsematrix_product_retval, SparseDerived, OnTheRight, true> +operator*(const SparseMatrixBase& matrix, const Transpose >& tperm) +{ + return internal::permut_sparsematrix_product_retval, SparseDerived, OnTheRight, true>(tperm.nestedPermutation(), matrix.derived()); +} + +/** \returns the matrix with the inverse permutation applied to the rows. 
+ */ +template +inline const internal::permut_sparsematrix_product_retval, SparseDerived, OnTheLeft, true> +operator*(const Transpose >& tperm, const SparseMatrixBase& matrix) +{ + return internal::permut_sparsematrix_product_retval, SparseDerived, OnTheLeft, true>(tperm.nestedPermutation(), matrix.derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseProduct.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseProduct.h similarity index 68% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseProduct.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseProduct.h index 1c1f54706..813dbf624 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseProduct.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseProduct.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SPARSEPRODUCT_H #define EIGEN_SPARSEPRODUCT_H +namespace Eigen { + template struct SparseSparseProductReturnType { @@ -38,11 +40,11 @@ struct SparseSparseProductReturnType typedef typename internal::conditional, - const typename internal::nested::type>::type LhsNested; + typename internal::nested::type>::type LhsNested; typedef typename internal::conditional, - const typename internal::nested::type>::type RhsNested; + typename internal::nested::type>::type RhsNested; typedef SparseSparseProduct Type; }; @@ -106,9 +108,42 @@ class SparseSparseProduct : internal::no_assignment_operator, template EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs) - : m_lhs(lhs), m_rhs(rhs) + : m_lhs(lhs), m_rhs(rhs), m_tolerance(0), m_conservative(true) { - eigen_assert(lhs.cols() == rhs.rows()); + init(); + } + + template + EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, RealScalar tolerance) + : m_lhs(lhs), m_rhs(rhs), m_tolerance(tolerance), m_conservative(false) + { + init(); + } + + SparseSparseProduct pruned(Scalar reference = 0, RealScalar epsilon = NumTraits::dummy_precision()) const + { + return SparseSparseProduct(m_lhs,m_rhs,internal::abs(reference)*epsilon); + } + + template + void evalTo(Dest& result) const + { + if(m_conservative) + internal::conservative_sparse_sparse_product_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result); + else + internal::sparse_sparse_product_with_pruning_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result,m_tolerance); + } + + EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); } + + EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; } + EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; } + + protected: + void init() + { + eigen_assert(m_lhs.cols() == m_rhs.rows()); enum { ProductIsValid = _LhsNested::ColsAtCompileTime==Dynamic @@ -127,15 +162,40 @@ class SparseSparseProduct : internal::no_assignment_operator, EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) } - EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); } - EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); } - - EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; } - EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; } - - protected: LhsNested m_lhs; RhsNested m_rhs; + RealScalar m_tolerance; + bool m_conservative; }; +// sparse = sparse * sparse +template +template +inline Derived& SparseMatrixBase::operator=(const SparseSparseProduct& product) +{ + product.evalTo(derived()); + return derived(); +} + +/** \returns an 
expression of the product of two sparse matrices. + * By default a conservative product preserving the symbolic non zeros is performed. + * The automatic pruning of the small values can be achieved by calling the pruned() function + * in which case a totally different product algorithm is employed: + * \code + * C = (A*B).pruned(); // supress numerical zeros (exact) + * C = (A*B).pruned(ref); + * C = (A*B).pruned(ref,epsilon); + * \endcode + * where \c ref is a meaningful non zero reference value. + * */ +template +template +inline const typename SparseSparseProductReturnType::Type +SparseMatrixBase::operator*(const SparseMatrixBase &other) const +{ + return typename SparseSparseProductReturnType::Type(derived(), other.derived()); +} + +} // end namespace Eigen + #endif // EIGEN_SPARSEPRODUCT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseRedux.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseRedux.h similarity index 97% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseRedux.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseRedux.h index afc49de7a..73fb9a318 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseRedux.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseRedux.h @@ -25,12 +25,14 @@ #ifndef EIGEN_SPARSEREDUX_H #define EIGEN_SPARSEREDUX_H +namespace Eigen { + template typename internal::traits::Scalar SparseMatrixBase::sum() const { eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); - Scalar res = 0; + Scalar res(0); for (Index j=0; j::sum() const return Matrix::Map(&m_data.value(0), m_data.size()).sum(); } +} // end namespace Eigen + #endif // EIGEN_SPARSEREDUX_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseSelfAdjointView.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h similarity index 77% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseSelfAdjointView.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h index d82044c78..c925a894d 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseSelfAdjointView.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h @@ -25,8 +25,10 @@ #ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H #define EIGEN_SPARSE_SELFADJOINTVIEW_H -/** \class SparseSelfAdjointView - * +namespace Eigen { + +/** \ingroup SparseCore_Module + * \class SparseSelfAdjointView * * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix. * @@ -45,9 +47,6 @@ class SparseSelfAdjointTimeDenseProduct; template class DenseTimeSparseSelfAdjointProduct; -template -class SparseSymmetricPermutationProduct; - namespace internal { template @@ -106,9 +105,6 @@ template class SparseSelfAdjointView * * \returns a reference to \c *this * - * Note that it is faster to set alpha=0 than initializing the matrix to zero - * and then keep the default value alpha=1. - * * To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply * call this function with u.adjoint(). 
*/ @@ -116,21 +112,21 @@ template class SparseSelfAdjointView SparseSelfAdjointView& rankUpdate(const SparseMatrixBase& u, Scalar alpha = Scalar(1)); /** \internal triggered by sparse_matrix = SparseSelfadjointView; */ - template void evalTo(SparseMatrix& _dest) const + template void evalTo(SparseMatrix& _dest) const { internal::permute_symm_to_fullsymm(m_matrix, _dest); } - template void evalTo(DynamicSparseMatrix& _dest) const + template void evalTo(DynamicSparseMatrix& _dest) const { // TODO directly evaluate into _dest; - SparseMatrix tmp(_dest.rows(),_dest.cols()); + SparseMatrix tmp(_dest.rows(),_dest.cols()); internal::permute_symm_to_fullsymm(m_matrix, tmp); _dest = tmp; } - /** \returns an expression of P^-1 H P */ - SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix& perm) const + /** \returns an expression of P H P^-1 */ + SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix& perm) const { return SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo>(m_matrix, perm); } @@ -141,6 +137,20 @@ template class SparseSelfAdjointView permutedMatrix.evalTo(*this); return *this; } + + + SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src) + { + PermutationMatrix pnull; + return *this = src.twistedBy(pnull); + } + + template + SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src) + { + PermutationMatrix pnull; + return *this = src.twistedBy(pnull); + } // const SparseLLT llt() const; @@ -148,7 +158,7 @@ template class SparseSelfAdjointView protected: - const typename MatrixType::Nested m_matrix; + typename MatrixType::Nested m_matrix; mutable VectorI m_countPerRow; mutable VectorI m_countPerCol; }; @@ -230,12 +240,15 @@ class SparseSelfAdjointTimeDenseProduct for (Index j=0; j dest_j(dest.row(LhsIsRowMajor ? j : 0)); for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i) { Index a = LhsIsRowMajor ? j : i.index(); @@ -300,7 +313,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrixj) || (UpLo==Upper && ic) || ( UpLo==Upper && rj) || (UpLo==Upper && ic) || ( (UpLo&Upper)==Upper && r -void permute_symm_to_symm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm) +template +void permute_symm_to_symm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm) { typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; - typedef SparseMatrix Dest; - Dest& dest(_dest.derived()); + SparseMatrix& dest(_dest.derived()); typedef Matrix VectorI; - //internal::conj_if cj; + enum { + SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor, + StorageOrderMatch = int(SrcOrder) == int(DstOrder), + DstUpLo = DstOrder==RowMajor ? (_DstUpLo==Upper ? Lower : Upper) : _DstUpLo, + SrcUpLo = SrcOrder==RowMajor ? (_SrcUpLo==Upper ? Lower : Upper) : _SrcUpLo + }; Index size = mat.rows(); VectorI count(size); @@ -379,37 +412,40 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrixj)) + if((int(SrcUpLo)==int(Lower) && ij)) continue; Index ip = perm ? perm[i] : i; - count[DstUpLo==Lower ? (std::min)(ip,jp) : (std::max)(ip,jp)]++; + count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++; } } - dest._outerIndexPtr()[0] = 0; + dest.outerIndexPtr()[0] = 0; for(Index j=0; jj)) + if((int(SrcUpLo)==int(Lower) && ij)) continue; + Index jp = perm ? perm[j] : j; Index ip = perm? perm[i] : i; - Index k = count[DstUpLo==Lower ? 
(std::min)(ip,jp) : (std::max)(ip,jp)]++; - dest._innerIndexPtr()[k] = DstUpLo==Lower ? (std::max)(ip,jp) : (std::min)(ip,jp); - if((DstUpLo==Lower && ipjp)) - dest._valuePtr()[k] = conj(it.value()); + Index k = count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++; + dest.innerIndexPtr()[k] = int(DstUpLo)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp); + + if(!StorageOrderMatch) std::swap(ip,jp); + if( ((int(DstUpLo)==int(Lower) && ipjp))) + dest.valuePtr()[k] = conj(it.value()); else - dest._valuePtr()[k] = it.value(); + dest.valuePtr()[k] = it.value(); } } } @@ -420,10 +456,12 @@ template class SparseSymmetricPermutationProduct : public EigenBase > { - typedef PermutationMatrix Perm; public: typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; + protected: + typedef PermutationMatrix Perm; + public: typedef Matrix VectorI; typedef typename MatrixType::Nested MatrixTypeNested; typedef typename internal::remove_all::type _MatrixTypeNested; @@ -435,7 +473,8 @@ class SparseSymmetricPermutationProduct inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } - template void evalTo(SparseMatrix& _dest) const + template + void evalTo(SparseMatrix& _dest) const { internal::permute_symm_to_fullsymm(m_matrix,_dest,m_perm.indices().data()); } @@ -446,9 +485,11 @@ class SparseSymmetricPermutationProduct } protected: - const MatrixTypeNested m_matrix; + MatrixTypeNested m_matrix; const Perm& m_perm; }; +} // end namespace Eigen + #endif // EIGEN_SPARSE_SELFADJOINTVIEW_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h new file mode 100644 index 000000000..abd4fda82 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h @@ -0,0 +1,164 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2011 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
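The pruning-based product kernel added in this new file backs the pruned() API documented earlier in this patch. A minimal, self-contained sketch of the two product flavours follows; the 3x3 matrices and triplet values are illustrative assumptions:

    #include <Eigen/Sparse>
    #include <vector>

    int main()
    {
      std::vector<Eigen::Triplet<double> > ta, tb;
      ta.push_back(Eigen::Triplet<double>(0, 0,  3.0));
      ta.push_back(Eigen::Triplet<double>(1, 2,  2.0));
      tb.push_back(Eigen::Triplet<double>(0, 1,  4.0));
      tb.push_back(Eigen::Triplet<double>(2, 1, -1.0));

      Eigen::SparseMatrix<double> A(3, 3), B(3, 3), C;
      A.setFromTriplets(ta.begin(), ta.end());
      B.setFromTriplets(tb.begin(), tb.end());

      C = A * B;            // conservative product: keeps all symbolic non-zeros
      C = (A * B).pruned(); // pruning product: drops exact numerical zeros
      return 0;
    }

The first assignment goes through the conservative_sparse_sparse_product_selector path, the second through the sparse_sparse_product_with_pruning_selector defined in this file.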
+ +#ifndef EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H +#define EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H + +namespace Eigen { + +namespace internal { + + +// perform a pseudo in-place sparse * sparse product assuming all matrices are col major +template +static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, typename ResultType::RealScalar tolerance) +{ + // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res); + + typedef typename remove_all::type::Scalar Scalar; + typedef typename remove_all::type::Index Index; + + // make sure to call innerSize/outerSize since we fake the storage order. + Index rows = lhs.innerSize(); + Index cols = rhs.outerSize(); + //int size = lhs.outerSize(); + eigen_assert(lhs.outerSize() == rhs.innerSize()); + + // allocate a temporary buffer + AmbiVector tempVector(rows); + + // estimate the number of non zero entries + // given a rhs column containing Y non zeros, we assume that the respective Y columns + // of the lhs differs in average of one non zeros, thus the number of non zeros for + // the product of a rhs column with the lhs is X+Y where X is the average number of non zero + // per column of the lhs. + // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs) + Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros(); + + // mimics a resizeByInnerOuter: + if(ResultType::IsRowMajor) + res.resize(cols, rows); + else + res.resize(rows, cols); + + res.reserve(estimated_nnz_prod); + double ratioColRes = double(estimated_nnz_prod)/double(lhs.rows()*rhs.cols()); + for (Index j=0; j::Iterator it(tempVector,tolerance); it; ++it) + res.insertBackByOuterInner(j,it.index()) = it.value(); + } + res.finalize(); +} + +template::Flags&RowMajorBit, + int RhsStorageOrder = traits::Flags&RowMajorBit, + int ResStorageOrder = traits::Flags&RowMajorBit> +struct sparse_sparse_product_with_pruning_selector; + +template +struct sparse_sparse_product_with_pruning_selector +{ + typedef typename traits::type>::Scalar Scalar; + typedef typename ResultType::RealScalar RealScalar; + + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance) + { + typename remove_all::type _res(res.rows(), res.cols()); + internal::sparse_sparse_product_with_pruning_impl(lhs, rhs, _res, tolerance); + res.swap(_res); + } +}; + +template +struct sparse_sparse_product_with_pruning_selector +{ + typedef typename ResultType::RealScalar RealScalar; + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance) + { + // we need a col-major matrix to hold the result + typedef SparseMatrix SparseTemporaryType; + SparseTemporaryType _res(res.rows(), res.cols()); + internal::sparse_sparse_product_with_pruning_impl(lhs, rhs, _res, tolerance); + res = _res; + } +}; + +template +struct sparse_sparse_product_with_pruning_selector +{ + typedef typename ResultType::RealScalar RealScalar; + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance) + { + // let's transpose the product to get a column x column product + typename remove_all::type _res(res.rows(), res.cols()); + internal::sparse_sparse_product_with_pruning_impl(rhs, lhs, _res, tolerance); + res.swap(_res); + } +}; + +template +struct sparse_sparse_product_with_pruning_selector +{ + typedef typename ResultType::RealScalar RealScalar; + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance) + { + typedef SparseMatrix ColMajorMatrix; + ColMajorMatrix colLhs(lhs); + ColMajorMatrix colRhs(rhs); + 
internal::sparse_sparse_product_with_pruning_impl(colLhs, colRhs, res, tolerance); + + // let's transpose the product to get a column x column product +// typedef SparseMatrix SparseTemporaryType; +// SparseTemporaryType _res(res.cols(), res.rows()); +// sparse_sparse_product_with_pruning_impl(rhs, lhs, _res); +// res = _res.transpose(); + } +}; + +// NOTE the 2 others cases (col row *) must never occur since they are caught +// by ProductReturnType which transforms it to (col col *) by evaluating rhs. + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseTranspose.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseTranspose.h similarity index 73% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseTranspose.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseTranspose.h index 2aea2fa32..07d9e0bbd 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseTranspose.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseTranspose.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SPARSETRANSPOSE_H #define EIGEN_SPARSETRANSPOSE_H +namespace Eigen { + template class TransposeImpl : public SparseMatrixBase > { @@ -39,17 +41,21 @@ template class TransposeImpl inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); } }; +// NOTE: VC10 trigger an ICE if don't put typename TransposeImpl:: in front of Index, +// a typedef typename TransposeImpl::Index Index; +// does not fix the issue. +// An alternative is to define the nested class in the parent class itself. template class TransposeImpl::InnerIterator : public _MatrixTypeNested::InnerIterator { typedef typename _MatrixTypeNested::InnerIterator Base; public: - EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, Index outer) + EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl::Index outer) : Base(trans.derived().nestedExpression(), outer) {} - inline Index row() const { return Base::col(); } - inline Index col() const { return Base::row(); } + inline typename TransposeImpl::Index row() const { return Base::col(); } + inline typename TransposeImpl::Index col() const { return Base::row(); } }; template class TransposeImpl::ReverseInnerIterator @@ -58,11 +64,13 @@ template class TransposeImpl::ReverseInn typedef typename _MatrixTypeNested::ReverseInnerIterator Base; public: - EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, Index outer) + EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl::Index outer) : Base(xpr.derived().nestedExpression(), outer) {} - inline Index row() const { return Base::col(); } - inline Index col() const { return Base::row(); } + inline typename TransposeImpl::Index row() const { return Base::col(); } + inline typename TransposeImpl::Index col() const { return Base::row(); } }; +} // end namespace Eigen + #endif // EIGEN_SPARSETRANSPOSE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseTriangularView.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseTriangularView.h new file mode 100644 index 000000000..59aab5756 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseTriangularView.h @@ -0,0 +1,179 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2009 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSE_TRIANGULARVIEW_H +#define EIGEN_SPARSE_TRIANGULARVIEW_H + +namespace Eigen { + +namespace internal { + +template +struct traits > +: public traits +{}; + +} // namespace internal + +template class SparseTriangularView + : public SparseMatrixBase > +{ + enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit)) + || ((Mode&Upper) && (MatrixType::Flags&RowMajorBit)), + SkipLast = !SkipFirst, + HasUnitDiag = (Mode&UnitDiag) ? 1 : 0 + }; + + public: + + EIGEN_SPARSE_PUBLIC_INTERFACE(SparseTriangularView) + + class InnerIterator; + class ReverseInnerIterator; + + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } + + typedef typename MatrixType::Nested MatrixTypeNested; + typedef typename internal::remove_reference::type MatrixTypeNestedNonRef; + typedef typename internal::remove_all::type MatrixTypeNestedCleaned; + + inline SparseTriangularView(const MatrixType& matrix) : m_matrix(matrix) {} + + /** \internal */ + inline const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; } + + template + typename internal::plain_matrix_type_column_major::type + solve(const MatrixBase& other) const; + + template void solveInPlace(MatrixBase& other) const; + template void solveInPlace(SparseMatrixBase& other) const; + + protected: + MatrixTypeNested m_matrix; +}; + +template +class SparseTriangularView::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator +{ + typedef typename MatrixTypeNestedCleaned::InnerIterator Base; + public: + + EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, Index outer) + : Base(view.nestedExpression(), outer), m_returnOne(false) + { + if(SkipFirst) + { + while((*this) && (HasUnitDiag ? 
this->index()<=outer : this->index()=Base::outer())) + { + if((!SkipFirst) && Base::operator bool()) + Base::operator++(); + m_returnOne = true; + } + } + + EIGEN_STRONG_INLINE InnerIterator& operator++() + { + if(HasUnitDiag && m_returnOne) + m_returnOne = false; + else + { + Base::operator++(); + if(HasUnitDiag && (!SkipFirst) && ((!Base::operator bool()) || Base::index()>=Base::outer())) + { + if((!SkipFirst) && Base::operator bool()) + Base::operator++(); + m_returnOne = true; + } + } + return *this; + } + + inline Index row() const { return Base::row(); } + inline Index col() const { return Base::col(); } + inline Index index() const + { + if(HasUnitDiag && m_returnOne) return Base::outer(); + else return Base::index(); + } + inline Scalar value() const + { + if(HasUnitDiag && m_returnOne) return Scalar(1); + else return Base::value(); + } + + EIGEN_STRONG_INLINE operator bool() const + { + if(HasUnitDiag && m_returnOne) + return true; + return (SkipFirst ? Base::operator bool() : (Base::operator bool() && this->index() <= this->outer())); + } + protected: + bool m_returnOne; +}; + +template +class SparseTriangularView::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator +{ + typedef typename MatrixTypeNestedCleaned::ReverseInnerIterator Base; + public: + + EIGEN_STRONG_INLINE ReverseInnerIterator(const SparseTriangularView& view, Index outer) + : Base(view.nestedExpression(), outer) + { + eigen_assert((!HasUnitDiag) && "ReverseInnerIterator does not support yet triangular views with a unit diagonal"); + if(SkipLast) + while((*this) && this->index()>outer) + --(*this); + } + + EIGEN_STRONG_INLINE InnerIterator& operator--() + { Base::operator--(); return *this; } + + inline Index row() const { return Base::row(); } + inline Index col() const { return Base::col(); } + + EIGEN_STRONG_INLINE operator bool() const + { + return SkipLast ? 
Base::operator bool() : (Base::operator bool() && this->index() >= this->outer()); + } +}; + +template +template +inline const SparseTriangularView +SparseMatrixBase::triangularView() const +{ + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_SPARSE_TRIANGULARVIEW_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseUtil.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseUtil.h similarity index 67% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseUtil.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseUtil.h index db9ae98e7..050b65253 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseUtil.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseUtil.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SPARSEUTIL_H #define EIGEN_SPARSEUTIL_H +namespace Eigen { + #ifdef NDEBUG #define EIGEN_DBG_SPARSE(X) #else @@ -58,22 +60,22 @@ EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=) #define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \ typedef BaseClass Base; \ - typedef typename Eigen::internal::traits::Scalar Scalar; \ + typedef typename Eigen::internal::traits::Scalar Scalar; \ typedef typename Eigen::NumTraits::Real RealScalar; \ - typedef typename Eigen::internal::nested::type Nested; \ - typedef typename Eigen::internal::traits::StorageKind StorageKind; \ - typedef typename Eigen::internal::traits::Index Index; \ - enum { RowsAtCompileTime = Eigen::internal::traits::RowsAtCompileTime, \ - ColsAtCompileTime = Eigen::internal::traits::ColsAtCompileTime, \ - Flags = Eigen::internal::traits::Flags, \ - CoeffReadCost = Eigen::internal::traits::CoeffReadCost, \ + typedef typename Eigen::internal::nested::type Nested; \ + typedef typename Eigen::internal::traits::StorageKind StorageKind; \ + typedef typename Eigen::internal::traits::Index Index; \ + enum { RowsAtCompileTime = Eigen::internal::traits::RowsAtCompileTime, \ + ColsAtCompileTime = Eigen::internal::traits::ColsAtCompileTime, \ + Flags = Eigen::internal::traits::Flags, \ + CoeffReadCost = Eigen::internal::traits::CoeffReadCost, \ SizeAtCompileTime = Base::SizeAtCompileTime, \ IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \ using Base::derived; \ using Base::const_cast_derived; #define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \ - _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase) + _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase) const int CoherentAccessPattern = 0x1; const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern; @@ -100,20 +102,43 @@ template class SparseDenseOuterProdu template struct SparseSparseProductReturnType; template::ColsAtCompileTime> struct DenseSparseProductReturnType; template::ColsAtCompileTime> struct SparseDenseProductReturnType; +template class SparseSymmetricPermutationProduct; namespace internal { -template struct eval -{ - typedef typename traits::Scalar _Scalar; - enum { - _Flags = traits::Flags - }; +template struct sparse_eval; +template struct eval + : public sparse_eval::RowsAtCompileTime,traits::ColsAtCompileTime> +{}; + +template struct sparse_eval { + typedef typename traits::Scalar _Scalar; + enum { _Flags = traits::Flags| RowMajorBit }; + public: + typedef SparseVector<_Scalar, _Flags> type; +}; + +template struct sparse_eval { + typedef typename traits::Scalar _Scalar; + enum { _Flags = traits::Flags & (~RowMajorBit) }; + public: + typedef SparseVector<_Scalar, _Flags> type; +}; + +template struct sparse_eval { + typedef typename traits::Scalar _Scalar; + enum { _Flags = traits::Flags }; public: typedef 
SparseMatrix<_Scalar, _Flags> type; }; +template struct sparse_eval { + typedef typename traits::Scalar _Scalar; + public: + typedef Matrix<_Scalar, 1, 1> type; +}; + template struct plain_matrix_type { typedef typename traits::Scalar _Scalar; @@ -127,4 +152,37 @@ template struct plain_matrix_type } // end namespace internal +/** \ingroup SparseCore_Module + * + * \class Triplet + * + * \brief A small structure to hold a non zero as a triplet (i,j,value). + * + * \sa SparseMatrix::setFromTriplets() + */ +template +class Triplet +{ +public: + Triplet() : m_row(0), m_col(0), m_value(0) {} + + Triplet(const Index& i, const Index& j, const Scalar& v = Scalar(0)) + : m_row(i), m_col(j), m_value(v) + {} + + /** \returns the row index of the element */ + const Index& row() const { return m_row; } + + /** \returns the column index of the element */ + const Index& col() const { return m_col; } + + /** \returns the value of the element */ + const Scalar& value() const { return m_value; } +protected: + Index m_row, m_col; + Scalar m_value; +}; + +} // end namespace Eigen + #endif // EIGEN_SPARSEUTIL_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseVector.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseVector.h similarity index 70% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseVector.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseVector.h index ce4bb51a2..e81347705 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseVector.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseVector.h @@ -25,7 +25,10 @@ #ifndef EIGEN_SPARSEVECTOR_H #define EIGEN_SPARSEVECTOR_H -/** \class SparseVector +namespace Eigen { + +/** \ingroup SparseCore_Module + * \class SparseVector * * \brief a sparse vector class * @@ -46,13 +49,13 @@ struct traits > typedef Sparse StorageKind; typedef MatrixXpr XprKind; enum { - IsColVector = _Options & RowMajorBit ? 0 : 1, + IsColVector = (_Options & RowMajorBit) ? 0 : 1, RowsAtCompileTime = IsColVector ? Dynamic : 1, ColsAtCompileTime = IsColVector ? 1 : Dynamic, MaxRowsAtCompileTime = RowsAtCompileTime, MaxColsAtCompileTime = ColsAtCompileTime, - Flags = _Options | NestByRefBit | LvalueBit, + Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit), CoeffReadCost = NumTraits::ReadCost, SupportedAccessPatterns = InnerRandomAccessPattern }; @@ -67,7 +70,6 @@ class SparseVector EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector) EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=) EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=) -// EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, =) protected: public: @@ -79,11 +81,11 @@ class SparseVector Options = _Options }; - CompressedStorage m_data; + internal::CompressedStorage m_data; Index m_size; - CompressedStorage& _data() { return m_data; } - CompressedStorage& _data() const { return m_data; } + internal::CompressedStorage& _data() { return m_data; } + internal::CompressedStorage& _data() const { return m_data; } public: @@ -91,13 +93,12 @@ class SparseVector EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 
1 : m_size; } EIGEN_STRONG_INLINE Index innerSize() const { return m_size; } EIGEN_STRONG_INLINE Index outerSize() const { return 1; } - EIGEN_STRONG_INLINE Index innerNonZeros(Index j) const { eigen_assert(j==0); return m_size; } - EIGEN_STRONG_INLINE const Scalar* _valuePtr() const { return &m_data.value(0); } - EIGEN_STRONG_INLINE Scalar* _valuePtr() { return &m_data.value(0); } + EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return &m_data.value(0); } + EIGEN_STRONG_INLINE Scalar* valuePtr() { return &m_data.value(0); } - EIGEN_STRONG_INLINE const Index* _innerIndexPtr() const { return &m_data.index(0); } - EIGEN_STRONG_INLINE Index* _innerIndexPtr() { return &m_data.index(0); } + EIGEN_STRONG_INLINE const Index* innerIndexPtr() const { return &m_data.index(0); } + EIGEN_STRONG_INLINE Index* innerIndexPtr() { return &m_data.index(0); } inline Scalar coeff(Index row, Index col) const { @@ -126,6 +127,7 @@ class SparseVector public: class InnerIterator; + class ReverseInnerIterator; inline void setZero() { m_data.clear(); } @@ -134,11 +136,13 @@ class SparseVector inline void startVec(Index outer) { + EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); } inline Scalar& insertBackByOuterInner(Index outer, Index inner) { + EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); return insertBack(inner); } @@ -158,7 +162,7 @@ class SparseVector Scalar& insert(Index i) { Index startId = 0; - Index p = m_data.size() - 1; + Index p = Index(m_data.size()) - 1; // TODO smart realloc m_data.resize(p+2,1); @@ -205,13 +209,6 @@ class SparseVector inline SparseVector(Index rows, Index cols) : m_size(0) { resize(rows,cols); } - template - inline SparseVector(const MatrixBase& other) - : m_size(0) - { - *this = other.derived(); - } - template inline SparseVector(const SparseMatrixBase& other) : m_size(0) @@ -249,9 +246,9 @@ class SparseVector inline SparseVector& operator=(const SparseMatrixBase& other) { if (int(RowsAtCompileTime)!=int(OtherDerived::RowsAtCompileTime)) - return Base::operator=(other.transpose()); + return assign(other.transpose()); else - return Base::operator=(other); + return assign(other); } #ifndef EIGEN_PARSED_BY_DOXYGEN @@ -262,56 +259,6 @@ class SparseVector } #endif -// const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); -// if (needToTranspose) -// { -// // two passes algorithm: -// // 1 - compute the number of coeffs per dest inner vector -// // 2 - do the actual copy/eval -// // Since each coeff of the rhs has to be evaluated twice, let's evauluate it if needed -// typedef typename internal::nested::type OtherCopy; -// OtherCopy otherCopy(other.derived()); -// typedef typename internal::remove_all::type _OtherCopy; -// -// resize(other.rows(), other.cols()); -// Eigen::Map(m_outerIndex,outerSize()).setZero(); -// // pass 1 -// // FIXME the above copy could be merged with that pass -// for (int j=0; j::operator=(other.derived()); -// } -// } - friend std::ostream & operator << (std::ostream & s, const SparseVector& m) { for (Index i=0; i + EIGEN_DONT_INLINE SparseVector& assign(const SparseMatrixBase& _other) + { + const OtherDerived& other(_other.derived()); + const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); + if(needToTranspose) + { + Index size = other.size(); + Index nnz = other.nonZeros(); + resize(size); + reserve(nnz); + for(Index i=0; i @@ -399,18 +351,14 @@ class SparseVector::InnerIterator InnerIterator(const SparseVector& vec, Index outer=0) : m_data(vec.m_data), m_id(0), 
m_end(static_cast(m_data.size())) { + EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); } - InnerIterator(const CompressedStorage& data) + InnerIterator(const internal::CompressedStorage& data) : m_data(data), m_id(0), m_end(static_cast(m_data.size())) {} - template - InnerIterator(const Flagged& vec, Index ) - : m_data(vec._expression().m_data), m_id(0), m_end(m_data.size()) - {} - inline InnerIterator& operator++() { m_id++; return *this; } inline Scalar value() const { return m_data.value(m_id); } @@ -423,9 +371,43 @@ class SparseVector::InnerIterator inline operator bool() const { return (m_id < m_end); } protected: - const CompressedStorage& m_data; + const internal::CompressedStorage& m_data; Index m_id; const Index m_end; }; +template +class SparseVector::ReverseInnerIterator +{ + public: + ReverseInnerIterator(const SparseVector& vec, Index outer=0) + : m_data(vec.m_data), m_id(static_cast(m_data.size())), m_start(0) + { + EIGEN_UNUSED_VARIABLE(outer); + eigen_assert(outer==0); + } + + ReverseInnerIterator(const internal::CompressedStorage& data) + : m_data(data), m_id(static_cast(m_data.size())), m_start(0) + {} + + inline ReverseInnerIterator& operator--() { m_id--; return *this; } + + inline Scalar value() const { return m_data.value(m_id-1); } + inline Scalar& valueRef() { return const_cast(m_data.value(m_id-1)); } + + inline Index index() const { return m_data.index(m_id-1); } + inline Index row() const { return IsColVector ? index() : 0; } + inline Index col() const { return IsColVector ? 0 : index(); } + + inline operator bool() const { return (m_id > m_start); } + + protected: + const internal::CompressedStorage& m_data; + Index m_id; + const Index m_start; +}; + +} // end namespace Eigen + #endif // EIGEN_SPARSEVECTOR_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseView.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseView.h similarity index 91% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseView.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseView.h index 243065610..43a3adb24 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseView.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseView.h @@ -1,7 +1,7 @@ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// -// Copyright (C) 2010 Gael Guennebaud +// Copyright (C) 2011 Gael Guennebaud // Copyright (C) 2010 Daniel Lowengrub // // Eigen is free software; you can redistribute it and/or @@ -26,6 +26,8 @@ #ifndef EIGEN_SPARSEVIEW_H #define EIGEN_SPARSEVIEW_H +namespace Eigen { + namespace internal { template @@ -61,7 +63,7 @@ public: inline Index outerSize() const { return m_matrix.outerSize(); } protected: - const MatrixTypeNested m_matrix; + MatrixTypeNested m_matrix; Scalar m_reference; typename NumTraits::Real m_epsilon; }; @@ -92,10 +94,10 @@ protected: private: void incrementToNonZero() { - while(internal::isMuchSmallerThan(value(), m_view.m_reference, m_view.m_epsilon) && (bool(*this))) - { - IterBase::operator++(); - } + while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.m_reference, m_view.m_epsilon)) + { + IterBase::operator++(); + } } }; @@ -106,4 +108,6 @@ const SparseView MatrixBase::sparseView(const Scalar& m_refere return SparseView(derived(), m_reference, m_epsilon); } +} // end namespace Eigen + #endif diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/TriangularSolver.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/TriangularSolver.h similarity index 89% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/TriangularSolver.h rename to gtsam/3rdparty/Eigen/Eigen/src/SparseCore/TriangularSolver.h index 62bb8bb44..9a45e8f41 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/TriangularSolver.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/TriangularSolver.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SPARSETRIANGULARSOLVER_H #define EIGEN_SPARSETRIANGULARSOLVER_H +namespace Eigen { + namespace internal { template for(int i=0; i for(int i=lhs.rows()-1 ; i>=0 ; --i) { Scalar tmp = other.coeff(i,col); + Scalar l_ii = 0; typename Lhs::InnerIterator it(lhs, i); - if (it && it.index() == i) + while(it && it.index() if (Mode & UnitDiag) other.coeffRef(i,col) = tmp; else - { - typename Lhs::InnerIterator it(lhs, i); - eigen_assert(it && it.index() == i); - other.coeffRef(i,col) = tmp/it.value(); - } + other.coeffRef(i,col) = tmp/l_ii; } } } @@ -118,9 +125,11 @@ struct sparse_solve_triangular_selector if (tmp!=Scalar(0)) // optimization when other is actually sparse { typename Lhs::InnerIterator it(lhs, i); + while(it && it.index() { if(!(Mode & UnitDiag)) { - // FIXME lhs.coeff(i,i) might not be always efficient while it must simply be the - // last element of the column ! - other.coeffRef(i,col) /= lhs.innerVector(i).lastCoeff(); + // TODO replace this by a binary search. 
make sure the binary search is safe for partially sorted elements + typename Lhs::ReverseInnerIterator it(lhs, i); + while(it && it.index()!=i) + --it; + eigen_assert(it && it.index()==i); + other.coeffRef(i,col) /= it.value(); } typename Lhs::InnerIterator it(lhs, i); for(; it && it.index() template void SparseTriangularView::solveInPlace(MatrixBase& other) const { - eigen_assert(m_matrix.cols() == m_matrix.rows()); - eigen_assert(m_matrix.cols() == other.rows()); - eigen_assert(!(Mode & ZeroDiag)); - eigen_assert((Mode & (Upper|Lower)) != 0); + eigen_assert(m_matrix.cols() == m_matrix.rows() && m_matrix.cols() == other.rows()); + eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower))); enum { copy = internal::traits::Flags & RowMajorBit }; @@ -295,10 +305,8 @@ template template void SparseTriangularView::solveInPlace(SparseMatrixBase& other) const { - eigen_assert(m_matrix.cols() == m_matrix.rows()); - eigen_assert(m_matrix.cols() == other.rows()); - eigen_assert(!(Mode & ZeroDiag)); - eigen_assert((Mode & (Upper|Lower)) != 0); + eigen_assert(m_matrix.cols() == m_matrix.rows() && m_matrix.cols() == other.rows()); + eigen_assert( (!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower))); // enum { copy = internal::traits::Flags & RowMajorBit }; @@ -336,4 +344,6 @@ SparseMatrixBase::solveTriangular(const MatrixBase& other } #endif // EIGEN2_SUPPORT +} // end namespace Eigen + #endif // EIGEN_SPARSETRIANGULARSOLVER_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SuperLUSupport/CMakeLists.txt b/gtsam/3rdparty/Eigen/Eigen/src/SuperLUSupport/CMakeLists.txt new file mode 100644 index 000000000..b28ebe583 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/SuperLUSupport/CMakeLists.txt @@ -0,0 +1,6 @@ +FILE(GLOB Eigen_SuperLUSupport_SRCS "*.h") + +INSTALL(FILES + ${Eigen_SuperLUSupport_SRCS} + DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SuperLUSupport COMPONENT Devel + ) diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h b/gtsam/3rdparty/Eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h new file mode 100644 index 000000000..6c3eb6858 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h @@ -0,0 +1,1040 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2011 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
+ +#ifndef EIGEN_SUPERLUSUPPORT_H +#define EIGEN_SUPERLUSUPPORT_H + +namespace Eigen { + +#define DECL_GSSVX(PREFIX,FLOATTYPE,KEYTYPE) \ + extern "C" { \ + typedef struct { FLOATTYPE for_lu; FLOATTYPE total_needed; int expansions; } PREFIX##mem_usage_t; \ + extern void PREFIX##gssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *, \ + char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *, \ + void *, int, SuperMatrix *, SuperMatrix *, \ + FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, \ + PREFIX##mem_usage_t *, SuperLUStat_t *, int *); \ + } \ + inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A, \ + int *perm_c, int *perm_r, int *etree, char *equed, \ + FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \ + SuperMatrix *U, void *work, int lwork, \ + SuperMatrix *B, SuperMatrix *X, \ + FLOATTYPE *recip_pivot_growth, \ + FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr, \ + SuperLUStat_t *stats, int *info, KEYTYPE) { \ + PREFIX##mem_usage_t mem_usage; \ + PREFIX##gssvx(options, A, perm_c, perm_r, etree, equed, R, C, L, \ + U, work, lwork, B, X, recip_pivot_growth, rcond, \ + ferr, berr, &mem_usage, stats, info); \ + return mem_usage.for_lu; /* bytes used by the factor storage */ \ + } + +DECL_GSSVX(s,float,float) +DECL_GSSVX(c,float,std::complex) +DECL_GSSVX(d,double,double) +DECL_GSSVX(z,double,std::complex) + +#ifdef MILU_ALPHA +#define EIGEN_SUPERLU_HAS_ILU +#endif + +#ifdef EIGEN_SUPERLU_HAS_ILU + +// similarly for the incomplete factorization using gsisx +#define DECL_GSISX(PREFIX,FLOATTYPE,KEYTYPE) \ + extern "C" { \ + extern void PREFIX##gsisx(superlu_options_t *, SuperMatrix *, int *, int *, int *, \ + char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *, \ + void *, int, SuperMatrix *, SuperMatrix *, FLOATTYPE *, FLOATTYPE *, \ + PREFIX##mem_usage_t *, SuperLUStat_t *, int *); \ + } \ + inline float SuperLU_gsisx(superlu_options_t *options, SuperMatrix *A, \ + int *perm_c, int *perm_r, int *etree, char *equed, \ + FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \ + SuperMatrix *U, void *work, int lwork, \ + SuperMatrix *B, SuperMatrix *X, \ + FLOATTYPE *recip_pivot_growth, \ + FLOATTYPE *rcond, \ + SuperLUStat_t *stats, int *info, KEYTYPE) { \ + PREFIX##mem_usage_t mem_usage; \ + PREFIX##gsisx(options, A, perm_c, perm_r, etree, equed, R, C, L, \ + U, work, lwork, B, X, recip_pivot_growth, rcond, \ + &mem_usage, stats, info); \ + return mem_usage.for_lu; /* bytes used by the factor storage */ \ + } + +DECL_GSISX(s,float,float) +DECL_GSISX(c,float,std::complex) +DECL_GSISX(d,double,double) +DECL_GSISX(z,double,std::complex) + +#endif + +template +struct SluMatrixMapHelper; + +/** \internal + * + * A wrapper class for SuperLU matrices. It supports only compressed sparse matrices + * and dense matrices. Supernodal and other fancy format are not supported by this wrapper. + * + * This wrapper class mainly aims to avoids the need of dynamic allocation of the storage structure. 
+ */ +struct SluMatrix : SuperMatrix +{ + SluMatrix() + { + Store = &storage; + } + + SluMatrix(const SluMatrix& other) + : SuperMatrix(other) + { + Store = &storage; + storage = other.storage; + } + + SluMatrix& operator=(const SluMatrix& other) + { + SuperMatrix::operator=(static_cast(other)); + Store = &storage; + storage = other.storage; + return *this; + } + + struct + { + union {int nnz;int lda;}; + void *values; + int *innerInd; + int *outerInd; + } storage; + + void setStorageType(Stype_t t) + { + Stype = t; + if (t==SLU_NC || t==SLU_NR || t==SLU_DN) + Store = &storage; + else + { + eigen_assert(false && "storage type not supported"); + Store = 0; + } + } + + template + void setScalarType() + { + if (internal::is_same::value) + Dtype = SLU_S; + else if (internal::is_same::value) + Dtype = SLU_D; + else if (internal::is_same >::value) + Dtype = SLU_C; + else if (internal::is_same >::value) + Dtype = SLU_Z; + else + { + eigen_assert(false && "Scalar type not supported by SuperLU"); + } + } + + template + static SluMatrix Map(MatrixBase& _mat) + { + MatrixType& mat(_mat.derived()); + eigen_assert( ((MatrixType::Flags&RowMajorBit)!=RowMajorBit) && "row-major dense matrices are not supported by SuperLU"); + SluMatrix res; + res.setStorageType(SLU_DN); + res.setScalarType(); + res.Mtype = SLU_GE; + + res.nrow = mat.rows(); + res.ncol = mat.cols(); + + res.storage.lda = MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride(); + res.storage.values = mat.data(); + return res; + } + + template + static SluMatrix Map(SparseMatrixBase& mat) + { + SluMatrix res; + if ((MatrixType::Flags&RowMajorBit)==RowMajorBit) + { + res.setStorageType(SLU_NR); + res.nrow = mat.cols(); + res.ncol = mat.rows(); + } + else + { + res.setStorageType(SLU_NC); + res.nrow = mat.rows(); + res.ncol = mat.cols(); + } + + res.Mtype = SLU_GE; + + res.storage.nnz = mat.nonZeros(); + res.storage.values = mat.derived().valuePtr(); + res.storage.innerInd = mat.derived().innerIndexPtr(); + res.storage.outerInd = mat.derived().outerIndexPtr(); + + res.setScalarType(); + + // FIXME the following is not very accurate + if (MatrixType::Flags & Upper) + res.Mtype = SLU_TRU; + if (MatrixType::Flags & Lower) + res.Mtype = SLU_TRL; + + eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU"); + + return res; + } +}; + +template +struct SluMatrixMapHelper > +{ + typedef Matrix MatrixType; + static void run(MatrixType& mat, SluMatrix& res) + { + eigen_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices is not supported by SuperLU"); + res.setStorageType(SLU_DN); + res.setScalarType(); + res.Mtype = SLU_GE; + + res.nrow = mat.rows(); + res.ncol = mat.cols(); + + res.storage.lda = mat.outerStride(); + res.storage.values = mat.data(); + } +}; + +template +struct SluMatrixMapHelper > +{ + typedef Derived MatrixType; + static void run(MatrixType& mat, SluMatrix& res) + { + if ((MatrixType::Flags&RowMajorBit)==RowMajorBit) + { + res.setStorageType(SLU_NR); + res.nrow = mat.cols(); + res.ncol = mat.rows(); + } + else + { + res.setStorageType(SLU_NC); + res.nrow = mat.rows(); + res.ncol = mat.cols(); + } + + res.Mtype = SLU_GE; + + res.storage.nnz = mat.nonZeros(); + res.storage.values = mat.valuePtr(); + res.storage.innerInd = mat.innerIndexPtr(); + res.storage.outerInd = mat.outerIndexPtr(); + + res.setScalarType(); + + // FIXME the following is not very accurate + if (MatrixType::Flags & Upper) + res.Mtype = SLU_TRU; + if (MatrixType::Flags & Lower) + res.Mtype = 
SLU_TRL; + + eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU"); + } +}; + +namespace internal { + +template +SluMatrix asSluMatrix(MatrixType& mat) +{ + return SluMatrix::Map(mat); +} + +/** View a Super LU matrix as an Eigen expression */ +template +MappedSparseMatrix map_superlu(SluMatrix& sluMat) +{ + eigen_assert((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR + || (Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC); + + Index outerSize = (Flags&RowMajor)==RowMajor ? sluMat.ncol : sluMat.nrow; + + return MappedSparseMatrix( + sluMat.nrow, sluMat.ncol, sluMat.storage.outerInd[outerSize], + sluMat.storage.outerInd, sluMat.storage.innerInd, reinterpret_cast(sluMat.storage.values) ); +} + +} // end namespace internal + +/** \ingroup SuperLUSupport_Module + * \class SuperLUBase + * \brief The base class for the direct and incomplete LU factorization of SuperLU + */ +template +class SuperLUBase : internal::noncopyable +{ + public: + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; + typedef Matrix Vector; + typedef Matrix IntRowVectorType; + typedef Matrix IntColVectorType; + typedef SparseMatrix LUMatrixType; + + public: + + SuperLUBase() {} + + ~SuperLUBase() + { + clearFactors(); + } + + Derived& derived() { return *static_cast(this); } + const Derived& derived() const { return *static_cast(this); } + + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } + + /** \returns a reference to the Super LU option object to configure the Super LU algorithms. */ + inline superlu_options_t& options() { return m_sluOptions; } + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was succesful, + * \c NumericalIssue if the matrix.appears to be negative. + */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "Decomposition is not initialized."); + return m_info; + } + + /** Computes the sparse Cholesky decomposition of \a matrix */ + void compute(const MatrixType& matrix) + { + derived().analyzePattern(matrix); + derived().factorize(matrix); + } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * \sa compute() + */ + template + inline const internal::solve_retval solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "SuperLU is not initialized."); + eigen_assert(rows()==b.rows() + && "SuperLU::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval(*this, b.derived()); + } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * \sa compute() + */ +// template +// inline const internal::sparse_solve_retval solve(const SparseMatrixBase& b) const +// { +// eigen_assert(m_isInitialized && "SuperLU is not initialized."); +// eigen_assert(rows()==b.rows() +// && "SuperLU::solve(): invalid number of rows of the right hand side matrix b"); +// return internal::sparse_solve_retval(*this, b.derived()); +// } + + /** Performs a symbolic decomposition on the sparcity of \a matrix. + * + * This function is particularly useful when solving for several problems having the same structure. 
+ * + * \sa factorize() + */ + void analyzePattern(const MatrixType& /*matrix*/) + { + m_isInitialized = true; + m_info = Success; + m_analysisIsOk = true; + m_factorizationIsOk = false; + } + + template + void dumpMemory(Stream& s) + {} + + protected: + + void initFactorization(const MatrixType& a) + { + set_default_options(&this->m_sluOptions); + + const int size = a.rows(); + m_matrix = a; + + m_sluA = internal::asSluMatrix(m_matrix); + clearFactors(); + + m_p.resize(size); + m_q.resize(size); + m_sluRscale.resize(size); + m_sluCscale.resize(size); + m_sluEtree.resize(size); + + // set empty B and X + m_sluB.setStorageType(SLU_DN); + m_sluB.setScalarType(); + m_sluB.Mtype = SLU_GE; + m_sluB.storage.values = 0; + m_sluB.nrow = 0; + m_sluB.ncol = 0; + m_sluB.storage.lda = size; + m_sluX = m_sluB; + + m_extractedDataAreDirty = true; + } + + void init() + { + m_info = InvalidInput; + m_isInitialized = false; + m_sluL.Store = 0; + m_sluU.Store = 0; + } + + void extractData() const; + + void clearFactors() + { + if(m_sluL.Store) + Destroy_SuperNode_Matrix(&m_sluL); + if(m_sluU.Store) + Destroy_CompCol_Matrix(&m_sluU); + + m_sluL.Store = 0; + m_sluU.Store = 0; + + memset(&m_sluL,0,sizeof m_sluL); + memset(&m_sluU,0,sizeof m_sluU); + } + + // cached data to reduce reallocation, etc. + mutable LUMatrixType m_l; + mutable LUMatrixType m_u; + mutable IntColVectorType m_p; + mutable IntRowVectorType m_q; + + mutable LUMatrixType m_matrix; // copy of the factorized matrix + mutable SluMatrix m_sluA; + mutable SuperMatrix m_sluL, m_sluU; + mutable SluMatrix m_sluB, m_sluX; + mutable SuperLUStat_t m_sluStat; + mutable superlu_options_t m_sluOptions; + mutable std::vector m_sluEtree; + mutable Matrix m_sluRscale, m_sluCscale; + mutable Matrix m_sluFerr, m_sluBerr; + mutable char m_sluEqued; + + mutable ComputationInfo m_info; + bool m_isInitialized; + int m_factorizationIsOk; + int m_analysisIsOk; + mutable bool m_extractedDataAreDirty; + + private: + SuperLUBase(SuperLUBase& ) { } +}; + + +/** \ingroup SuperLUSupport_Module + * \class SuperLU + * \brief A sparse direct LU factorization and solver based on the SuperLU library + * + * This class allows to solve for A.X = B sparse linear problems via a direct LU factorization + * using the SuperLU library. The sparse matrix A must be squared and invertible. The vectors or matrices + * X and B can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * + * \sa \ref TutorialSparseDirectSolvers + */ +template +class SuperLU : public SuperLUBase<_MatrixType,SuperLU<_MatrixType> > +{ + public: + typedef SuperLUBase<_MatrixType,SuperLU> Base; + typedef _MatrixType MatrixType; + typedef typename Base::Scalar Scalar; + typedef typename Base::RealScalar RealScalar; + typedef typename Base::Index Index; + typedef typename Base::IntRowVectorType IntRowVectorType; + typedef typename Base::IntColVectorType IntColVectorType; + typedef typename Base::LUMatrixType LUMatrixType; + typedef TriangularView LMatrixType; + typedef TriangularView UMatrixType; + + public: + + SuperLU() : Base() { init(); } + + SuperLU(const MatrixType& matrix) : Base() + { + Base::init(); + compute(matrix); + } + + ~SuperLU() + { + } + + /** Performs a symbolic decomposition on the sparcity of \a matrix. + * + * This function is particularly useful when solving for several problems having the same structure. 
+ * + * \sa factorize() + */ + void analyzePattern(const MatrixType& matrix) + { + m_info = InvalidInput; + m_isInitialized = false; + Base::analyzePattern(matrix); + } + + /** Performs a numeric decomposition of \a matrix + * + * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. + * + * \sa analyzePattern() + */ + void factorize(const MatrixType& matrix); + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal */ + template + void _solve(const MatrixBase &b, MatrixBase &dest) const; + #endif // EIGEN_PARSED_BY_DOXYGEN + + inline const LMatrixType& matrixL() const + { + if (m_extractedDataAreDirty) this->extractData(); + return m_l; + } + + inline const UMatrixType& matrixU() const + { + if (m_extractedDataAreDirty) this->extractData(); + return m_u; + } + + inline const IntColVectorType& permutationP() const + { + if (m_extractedDataAreDirty) this->extractData(); + return m_p; + } + + inline const IntRowVectorType& permutationQ() const + { + if (m_extractedDataAreDirty) this->extractData(); + return m_q; + } + + Scalar determinant() const; + + protected: + + using Base::m_matrix; + using Base::m_sluOptions; + using Base::m_sluA; + using Base::m_sluB; + using Base::m_sluX; + using Base::m_p; + using Base::m_q; + using Base::m_sluEtree; + using Base::m_sluEqued; + using Base::m_sluRscale; + using Base::m_sluCscale; + using Base::m_sluL; + using Base::m_sluU; + using Base::m_sluStat; + using Base::m_sluFerr; + using Base::m_sluBerr; + using Base::m_l; + using Base::m_u; + + using Base::m_analysisIsOk; + using Base::m_factorizationIsOk; + using Base::m_extractedDataAreDirty; + using Base::m_isInitialized; + using Base::m_info; + + void init() + { + Base::init(); + + set_default_options(&this->m_sluOptions); + m_sluOptions.PrintStat = NO; + m_sluOptions.ConditionNumber = NO; + m_sluOptions.Trans = NOTRANS; + m_sluOptions.ColPerm = COLAMD; + } + + + private: + SuperLU(SuperLU& ) { } +}; + +template +void SuperLU::factorize(const MatrixType& a) +{ + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + if(!m_analysisIsOk) + { + m_info = InvalidInput; + return; + } + + this->initFactorization(a); + + int info = 0; + RealScalar recip_pivot_growth, rcond; + RealScalar ferr, berr; + + StatInit(&m_sluStat); + SuperLU_gssvx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0], + &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0], + &m_sluL, &m_sluU, + NULL, 0, + &m_sluB, &m_sluX, + &recip_pivot_growth, &rcond, + &ferr, &berr, + &m_sluStat, &info, Scalar()); + StatFree(&m_sluStat); + + m_extractedDataAreDirty = true; + + // FIXME how to better check for errors ??? + m_info = info == 0 ? 
Success : NumericalIssue; + m_factorizationIsOk = true; +} + +template +template +void SuperLU::_solve(const MatrixBase &b, MatrixBase& x) const +{ + eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()"); + + const int size = m_matrix.rows(); + const int rhsCols = b.cols(); + eigen_assert(size==b.rows()); + + m_sluOptions.Trans = NOTRANS; + m_sluOptions.Fact = FACTORED; + m_sluOptions.IterRefine = NOREFINE; + + + m_sluFerr.resize(rhsCols); + m_sluBerr.resize(rhsCols); + m_sluB = SluMatrix::Map(b.const_cast_derived()); + m_sluX = SluMatrix::Map(x.derived()); + + typename Rhs::PlainObject b_cpy; + if(m_sluEqued!='N') + { + b_cpy = b; + m_sluB = SluMatrix::Map(b_cpy.const_cast_derived()); + } + + StatInit(&m_sluStat); + int info = 0; + RealScalar recip_pivot_growth, rcond; + SuperLU_gssvx(&m_sluOptions, &m_sluA, + m_q.data(), m_p.data(), + &m_sluEtree[0], &m_sluEqued, + &m_sluRscale[0], &m_sluCscale[0], + &m_sluL, &m_sluU, + NULL, 0, + &m_sluB, &m_sluX, + &recip_pivot_growth, &rcond, + &m_sluFerr[0], &m_sluBerr[0], + &m_sluStat, &info, Scalar()); + StatFree(&m_sluStat); + m_info = info==0 ? Success : NumericalIssue; +} + +// the code of this extractData() function has been adapted from the SuperLU's Matlab support code, +// +// Copyright (c) 1994 by Xerox Corporation. All rights reserved. +// +// THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY +// EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. +// +template +void SuperLUBase::extractData() const +{ + eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for extracting factors, you must first call either compute() or analyzePattern()/factorize()"); + if (m_extractedDataAreDirty) + { + int upper; + int fsupc, istart, nsupr; + int lastl = 0, lastu = 0; + SCformat *Lstore = static_cast(m_sluL.Store); + NCformat *Ustore = static_cast(m_sluU.Store); + Scalar *SNptr; + + const int size = m_matrix.rows(); + m_l.resize(size,size); + m_l.resizeNonZeros(Lstore->nnz); + m_u.resize(size,size); + m_u.resizeNonZeros(Ustore->nnz); + + int* Lcol = m_l.outerIndexPtr(); + int* Lrow = m_l.innerIndexPtr(); + Scalar* Lval = m_l.valuePtr(); + + int* Ucol = m_u.outerIndexPtr(); + int* Urow = m_u.innerIndexPtr(); + Scalar* Uval = m_u.valuePtr(); + + Ucol[0] = 0; + Ucol[0] = 0; + + /* for each supernode */ + for (int k = 0; k <= Lstore->nsuper; ++k) + { + fsupc = L_FST_SUPC(k); + istart = L_SUB_START(fsupc); + nsupr = L_SUB_START(fsupc+1) - istart; + upper = 1; + + /* for each column in the supernode */ + for (int j = fsupc; j < L_FST_SUPC(k+1); ++j) + { + SNptr = &((Scalar*)Lstore->nzval)[L_NZ_START(j)]; + + /* Extract U */ + for (int i = U_NZ_START(j); i < U_NZ_START(j+1); ++i) + { + Uval[lastu] = ((Scalar*)Ustore->nzval)[i]; + /* Matlab doesn't like explicit zero. */ + if (Uval[lastu] != 0.0) + Urow[lastu++] = U_SUB(i); + } + for (int i = 0; i < upper; ++i) + { + /* upper triangle in the supernode */ + Uval[lastu] = SNptr[i]; + /* Matlab doesn't like explicit zero. */ + if (Uval[lastu] != 0.0) + Urow[lastu++] = L_SUB(istart+i); + } + Ucol[j+1] = lastu; + + /* Extract L */ + Lval[lastl] = 1.0; /* unit diagonal */ + Lrow[lastl++] = L_SUB(istart + upper - 1); + for (int i = upper; i < nsupr; ++i) + { + Lval[lastl] = SNptr[i]; + /* Matlab doesn't like explicit zero. */ + if (Lval[lastl] != 0.0) + Lrow[lastl++] = L_SUB(istart+i); + } + Lcol[j+1] = lastl; + + ++upper; + } /* for j ... */ + + } /* for k ... 
*/ + + // squeeze the matrices : + m_l.resizeNonZeros(lastl); + m_u.resizeNonZeros(lastu); + + m_extractedDataAreDirty = false; + } +} + +template +typename SuperLU::Scalar SuperLU::determinant() const +{ + eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for computing the determinant, you must first call either compute() or analyzePattern()/factorize()"); + + if (m_extractedDataAreDirty) + this->extractData(); + + Scalar det = Scalar(1); + for (int j=0; j 0) + { + int lastId = m_u.outerIndexPtr()[j+1]-1; + eigen_assert(m_u.innerIndexPtr()[lastId]<=j); + if (m_u.innerIndexPtr()[lastId]==j) + det *= m_u.valuePtr()[lastId]; + } + } + if(m_sluEqued!='N') + return det/m_sluRscale.prod()/m_sluCscale.prod(); + else + return det; +} + +#ifdef EIGEN_PARSED_BY_DOXYGEN +#define EIGEN_SUPERLU_HAS_ILU +#endif + +#ifdef EIGEN_SUPERLU_HAS_ILU + +/** \ingroup SuperLUSupport_Module + * \class SuperILU + * \brief A sparse direct \b incomplete LU factorization and solver based on the SuperLU library + * + * This class allows to solve for an approximate solution of A.X = B sparse linear problems via an incomplete LU factorization + * using the SuperLU library. This class is aimed to be used as a preconditioner of the iterative linear solvers. + * + * \warning This class requires SuperLU 4 or later. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * + * \sa \ref TutorialSparseDirectSolvers, class ConjugateGradient, class BiCGSTAB + */ + +template +class SuperILU : public SuperLUBase<_MatrixType,SuperILU<_MatrixType> > +{ + public: + typedef SuperLUBase<_MatrixType,SuperILU> Base; + typedef _MatrixType MatrixType; + typedef typename Base::Scalar Scalar; + typedef typename Base::RealScalar RealScalar; + typedef typename Base::Index Index; + + public: + + SuperILU() : Base() { init(); } + + SuperILU(const MatrixType& matrix) : Base() + { + init(); + compute(matrix); + } + + ~SuperILU() + { + } + + /** Performs a symbolic decomposition on the sparcity of \a matrix. + * + * This function is particularly useful when solving for several problems having the same structure. + * + * \sa factorize() + */ + void analyzePattern(const MatrixType& matrix) + { + Base::analyzePattern(matrix); + } + + /** Performs a numeric decomposition of \a matrix + * + * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. 
+ * + * \sa analyzePattern() + */ + void factorize(const MatrixType& matrix); + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal */ + template + void _solve(const MatrixBase &b, MatrixBase &dest) const; + #endif // EIGEN_PARSED_BY_DOXYGEN + + protected: + + using Base::m_matrix; + using Base::m_sluOptions; + using Base::m_sluA; + using Base::m_sluB; + using Base::m_sluX; + using Base::m_p; + using Base::m_q; + using Base::m_sluEtree; + using Base::m_sluEqued; + using Base::m_sluRscale; + using Base::m_sluCscale; + using Base::m_sluL; + using Base::m_sluU; + using Base::m_sluStat; + using Base::m_sluFerr; + using Base::m_sluBerr; + using Base::m_l; + using Base::m_u; + + using Base::m_analysisIsOk; + using Base::m_factorizationIsOk; + using Base::m_extractedDataAreDirty; + using Base::m_isInitialized; + using Base::m_info; + + void init() + { + Base::init(); + + ilu_set_default_options(&m_sluOptions); + m_sluOptions.PrintStat = NO; + m_sluOptions.ConditionNumber = NO; + m_sluOptions.Trans = NOTRANS; + m_sluOptions.ColPerm = MMD_AT_PLUS_A; + + // no attempt to preserve column sum + m_sluOptions.ILU_MILU = SILU; + // only basic ILU(k) support -- no direct control over memory consumption + // better to use ILU_DropRule = DROP_BASIC | DROP_AREA + // and set ILU_FillFactor to max memory growth + m_sluOptions.ILU_DropRule = DROP_BASIC; + m_sluOptions.ILU_DropTol = NumTraits::dummy_precision()*10; + } + + private: + SuperILU(SuperILU& ) { } +}; + +template +void SuperILU::factorize(const MatrixType& a) +{ + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + if(!m_analysisIsOk) + { + m_info = InvalidInput; + return; + } + + this->initFactorization(a); + + int info = 0; + RealScalar recip_pivot_growth, rcond; + + StatInit(&m_sluStat); + SuperLU_gsisx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0], + &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0], + &m_sluL, &m_sluU, + NULL, 0, + &m_sluB, &m_sluX, + &recip_pivot_growth, &rcond, + &m_sluStat, &info, Scalar()); + StatFree(&m_sluStat); + + // FIXME how to better check for errors ??? + m_info = info == 0 ? Success : NumericalIssue; + m_factorizationIsOk = true; +} + +template +template +void SuperILU::_solve(const MatrixBase &b, MatrixBase& x) const +{ + eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()"); + + const int size = m_matrix.rows(); + const int rhsCols = b.cols(); + eigen_assert(size==b.rows()); + + m_sluOptions.Trans = NOTRANS; + m_sluOptions.Fact = FACTORED; + m_sluOptions.IterRefine = NOREFINE; + + m_sluFerr.resize(rhsCols); + m_sluBerr.resize(rhsCols); + m_sluB = SluMatrix::Map(b.const_cast_derived()); + m_sluX = SluMatrix::Map(x.derived()); + + typename Rhs::PlainObject b_cpy; + if(m_sluEqued!='N') + { + b_cpy = b; + m_sluB = SluMatrix::Map(b_cpy.const_cast_derived()); + } + + int info = 0; + RealScalar recip_pivot_growth, rcond; + + StatInit(&m_sluStat); + SuperLU_gsisx(&m_sluOptions, &m_sluA, + m_q.data(), m_p.data(), + &m_sluEtree[0], &m_sluEqued, + &m_sluRscale[0], &m_sluCscale[0], + &m_sluL, &m_sluU, + NULL, 0, + &m_sluB, &m_sluX, + &recip_pivot_growth, &rcond, + &m_sluStat, &info, Scalar()); + StatFree(&m_sluStat); + + m_info = info==0 ? 
Success : NumericalIssue; +} +#endif + +namespace internal { + +template +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef SuperLUBase<_MatrixType,Derived> Dec; + EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec().derived()._solve(rhs(),dst); + } +}; + +template +struct sparse_solve_retval, Rhs> + : sparse_solve_retval_base, Rhs> +{ + typedef SuperLUBase<_MatrixType,Derived> Dec; + EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec().derived()._solve(rhs(),dst); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SUPERLUSUPPORT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/UmfPackSupport/CMakeLists.txt b/gtsam/3rdparty/Eigen/Eigen/src/UmfPackSupport/CMakeLists.txt new file mode 100644 index 000000000..a57de0020 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/UmfPackSupport/CMakeLists.txt @@ -0,0 +1,6 @@ +FILE(GLOB Eigen_UmfPackSupport_SRCS "*.h") + +INSTALL(FILES + ${Eigen_UmfPackSupport_SRCS} + DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/UmfPackSupport COMPONENT Devel + ) diff --git a/gtsam/3rdparty/Eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h b/gtsam/3rdparty/Eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h new file mode 100644 index 000000000..f98a4c8c0 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h @@ -0,0 +1,446 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2011 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_UMFPACKSUPPORT_H +#define EIGEN_UMFPACKSUPPORT_H + +namespace Eigen { + +/* TODO extract L, extract U, compute det, etc... 
*/ + +// generic double/complex wrapper functions: + +inline void umfpack_free_numeric(void **Numeric, double) +{ umfpack_di_free_numeric(Numeric); *Numeric = 0; } + +inline void umfpack_free_numeric(void **Numeric, std::complex) +{ umfpack_zi_free_numeric(Numeric); *Numeric = 0; } + +inline void umfpack_free_symbolic(void **Symbolic, double) +{ umfpack_di_free_symbolic(Symbolic); *Symbolic = 0; } + +inline void umfpack_free_symbolic(void **Symbolic, std::complex) +{ umfpack_zi_free_symbolic(Symbolic); *Symbolic = 0; } + +inline int umfpack_symbolic(int n_row,int n_col, + const int Ap[], const int Ai[], const double Ax[], void **Symbolic, + const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO]) +{ + return umfpack_di_symbolic(n_row,n_col,Ap,Ai,Ax,Symbolic,Control,Info); +} + +inline int umfpack_symbolic(int n_row,int n_col, + const int Ap[], const int Ai[], const std::complex Ax[], void **Symbolic, + const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO]) +{ + return umfpack_zi_symbolic(n_row,n_col,Ap,Ai,&internal::real_ref(Ax[0]),0,Symbolic,Control,Info); +} + +inline int umfpack_numeric( const int Ap[], const int Ai[], const double Ax[], + void *Symbolic, void **Numeric, + const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO]) +{ + return umfpack_di_numeric(Ap,Ai,Ax,Symbolic,Numeric,Control,Info); +} + +inline int umfpack_numeric( const int Ap[], const int Ai[], const std::complex Ax[], + void *Symbolic, void **Numeric, + const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO]) +{ + return umfpack_zi_numeric(Ap,Ai,&internal::real_ref(Ax[0]),0,Symbolic,Numeric,Control,Info); +} + +inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const double Ax[], + double X[], const double B[], void *Numeric, + const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO]) +{ + return umfpack_di_solve(sys,Ap,Ai,Ax,X,B,Numeric,Control,Info); +} + +inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const std::complex Ax[], + std::complex X[], const std::complex B[], void *Numeric, + const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO]) +{ + return umfpack_zi_solve(sys,Ap,Ai,&internal::real_ref(Ax[0]),0,&internal::real_ref(X[0]),0,&internal::real_ref(B[0]),0,Numeric,Control,Info); +} + +inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, double) +{ + return umfpack_di_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric); +} + +inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, std::complex) +{ + return umfpack_zi_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric); +} + +inline int umfpack_get_numeric(int Lp[], int Lj[], double Lx[], int Up[], int Ui[], double Ux[], + int P[], int Q[], double Dx[], int *do_recip, double Rs[], void *Numeric) +{ + return umfpack_di_get_numeric(Lp,Lj,Lx,Up,Ui,Ux,P,Q,Dx,do_recip,Rs,Numeric); +} + +inline int umfpack_get_numeric(int Lp[], int Lj[], std::complex Lx[], int Up[], int Ui[], std::complex Ux[], + int P[], int Q[], std::complex Dx[], int *do_recip, double Rs[], void *Numeric) +{ + double& lx0_real = internal::real_ref(Lx[0]); + double& ux0_real = internal::real_ref(Ux[0]); + double& dx0_real = internal::real_ref(Dx[0]); + return umfpack_zi_get_numeric(Lp,Lj,Lx?&lx0_real:0,0,Up,Ui,Ux?&ux0_real:0,0,P,Q, + Dx?&dx0_real:0,0,do_recip,Rs,Numeric); +} + +inline int umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO]) +{ + return 
umfpack_di_get_determinant(Mx,Ex,NumericHandle,User_Info); +} + +inline int umfpack_get_determinant(std::complex *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO]) +{ + double& mx_real = internal::real_ref(*Mx); + return umfpack_zi_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info); +} + +/** \ingroup UmfPackSupport_Module + * \brief A sparse LU factorization and solver based on UmfPack + * + * This class allows to solve for A.X = B sparse linear problems via a LU factorization + * using the UmfPack library. The sparse matrix A must be squared and full rank. + * The vectors or matrices X and B can be either dense or sparse. + * + * \WARNING The input matrix A should be in a \b compressed and \b column-major form. + * Otherwise an expensive copy will be made. You can call the inexpensive makeCompressed() to get a compressed matrix. + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * + * \sa \ref TutorialSparseDirectSolvers + */ +template +class UmfPackLU : internal::noncopyable +{ + public: + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; + typedef Matrix Vector; + typedef Matrix IntRowVectorType; + typedef Matrix IntColVectorType; + typedef SparseMatrix LUMatrixType; + typedef SparseMatrix UmfpackMatrixType; + + public: + + UmfPackLU() { init(); } + + UmfPackLU(const MatrixType& matrix) + { + init(); + compute(matrix); + } + + ~UmfPackLU() + { + if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar()); + if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar()); + } + + inline Index rows() const { return m_copyMatrix.rows(); } + inline Index cols() const { return m_copyMatrix.cols(); } + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was succesful, + * \c NumericalIssue if the matrix.appears to be negative. + */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "Decomposition is not initialized."); + return m_info; + } + + inline const LUMatrixType& matrixL() const + { + if (m_extractedDataAreDirty) extractData(); + return m_l; + } + + inline const LUMatrixType& matrixU() const + { + if (m_extractedDataAreDirty) extractData(); + return m_u; + } + + inline const IntColVectorType& permutationP() const + { + if (m_extractedDataAreDirty) extractData(); + return m_p; + } + + inline const IntRowVectorType& permutationQ() const + { + if (m_extractedDataAreDirty) extractData(); + return m_q; + } + + /** Computes the sparse Cholesky decomposition of \a matrix + * Note that the matrix should be column-major, and in compressed format for best performance. + * \sa SparseMatrix::makeCompressed(). + */ + void compute(const MatrixType& matrix) + { + analyzePattern(matrix); + factorize(matrix); + } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * \sa compute() + */ + template + inline const internal::solve_retval solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "UmfPackLU is not initialized."); + eigen_assert(rows()==b.rows() + && "UmfPackLU::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval(*this, b.derived()); + } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. 
+ * + * \sa compute() + */ +// template +// inline const internal::sparse_solve_retval solve(const SparseMatrixBase& b) const +// { +// eigen_assert(m_isInitialized && "UmfPAckLU is not initialized."); +// eigen_assert(rows()==b.rows() +// && "UmfPAckLU::solve(): invalid number of rows of the right hand side matrix b"); +// return internal::sparse_solve_retval(*this, b.derived()); +// } + + /** Performs a symbolic decomposition on the sparcity of \a matrix. + * + * This function is particularly useful when solving for several problems having the same structure. + * + * \sa factorize(), compute() + */ + void analyzePattern(const MatrixType& matrix) + { + if(m_symbolic) + umfpack_free_symbolic(&m_symbolic,Scalar()); + if(m_numeric) + umfpack_free_numeric(&m_numeric,Scalar()); + + grapInput(matrix); + + int errorCode = 0; + errorCode = umfpack_symbolic(matrix.rows(), matrix.cols(), m_outerIndexPtr, m_innerIndexPtr, m_valuePtr, + &m_symbolic, 0, 0); + + m_isInitialized = true; + m_info = errorCode ? InvalidInput : Success; + m_analysisIsOk = true; + m_factorizationIsOk = false; + } + + /** Performs a numeric decomposition of \a matrix + * + * The given matrix must has the same sparcity than the matrix on which the pattern anylysis has been performed. + * + * \sa analyzePattern(), compute() + */ + void factorize(const MatrixType& matrix) + { + eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()"); + if(m_numeric) + umfpack_free_numeric(&m_numeric,Scalar()); + + grapInput(matrix); + + int errorCode; + errorCode = umfpack_numeric(m_outerIndexPtr, m_innerIndexPtr, m_valuePtr, + m_symbolic, &m_numeric, 0, 0); + + m_info = errorCode ? NumericalIssue : Success; + m_factorizationIsOk = true; + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal */ + template + bool _solve(const MatrixBase &b, MatrixBase &x) const; + #endif + + Scalar determinant() const; + + void extractData() const; + + protected: + + + void init() + { + m_info = InvalidInput; + m_isInitialized = false; + m_numeric = 0; + m_symbolic = 0; + m_outerIndexPtr = 0; + m_innerIndexPtr = 0; + m_valuePtr = 0; + } + + void grapInput(const MatrixType& mat) + { + m_copyMatrix.resize(mat.rows(), mat.cols()); + if( ((MatrixType::Flags&RowMajorBit)==RowMajorBit) || sizeof(typename MatrixType::Index)!=sizeof(int) || !mat.isCompressed() ) + { + // non supported input -> copy + m_copyMatrix = mat; + m_outerIndexPtr = m_copyMatrix.outerIndexPtr(); + m_innerIndexPtr = m_copyMatrix.innerIndexPtr(); + m_valuePtr = m_copyMatrix.valuePtr(); + } + else + { + m_outerIndexPtr = mat.outerIndexPtr(); + m_innerIndexPtr = mat.innerIndexPtr(); + m_valuePtr = mat.valuePtr(); + } + } + + // cached data to reduce reallocation, etc. 
+ mutable LUMatrixType m_l; + mutable LUMatrixType m_u; + mutable IntColVectorType m_p; + mutable IntRowVectorType m_q; + + UmfpackMatrixType m_copyMatrix; + const Scalar* m_valuePtr; + const int* m_outerIndexPtr; + const int* m_innerIndexPtr; + void* m_numeric; + void* m_symbolic; + + mutable ComputationInfo m_info; + bool m_isInitialized; + int m_factorizationIsOk; + int m_analysisIsOk; + mutable bool m_extractedDataAreDirty; + + private: + UmfPackLU(UmfPackLU& ) { } +}; + + +template +void UmfPackLU::extractData() const +{ + if (m_extractedDataAreDirty) + { + // get size of the data + int lnz, unz, rows, cols, nz_udiag; + umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar()); + + // allocate data + m_l.resize(rows,(std::min)(rows,cols)); + m_l.resizeNonZeros(lnz); + + m_u.resize((std::min)(rows,cols),cols); + m_u.resizeNonZeros(unz); + + m_p.resize(rows); + m_q.resize(cols); + + // extract + umfpack_get_numeric(m_l.outerIndexPtr(), m_l.innerIndexPtr(), m_l.valuePtr(), + m_u.outerIndexPtr(), m_u.innerIndexPtr(), m_u.valuePtr(), + m_p.data(), m_q.data(), 0, 0, 0, m_numeric); + + m_extractedDataAreDirty = false; + } +} + +template +typename UmfPackLU::Scalar UmfPackLU::determinant() const +{ + Scalar det; + umfpack_get_determinant(&det, 0, m_numeric, 0); + return det; +} + +template +template +bool UmfPackLU::_solve(const MatrixBase &b, MatrixBase &x) const +{ + const int rhsCols = b.cols(); + eigen_assert((BDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major rhs yet"); + eigen_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major result yet"); + + int errorCode; + for (int j=0; j +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef UmfPackLU<_MatrixType> Dec; + EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec()._solve(rhs(),dst); + } +}; + +template +struct sparse_solve_retval, Rhs> + : sparse_solve_retval_base, Rhs> +{ + typedef UmfPackLU<_MatrixType> Dec; + EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec()._solve(rhs(),dst); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_UMFPACKSUPPORT_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/misc/Image.h b/gtsam/3rdparty/Eigen/Eigen/src/misc/Image.h index 19b3e08cb..7643a0836 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/misc/Image.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/misc/Image.h @@ -25,6 +25,8 @@ #ifndef EIGEN_MISC_IMAGE_H #define EIGEN_MISC_IMAGE_H +namespace Eigen { + namespace internal { /** \class image_retval_base @@ -92,4 +94,6 @@ template struct image_retval_base image_retval(const DecompositionType& dec, const MatrixType& originalMatrix) \ : Base(dec, originalMatrix) {} +} // end namespace Eigen + #endif // EIGEN_MISC_IMAGE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/misc/Kernel.h b/gtsam/3rdparty/Eigen/Eigen/src/misc/Kernel.h index 0115970e8..37bc392ef 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/misc/Kernel.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/misc/Kernel.h @@ -25,6 +25,8 @@ #ifndef EIGEN_MISC_KERNEL_H #define EIGEN_MISC_KERNEL_H +namespace Eigen { + namespace internal { /** \class kernel_retval_base @@ -89,4 +91,6 @@ template struct kernel_retval_base using Base::cols; \ kernel_retval(const DecompositionType& dec) : Base(dec) {} +} // end namespace Eigen + #endif // EIGEN_MISC_KERNEL_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/misc/Solve.h b/gtsam/3rdparty/Eigen/Eigen/src/misc/Solve.h index 
b7cbcadb3..2afd078d4 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/misc/Solve.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/misc/Solve.h @@ -25,6 +25,8 @@ #ifndef EIGEN_MISC_SOLVE_H #define EIGEN_MISC_SOLVE_H +namespace Eigen { + namespace internal { /** \class solve_retval_base @@ -66,7 +68,7 @@ template struct solve_retval_base protected: const DecompositionType& m_dec; - const typename Rhs::Nested m_rhs; + typename Rhs::Nested m_rhs; }; } // end namespace internal @@ -84,4 +86,6 @@ template struct solve_retval_base solve_retval(const DecompositionType& dec, const Rhs& rhs) \ : Base(dec, rhs) {} +} // end namespace Eigen + #endif // EIGEN_MISC_SOLVE_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/Solve.h b/gtsam/3rdparty/Eigen/Eigen/src/misc/SparseSolve.h similarity index 70% rename from gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/Solve.h rename to gtsam/3rdparty/Eigen/Eigen/src/misc/SparseSolve.h index 19449e9de..aca34b2d1 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/Solve.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/misc/SparseSolve.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SPARSE_SOLVE_H #define EIGEN_SPARSE_SOLVE_H +namespace Eigen { + namespace internal { template struct sparse_solve_retval_base; @@ -61,7 +63,7 @@ template struct sparse_solve_retval_b protected: const DecompositionType& m_dec; - const typename Rhs::Nested m_rhs; + typename Rhs::Nested m_rhs; }; #define EIGEN_MAKE_SPARSE_SOLVE_HELPERS(DecompositionType,Rhs) \ @@ -76,7 +78,49 @@ template struct sparse_solve_retval_b using Base::cols; \ sparse_solve_retval(const DecompositionType& dec, const Rhs& rhs) \ : Base(dec, rhs) {} - + + + +template struct solve_retval_with_guess; + +template +struct traits > +{ + typedef typename DecompositionType::MatrixType MatrixType; + typedef Matrix ReturnType; +}; + +template struct solve_retval_with_guess + : public ReturnByValue > +{ + typedef typename DecompositionType::Index Index; + + solve_retval_with_guess(const DecompositionType& dec, const Rhs& rhs, const Guess& guess) + : m_dec(dec), m_rhs(rhs), m_guess(guess) + {} + + inline Index rows() const { return m_dec.cols(); } + inline Index cols() const { return m_rhs.cols(); } + + template inline void evalTo(Dest& dst) const + { + dst = m_guess; + m_dec._solveWithGuess(m_rhs,dst); + } + + protected: + const DecompositionType& m_dec; + const typename Rhs::Nested m_rhs; + const typename Guess::Nested m_guess; +}; + } // namepsace internal +} // end namespace Eigen + #endif // EIGEN_SPARSE_SOLVE_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/misc/blas.h b/gtsam/3rdparty/Eigen/Eigen/src/misc/blas.h new file mode 100644 index 000000000..6fce99ed5 --- /dev/null +++ b/gtsam/3rdparty/Eigen/Eigen/src/misc/blas.h @@ -0,0 +1,658 @@ +#ifndef BLAS_H +#define BLAS_H + +#ifdef __cplusplus +extern "C" +{ +#endif + +#define BLASFUNC(FUNC) FUNC##_ + +#ifdef __WIN64__ +typedef long long BLASLONG; +typedef unsigned long long BLASULONG; +#else +typedef long BLASLONG; +typedef unsigned long BLASULONG; +#endif + +int BLASFUNC(xerbla)(const char *, int *info, int); + +float BLASFUNC(sdot) (int *, float *, int *, float *, int *); +float BLASFUNC(sdsdot)(int *, float *, float *, int *, float *, int *); + +double BLASFUNC(dsdot) (int *, float *, int *, float *, int *); +double BLASFUNC(ddot) (int *, double *, int *, double *, int *); +double BLASFUNC(qdot) (int *, double *, int *, double *, int *); + +int BLASFUNC(cdotuw) (int *, float *, int *, float *, int *, float*); +int BLASFUNC(cdotcw) (int *, float *, int *, float *, int 
*, float*); +int BLASFUNC(zdotuw) (int *, double *, int *, double *, int *, double*); +int BLASFUNC(zdotcw) (int *, double *, int *, double *, int *, double*); + +int BLASFUNC(saxpy) (int *, float *, float *, int *, float *, int *); +int BLASFUNC(daxpy) (int *, double *, double *, int *, double *, int *); +int BLASFUNC(qaxpy) (int *, double *, double *, int *, double *, int *); +int BLASFUNC(caxpy) (int *, float *, float *, int *, float *, int *); +int BLASFUNC(zaxpy) (int *, double *, double *, int *, double *, int *); +int BLASFUNC(xaxpy) (int *, double *, double *, int *, double *, int *); +int BLASFUNC(caxpyc)(int *, float *, float *, int *, float *, int *); +int BLASFUNC(zaxpyc)(int *, double *, double *, int *, double *, int *); +int BLASFUNC(xaxpyc)(int *, double *, double *, int *, double *, int *); + +int BLASFUNC(scopy) (int *, float *, int *, float *, int *); +int BLASFUNC(dcopy) (int *, double *, int *, double *, int *); +int BLASFUNC(qcopy) (int *, double *, int *, double *, int *); +int BLASFUNC(ccopy) (int *, float *, int *, float *, int *); +int BLASFUNC(zcopy) (int *, double *, int *, double *, int *); +int BLASFUNC(xcopy) (int *, double *, int *, double *, int *); + +int BLASFUNC(sswap) (int *, float *, int *, float *, int *); +int BLASFUNC(dswap) (int *, double *, int *, double *, int *); +int BLASFUNC(qswap) (int *, double *, int *, double *, int *); +int BLASFUNC(cswap) (int *, float *, int *, float *, int *); +int BLASFUNC(zswap) (int *, double *, int *, double *, int *); +int BLASFUNC(xswap) (int *, double *, int *, double *, int *); + +float BLASFUNC(sasum) (int *, float *, int *); +float BLASFUNC(scasum)(int *, float *, int *); +double BLASFUNC(dasum) (int *, double *, int *); +double BLASFUNC(qasum) (int *, double *, int *); +double BLASFUNC(dzasum)(int *, double *, int *); +double BLASFUNC(qxasum)(int *, double *, int *); + +int BLASFUNC(isamax)(int *, float *, int *); +int BLASFUNC(idamax)(int *, double *, int *); +int BLASFUNC(iqamax)(int *, double *, int *); +int BLASFUNC(icamax)(int *, float *, int *); +int BLASFUNC(izamax)(int *, double *, int *); +int BLASFUNC(ixamax)(int *, double *, int *); + +int BLASFUNC(ismax) (int *, float *, int *); +int BLASFUNC(idmax) (int *, double *, int *); +int BLASFUNC(iqmax) (int *, double *, int *); +int BLASFUNC(icmax) (int *, float *, int *); +int BLASFUNC(izmax) (int *, double *, int *); +int BLASFUNC(ixmax) (int *, double *, int *); + +int BLASFUNC(isamin)(int *, float *, int *); +int BLASFUNC(idamin)(int *, double *, int *); +int BLASFUNC(iqamin)(int *, double *, int *); +int BLASFUNC(icamin)(int *, float *, int *); +int BLASFUNC(izamin)(int *, double *, int *); +int BLASFUNC(ixamin)(int *, double *, int *); + +int BLASFUNC(ismin)(int *, float *, int *); +int BLASFUNC(idmin)(int *, double *, int *); +int BLASFUNC(iqmin)(int *, double *, int *); +int BLASFUNC(icmin)(int *, float *, int *); +int BLASFUNC(izmin)(int *, double *, int *); +int BLASFUNC(ixmin)(int *, double *, int *); + +float BLASFUNC(samax) (int *, float *, int *); +double BLASFUNC(damax) (int *, double *, int *); +double BLASFUNC(qamax) (int *, double *, int *); +float BLASFUNC(scamax)(int *, float *, int *); +double BLASFUNC(dzamax)(int *, double *, int *); +double BLASFUNC(qxamax)(int *, double *, int *); + +float BLASFUNC(samin) (int *, float *, int *); +double BLASFUNC(damin) (int *, double *, int *); +double BLASFUNC(qamin) (int *, double *, int *); +float BLASFUNC(scamin)(int *, float *, int *); +double BLASFUNC(dzamin)(int *, double *, int *); 
+double BLASFUNC(qxamin)(int *, double *, int *); + +float BLASFUNC(smax) (int *, float *, int *); +double BLASFUNC(dmax) (int *, double *, int *); +double BLASFUNC(qmax) (int *, double *, int *); +float BLASFUNC(scmax) (int *, float *, int *); +double BLASFUNC(dzmax) (int *, double *, int *); +double BLASFUNC(qxmax) (int *, double *, int *); + +float BLASFUNC(smin) (int *, float *, int *); +double BLASFUNC(dmin) (int *, double *, int *); +double BLASFUNC(qmin) (int *, double *, int *); +float BLASFUNC(scmin) (int *, float *, int *); +double BLASFUNC(dzmin) (int *, double *, int *); +double BLASFUNC(qxmin) (int *, double *, int *); + +int BLASFUNC(sscal) (int *, float *, float *, int *); +int BLASFUNC(dscal) (int *, double *, double *, int *); +int BLASFUNC(qscal) (int *, double *, double *, int *); +int BLASFUNC(cscal) (int *, float *, float *, int *); +int BLASFUNC(zscal) (int *, double *, double *, int *); +int BLASFUNC(xscal) (int *, double *, double *, int *); +int BLASFUNC(csscal)(int *, float *, float *, int *); +int BLASFUNC(zdscal)(int *, double *, double *, int *); +int BLASFUNC(xqscal)(int *, double *, double *, int *); + +float BLASFUNC(snrm2) (int *, float *, int *); +float BLASFUNC(scnrm2)(int *, float *, int *); + +double BLASFUNC(dnrm2) (int *, double *, int *); +double BLASFUNC(qnrm2) (int *, double *, int *); +double BLASFUNC(dznrm2)(int *, double *, int *); +double BLASFUNC(qxnrm2)(int *, double *, int *); + +int BLASFUNC(srot) (int *, float *, int *, float *, int *, float *, float *); +int BLASFUNC(drot) (int *, double *, int *, double *, int *, double *, double *); +int BLASFUNC(qrot) (int *, double *, int *, double *, int *, double *, double *); +int BLASFUNC(csrot) (int *, float *, int *, float *, int *, float *, float *); +int BLASFUNC(zdrot) (int *, double *, int *, double *, int *, double *, double *); +int BLASFUNC(xqrot) (int *, double *, int *, double *, int *, double *, double *); + +int BLASFUNC(srotg) (float *, float *, float *, float *); +int BLASFUNC(drotg) (double *, double *, double *, double *); +int BLASFUNC(qrotg) (double *, double *, double *, double *); +int BLASFUNC(crotg) (float *, float *, float *, float *); +int BLASFUNC(zrotg) (double *, double *, double *, double *); +int BLASFUNC(xrotg) (double *, double *, double *, double *); + +int BLASFUNC(srotmg)(float *, float *, float *, float *, float *); +int BLASFUNC(drotmg)(double *, double *, double *, double *, double *); + +int BLASFUNC(srotm) (int *, float *, int *, float *, int *, float *); +int BLASFUNC(drotm) (int *, double *, int *, double *, int *, double *); +int BLASFUNC(qrotm) (int *, double *, int *, double *, int *, double *); + +/* Level 2 routines */ + +int BLASFUNC(sger)(int *, int *, float *, float *, int *, + float *, int *, float *, int *); +int BLASFUNC(dger)(int *, int *, double *, double *, int *, + double *, int *, double *, int *); +int BLASFUNC(qger)(int *, int *, double *, double *, int *, + double *, int *, double *, int *); +int BLASFUNC(cgeru)(int *, int *, float *, float *, int *, + float *, int *, float *, int *); +int BLASFUNC(cgerc)(int *, int *, float *, float *, int *, + float *, int *, float *, int *); +int BLASFUNC(zgeru)(int *, int *, double *, double *, int *, + double *, int *, double *, int *); +int BLASFUNC(zgerc)(int *, int *, double *, double *, int *, + double *, int *, double *, int *); +int BLASFUNC(xgeru)(int *, int *, double *, double *, int *, + double *, int *, double *, int *); +int BLASFUNC(xgerc)(int *, int *, double *, double *, int *, + 
double *, int *, double *, int *); + +int BLASFUNC(sgemv)(char *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(dgemv)(char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(qgemv)(char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(cgemv)(char *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(zgemv)(char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(xgemv)(char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); + +int BLASFUNC(strsv) (char *, char *, char *, int *, float *, int *, + float *, int *); +int BLASFUNC(dtrsv) (char *, char *, char *, int *, double *, int *, + double *, int *); +int BLASFUNC(qtrsv) (char *, char *, char *, int *, double *, int *, + double *, int *); +int BLASFUNC(ctrsv) (char *, char *, char *, int *, float *, int *, + float *, int *); +int BLASFUNC(ztrsv) (char *, char *, char *, int *, double *, int *, + double *, int *); +int BLASFUNC(xtrsv) (char *, char *, char *, int *, double *, int *, + double *, int *); + +int BLASFUNC(stpsv) (char *, char *, char *, int *, float *, float *, int *); +int BLASFUNC(dtpsv) (char *, char *, char *, int *, double *, double *, int *); +int BLASFUNC(qtpsv) (char *, char *, char *, int *, double *, double *, int *); +int BLASFUNC(ctpsv) (char *, char *, char *, int *, float *, float *, int *); +int BLASFUNC(ztpsv) (char *, char *, char *, int *, double *, double *, int *); +int BLASFUNC(xtpsv) (char *, char *, char *, int *, double *, double *, int *); + +int BLASFUNC(strmv) (char *, char *, char *, int *, float *, int *, + float *, int *); +int BLASFUNC(dtrmv) (char *, char *, char *, int *, double *, int *, + double *, int *); +int BLASFUNC(qtrmv) (char *, char *, char *, int *, double *, int *, + double *, int *); +int BLASFUNC(ctrmv) (char *, char *, char *, int *, float *, int *, + float *, int *); +int BLASFUNC(ztrmv) (char *, char *, char *, int *, double *, int *, + double *, int *); +int BLASFUNC(xtrmv) (char *, char *, char *, int *, double *, int *, + double *, int *); + +int BLASFUNC(stpmv) (char *, char *, char *, int *, float *, float *, int *); +int BLASFUNC(dtpmv) (char *, char *, char *, int *, double *, double *, int *); +int BLASFUNC(qtpmv) (char *, char *, char *, int *, double *, double *, int *); +int BLASFUNC(ctpmv) (char *, char *, char *, int *, float *, float *, int *); +int BLASFUNC(ztpmv) (char *, char *, char *, int *, double *, double *, int *); +int BLASFUNC(xtpmv) (char *, char *, char *, int *, double *, double *, int *); + +int BLASFUNC(stbmv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); +int BLASFUNC(dtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); +int BLASFUNC(qtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); +int BLASFUNC(ctbmv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); +int BLASFUNC(ztbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); +int BLASFUNC(xtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); + +int BLASFUNC(stbsv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); +int BLASFUNC(dtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); +int BLASFUNC(qtbsv) 
(char *, char *, char *, int *, int *, double *, int *, double *, int *); +int BLASFUNC(ctbsv) (char *, char *, char *, int *, int *, float *, int *, float *, int *); +int BLASFUNC(ztbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); +int BLASFUNC(xtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *); + +int BLASFUNC(ssymv) (char *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(dsymv) (char *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(qsymv) (char *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(csymv) (char *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(zsymv) (char *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(xsymv) (char *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); + +int BLASFUNC(sspmv) (char *, int *, float *, float *, + float *, int *, float *, float *, int *); +int BLASFUNC(dspmv) (char *, int *, double *, double *, + double *, int *, double *, double *, int *); +int BLASFUNC(qspmv) (char *, int *, double *, double *, + double *, int *, double *, double *, int *); +int BLASFUNC(cspmv) (char *, int *, float *, float *, + float *, int *, float *, float *, int *); +int BLASFUNC(zspmv) (char *, int *, double *, double *, + double *, int *, double *, double *, int *); +int BLASFUNC(xspmv) (char *, int *, double *, double *, + double *, int *, double *, double *, int *); + +int BLASFUNC(ssyr) (char *, int *, float *, float *, int *, + float *, int *); +int BLASFUNC(dsyr) (char *, int *, double *, double *, int *, + double *, int *); +int BLASFUNC(qsyr) (char *, int *, double *, double *, int *, + double *, int *); +int BLASFUNC(csyr) (char *, int *, float *, float *, int *, + float *, int *); +int BLASFUNC(zsyr) (char *, int *, double *, double *, int *, + double *, int *); +int BLASFUNC(xsyr) (char *, int *, double *, double *, int *, + double *, int *); + +int BLASFUNC(ssyr2) (char *, int *, float *, + float *, int *, float *, int *, float *, int *); +int BLASFUNC(dsyr2) (char *, int *, double *, + double *, int *, double *, int *, double *, int *); +int BLASFUNC(qsyr2) (char *, int *, double *, + double *, int *, double *, int *, double *, int *); +int BLASFUNC(csyr2) (char *, int *, float *, + float *, int *, float *, int *, float *, int *); +int BLASFUNC(zsyr2) (char *, int *, double *, + double *, int *, double *, int *, double *, int *); +int BLASFUNC(xsyr2) (char *, int *, double *, + double *, int *, double *, int *, double *, int *); + +int BLASFUNC(sspr) (char *, int *, float *, float *, int *, + float *); +int BLASFUNC(dspr) (char *, int *, double *, double *, int *, + double *); +int BLASFUNC(qspr) (char *, int *, double *, double *, int *, + double *); +int BLASFUNC(cspr) (char *, int *, float *, float *, int *, + float *); +int BLASFUNC(zspr) (char *, int *, double *, double *, int *, + double *); +int BLASFUNC(xspr) (char *, int *, double *, double *, int *, + double *); + +int BLASFUNC(sspr2) (char *, int *, float *, + float *, int *, float *, int *, float *); +int BLASFUNC(dspr2) (char *, int *, double *, + double *, int *, double *, int *, double *); +int BLASFUNC(qspr2) (char *, int *, double *, + double *, int *, double *, int *, double *); +int BLASFUNC(cspr2) (char *, int *, float *, + float *, int *, float *, int *, float *); +int 
BLASFUNC(zspr2) (char *, int *, double *, + double *, int *, double *, int *, double *); +int BLASFUNC(xspr2) (char *, int *, double *, + double *, int *, double *, int *, double *); + +int BLASFUNC(cher) (char *, int *, float *, float *, int *, + float *, int *); +int BLASFUNC(zher) (char *, int *, double *, double *, int *, + double *, int *); +int BLASFUNC(xher) (char *, int *, double *, double *, int *, + double *, int *); + +int BLASFUNC(chpr) (char *, int *, float *, float *, int *, float *); +int BLASFUNC(zhpr) (char *, int *, double *, double *, int *, double *); +int BLASFUNC(xhpr) (char *, int *, double *, double *, int *, double *); + +int BLASFUNC(cher2) (char *, int *, float *, + float *, int *, float *, int *, float *, int *); +int BLASFUNC(zher2) (char *, int *, double *, + double *, int *, double *, int *, double *, int *); +int BLASFUNC(xher2) (char *, int *, double *, + double *, int *, double *, int *, double *, int *); + +int BLASFUNC(chpr2) (char *, int *, float *, + float *, int *, float *, int *, float *); +int BLASFUNC(zhpr2) (char *, int *, double *, + double *, int *, double *, int *, double *); +int BLASFUNC(xhpr2) (char *, int *, double *, + double *, int *, double *, int *, double *); + +int BLASFUNC(chemv) (char *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(zhemv) (char *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(xhemv) (char *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); + +int BLASFUNC(chpmv) (char *, int *, float *, float *, + float *, int *, float *, float *, int *); +int BLASFUNC(zhpmv) (char *, int *, double *, double *, + double *, int *, double *, double *, int *); +int BLASFUNC(xhpmv) (char *, int *, double *, double *, + double *, int *, double *, double *, int *); + +int BLASFUNC(snorm)(char *, int *, int *, float *, int *); +int BLASFUNC(dnorm)(char *, int *, int *, double *, int *); +int BLASFUNC(cnorm)(char *, int *, int *, float *, int *); +int BLASFUNC(znorm)(char *, int *, int *, double *, int *); + +int BLASFUNC(sgbmv)(char *, int *, int *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(dgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(qgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(cgbmv)(char *, int *, int *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(zgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(xgbmv)(char *, int *, int *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); + +int BLASFUNC(ssbmv)(char *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(dsbmv)(char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(qsbmv)(char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(csbmv)(char *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(zsbmv)(char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(xsbmv)(char *, int *, int *, double *, double *, int *, + double *, 
int *, double *, double *, int *); + +int BLASFUNC(chbmv)(char *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(zhbmv)(char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(xhbmv)(char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); + +/* Level 3 routines */ + +int BLASFUNC(sgemm)(char *, char *, int *, int *, int *, float *, + float *, int *, float *, int *, float *, float *, int *); +int BLASFUNC(dgemm)(char *, char *, int *, int *, int *, double *, + double *, int *, double *, int *, double *, double *, int *); +int BLASFUNC(qgemm)(char *, char *, int *, int *, int *, double *, + double *, int *, double *, int *, double *, double *, int *); +int BLASFUNC(cgemm)(char *, char *, int *, int *, int *, float *, + float *, int *, float *, int *, float *, float *, int *); +int BLASFUNC(zgemm)(char *, char *, int *, int *, int *, double *, + double *, int *, double *, int *, double *, double *, int *); +int BLASFUNC(xgemm)(char *, char *, int *, int *, int *, double *, + double *, int *, double *, int *, double *, double *, int *); + +int BLASFUNC(cgemm3m)(char *, char *, int *, int *, int *, float *, + float *, int *, float *, int *, float *, float *, int *); +int BLASFUNC(zgemm3m)(char *, char *, int *, int *, int *, double *, + double *, int *, double *, int *, double *, double *, int *); +int BLASFUNC(xgemm3m)(char *, char *, int *, int *, int *, double *, + double *, int *, double *, int *, double *, double *, int *); + +int BLASFUNC(sge2mm)(char *, char *, char *, int *, int *, + float *, float *, int *, float *, int *, + float *, float *, int *); +int BLASFUNC(dge2mm)(char *, char *, char *, int *, int *, + double *, double *, int *, double *, int *, + double *, double *, int *); +int BLASFUNC(cge2mm)(char *, char *, char *, int *, int *, + float *, float *, int *, float *, int *, + float *, float *, int *); +int BLASFUNC(zge2mm)(char *, char *, char *, int *, int *, + double *, double *, int *, double *, int *, + double *, double *, int *); + +int BLASFUNC(strsm)(char *, char *, char *, char *, int *, int *, + float *, float *, int *, float *, int *); +int BLASFUNC(dtrsm)(char *, char *, char *, char *, int *, int *, + double *, double *, int *, double *, int *); +int BLASFUNC(qtrsm)(char *, char *, char *, char *, int *, int *, + double *, double *, int *, double *, int *); +int BLASFUNC(ctrsm)(char *, char *, char *, char *, int *, int *, + float *, float *, int *, float *, int *); +int BLASFUNC(ztrsm)(char *, char *, char *, char *, int *, int *, + double *, double *, int *, double *, int *); +int BLASFUNC(xtrsm)(char *, char *, char *, char *, int *, int *, + double *, double *, int *, double *, int *); + +int BLASFUNC(strmm)(char *, char *, char *, char *, int *, int *, + float *, float *, int *, float *, int *); +int BLASFUNC(dtrmm)(char *, char *, char *, char *, int *, int *, + double *, double *, int *, double *, int *); +int BLASFUNC(qtrmm)(char *, char *, char *, char *, int *, int *, + double *, double *, int *, double *, int *); +int BLASFUNC(ctrmm)(char *, char *, char *, char *, int *, int *, + float *, float *, int *, float *, int *); +int BLASFUNC(ztrmm)(char *, char *, char *, char *, int *, int *, + double *, double *, int *, double *, int *); +int BLASFUNC(xtrmm)(char *, char *, char *, char *, int *, int *, + double *, double *, int *, double *, int *); + +int BLASFUNC(ssymm)(char *, char *, int *, int *, float *, 
float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(dsymm)(char *, char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(qsymm)(char *, char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(csymm)(char *, char *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(zsymm)(char *, char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(xsymm)(char *, char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); + +int BLASFUNC(csymm3m)(char *, char *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(zsymm3m)(char *, char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(xsymm3m)(char *, char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); + +int BLASFUNC(ssyrk)(char *, char *, int *, int *, float *, float *, int *, + float *, float *, int *); +int BLASFUNC(dsyrk)(char *, char *, int *, int *, double *, double *, int *, + double *, double *, int *); +int BLASFUNC(qsyrk)(char *, char *, int *, int *, double *, double *, int *, + double *, double *, int *); +int BLASFUNC(csyrk)(char *, char *, int *, int *, float *, float *, int *, + float *, float *, int *); +int BLASFUNC(zsyrk)(char *, char *, int *, int *, double *, double *, int *, + double *, double *, int *); +int BLASFUNC(xsyrk)(char *, char *, int *, int *, double *, double *, int *, + double *, double *, int *); + +int BLASFUNC(ssyr2k)(char *, char *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(dsyr2k)(char *, char *, int *, int *, double *, double *, int *, + double*, int *, double *, double *, int *); +int BLASFUNC(qsyr2k)(char *, char *, int *, int *, double *, double *, int *, + double*, int *, double *, double *, int *); +int BLASFUNC(csyr2k)(char *, char *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(zsyr2k)(char *, char *, int *, int *, double *, double *, int *, + double*, int *, double *, double *, int *); +int BLASFUNC(xsyr2k)(char *, char *, int *, int *, double *, double *, int *, + double*, int *, double *, double *, int *); + +int BLASFUNC(chemm)(char *, char *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(zhemm)(char *, char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(xhemm)(char *, char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); + +int BLASFUNC(chemm3m)(char *, char *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(zhemm3m)(char *, char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); +int BLASFUNC(xhemm3m)(char *, char *, int *, int *, double *, double *, int *, + double *, int *, double *, double *, int *); + +int BLASFUNC(cherk)(char *, char *, int *, int *, float *, float *, int *, + float *, float *, int *); +int BLASFUNC(zherk)(char *, char *, int *, int *, double *, double *, int *, + double *, double *, int *); +int BLASFUNC(xherk)(char *, char *, int *, int *, double *, double *, int *, + double *, double *, int *); + +int BLASFUNC(cher2k)(char *, char 
*, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(zher2k)(char *, char *, int *, int *, double *, double *, int *, + double*, int *, double *, double *, int *); +int BLASFUNC(xher2k)(char *, char *, int *, int *, double *, double *, int *, + double*, int *, double *, double *, int *); +int BLASFUNC(cher2m)(char *, char *, char *, int *, int *, float *, float *, int *, + float *, int *, float *, float *, int *); +int BLASFUNC(zher2m)(char *, char *, char *, int *, int *, double *, double *, int *, + double*, int *, double *, double *, int *); +int BLASFUNC(xher2m)(char *, char *, char *, int *, int *, double *, double *, int *, + double*, int *, double *, double *, int *); + +int BLASFUNC(sgemt)(char *, int *, int *, float *, float *, int *, + float *, int *); +int BLASFUNC(dgemt)(char *, int *, int *, double *, double *, int *, + double *, int *); +int BLASFUNC(cgemt)(char *, int *, int *, float *, float *, int *, + float *, int *); +int BLASFUNC(zgemt)(char *, int *, int *, double *, double *, int *, + double *, int *); + +int BLASFUNC(sgema)(char *, char *, int *, int *, float *, + float *, int *, float *, float *, int *, float *, int *); +int BLASFUNC(dgema)(char *, char *, int *, int *, double *, + double *, int *, double*, double *, int *, double*, int *); +int BLASFUNC(cgema)(char *, char *, int *, int *, float *, + float *, int *, float *, float *, int *, float *, int *); +int BLASFUNC(zgema)(char *, char *, int *, int *, double *, + double *, int *, double*, double *, int *, double*, int *); + +int BLASFUNC(sgems)(char *, char *, int *, int *, float *, + float *, int *, float *, float *, int *, float *, int *); +int BLASFUNC(dgems)(char *, char *, int *, int *, double *, + double *, int *, double*, double *, int *, double*, int *); +int BLASFUNC(cgems)(char *, char *, int *, int *, float *, + float *, int *, float *, float *, int *, float *, int *); +int BLASFUNC(zgems)(char *, char *, int *, int *, double *, + double *, int *, double*, double *, int *, double*, int *); + +int BLASFUNC(sgetf2)(int *, int *, float *, int *, int *, int *); +int BLASFUNC(dgetf2)(int *, int *, double *, int *, int *, int *); +int BLASFUNC(qgetf2)(int *, int *, double *, int *, int *, int *); +int BLASFUNC(cgetf2)(int *, int *, float *, int *, int *, int *); +int BLASFUNC(zgetf2)(int *, int *, double *, int *, int *, int *); +int BLASFUNC(xgetf2)(int *, int *, double *, int *, int *, int *); + +int BLASFUNC(sgetrf)(int *, int *, float *, int *, int *, int *); +int BLASFUNC(dgetrf)(int *, int *, double *, int *, int *, int *); +int BLASFUNC(qgetrf)(int *, int *, double *, int *, int *, int *); +int BLASFUNC(cgetrf)(int *, int *, float *, int *, int *, int *); +int BLASFUNC(zgetrf)(int *, int *, double *, int *, int *, int *); +int BLASFUNC(xgetrf)(int *, int *, double *, int *, int *, int *); + +int BLASFUNC(slaswp)(int *, float *, int *, int *, int *, int *, int *); +int BLASFUNC(dlaswp)(int *, double *, int *, int *, int *, int *, int *); +int BLASFUNC(qlaswp)(int *, double *, int *, int *, int *, int *, int *); +int BLASFUNC(claswp)(int *, float *, int *, int *, int *, int *, int *); +int BLASFUNC(zlaswp)(int *, double *, int *, int *, int *, int *, int *); +int BLASFUNC(xlaswp)(int *, double *, int *, int *, int *, int *, int *); + +int BLASFUNC(sgetrs)(char *, int *, int *, float *, int *, int *, float *, int *, int *); +int BLASFUNC(dgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *); +int BLASFUNC(qgetrs)(char *, int *, 
int *, double *, int *, int *, double *, int *, int *); +int BLASFUNC(cgetrs)(char *, int *, int *, float *, int *, int *, float *, int *, int *); +int BLASFUNC(zgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *); +int BLASFUNC(xgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *); + +int BLASFUNC(sgesv)(int *, int *, float *, int *, int *, float *, int *, int *); +int BLASFUNC(dgesv)(int *, int *, double *, int *, int *, double*, int *, int *); +int BLASFUNC(qgesv)(int *, int *, double *, int *, int *, double*, int *, int *); +int BLASFUNC(cgesv)(int *, int *, float *, int *, int *, float *, int *, int *); +int BLASFUNC(zgesv)(int *, int *, double *, int *, int *, double*, int *, int *); +int BLASFUNC(xgesv)(int *, int *, double *, int *, int *, double*, int *, int *); + +int BLASFUNC(spotf2)(char *, int *, float *, int *, int *); +int BLASFUNC(dpotf2)(char *, int *, double *, int *, int *); +int BLASFUNC(qpotf2)(char *, int *, double *, int *, int *); +int BLASFUNC(cpotf2)(char *, int *, float *, int *, int *); +int BLASFUNC(zpotf2)(char *, int *, double *, int *, int *); +int BLASFUNC(xpotf2)(char *, int *, double *, int *, int *); + +int BLASFUNC(spotrf)(char *, int *, float *, int *, int *); +int BLASFUNC(dpotrf)(char *, int *, double *, int *, int *); +int BLASFUNC(qpotrf)(char *, int *, double *, int *, int *); +int BLASFUNC(cpotrf)(char *, int *, float *, int *, int *); +int BLASFUNC(zpotrf)(char *, int *, double *, int *, int *); +int BLASFUNC(xpotrf)(char *, int *, double *, int *, int *); + +int BLASFUNC(slauu2)(char *, int *, float *, int *, int *); +int BLASFUNC(dlauu2)(char *, int *, double *, int *, int *); +int BLASFUNC(qlauu2)(char *, int *, double *, int *, int *); +int BLASFUNC(clauu2)(char *, int *, float *, int *, int *); +int BLASFUNC(zlauu2)(char *, int *, double *, int *, int *); +int BLASFUNC(xlauu2)(char *, int *, double *, int *, int *); + +int BLASFUNC(slauum)(char *, int *, float *, int *, int *); +int BLASFUNC(dlauum)(char *, int *, double *, int *, int *); +int BLASFUNC(qlauum)(char *, int *, double *, int *, int *); +int BLASFUNC(clauum)(char *, int *, float *, int *, int *); +int BLASFUNC(zlauum)(char *, int *, double *, int *, int *); +int BLASFUNC(xlauum)(char *, int *, double *, int *, int *); + +int BLASFUNC(strti2)(char *, char *, int *, float *, int *, int *); +int BLASFUNC(dtrti2)(char *, char *, int *, double *, int *, int *); +int BLASFUNC(qtrti2)(char *, char *, int *, double *, int *, int *); +int BLASFUNC(ctrti2)(char *, char *, int *, float *, int *, int *); +int BLASFUNC(ztrti2)(char *, char *, int *, double *, int *, int *); +int BLASFUNC(xtrti2)(char *, char *, int *, double *, int *, int *); + +int BLASFUNC(strtri)(char *, char *, int *, float *, int *, int *); +int BLASFUNC(dtrtri)(char *, char *, int *, double *, int *, int *); +int BLASFUNC(qtrtri)(char *, char *, int *, double *, int *, int *); +int BLASFUNC(ctrtri)(char *, char *, int *, float *, int *, int *); +int BLASFUNC(ztrtri)(char *, char *, int *, double *, int *, int *); +int BLASFUNC(xtrtri)(char *, char *, int *, double *, int *, int *); + +int BLASFUNC(spotri)(char *, int *, float *, int *, int *); +int BLASFUNC(dpotri)(char *, int *, double *, int *, int *); +int BLASFUNC(qpotri)(char *, int *, double *, int *, int *); +int BLASFUNC(cpotri)(char *, int *, float *, int *, int *); +int BLASFUNC(zpotri)(char *, int *, double *, int *, int *); +int BLASFUNC(xpotri)(char *, int *, double *, int *, int *); + +#ifdef __cplusplus 
+} +#endif + +#endif diff --git a/gtsam/3rdparty/Eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h b/gtsam/3rdparty/Eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h index 7d509e78f..5b979ebf8 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h @@ -29,6 +29,16 @@ operator/(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const */ EIGEN_MAKE_CWISE_BINARY_OP(min,internal::scalar_min_op) +/** \returns an expression of the coefficient-wise min of \c *this and scalar \a other + * + * \sa max() + */ +EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const ConstantReturnType> +(min)(const Scalar &other) const +{ + return (min)(Derived::PlainObject::Constant(rows(), cols(), other)); +} + /** \returns an expression of the coefficient-wise max of \c *this and \a other * * Example: \include Cwise_max.cpp @@ -38,6 +48,16 @@ EIGEN_MAKE_CWISE_BINARY_OP(min,internal::scalar_min_op) */ EIGEN_MAKE_CWISE_BINARY_OP(max,internal::scalar_max_op) +/** \returns an expression of the coefficient-wise max of \c *this and scalar \a other + * + * \sa min() + */ +EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const ConstantReturnType> +(max)(const Scalar &other) const +{ + return (max)(Derived::PlainObject::Constant(rows(), cols(), other)); +} + /** \returns an expression of the coefficient-wise \< operator of *this and \a other * * Example: \include Cwise_less.cpp @@ -141,3 +161,39 @@ operator-(const Scalar& scalar,const EIGEN_CURRENT_STORAGE_BASE_CLASS& { return (-other) + scalar; } + +/** \returns an expression of the coefficient-wise && operator of *this and \a other + * + * \warning this operator is for expression of bool only. + * + * Example: \include Cwise_boolean_and.cpp + * Output: \verbinclude Cwise_boolean_and.out + * + * \sa operator||(), select() + */ +template +inline const CwiseBinaryOp +operator&&(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const +{ + EIGEN_STATIC_ASSERT((internal::is_same::value && internal::is_same::value), + THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL); + return CwiseBinaryOp(derived(),other.derived()); +} + +/** \returns an expression of the coefficient-wise || operator of *this and \a other + * + * \warning this operator is for expression of bool only. 
+ * + * Example: \include Cwise_boolean_or.cpp + * Output: \verbinclude Cwise_boolean_or.out + * + * \sa operator&&(), select() + */ +template +inline const CwiseBinaryOp +operator||(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const +{ + EIGEN_STATIC_ASSERT((internal::is_same::value && internal::is_same::value), + THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL); + return CwiseBinaryOp(derived(),other.derived()); +} diff --git a/gtsam/3rdparty/Eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h b/gtsam/3rdparty/Eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h index 35183f91f..566f4c1f4 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h @@ -91,6 +91,16 @@ cwiseMin(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); } +/** \returns an expression of the coefficient-wise min of *this and scalar \a other + * + * \sa class CwiseBinaryOp, min() + */ +EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const ConstantReturnType> +cwiseMin(const Scalar &other) const +{ + return cwiseMin(Derived::PlainObject::Constant(rows(), cols(), other)); +} + /** \returns an expression of the coefficient-wise max of *this and \a other * * Example: \include MatrixBase_cwiseMax.cpp @@ -105,6 +115,17 @@ cwiseMax(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); } +/** \returns an expression of the coefficient-wise max of *this and scalar \a other + * + * \sa class CwiseBinaryOp, min() + */ +EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const ConstantReturnType> +cwiseMax(const Scalar &other) const +{ + return cwiseMax(Derived::PlainObject::Constant(rows(), cols(), other)); +} + + /** \returns an expression of the coefficient-wise quotient of *this and \a other * * Example: \include MatrixBase_cwiseQuotient.cpp diff --git a/gtsam/3rdparty/Eigen/bench/BenchSparseUtil.h b/gtsam/3rdparty/Eigen/bench/BenchSparseUtil.h index ff836bffe..13981f6b7 100644 --- a/gtsam/3rdparty/Eigen/bench/BenchSparseUtil.h +++ b/gtsam/3rdparty/Eigen/bench/BenchSparseUtil.h @@ -26,7 +26,7 @@ typedef SparseMatrix EigenSparseMatrix; void fillMatrix(float density, int rows, int cols, EigenSparseMatrix& dst) { - dst.reserve(rows*cols*density); + dst.reserve(double(rows)*cols*density); for(int j = 0; j < cols; j++) { for(int i = 0; i < rows; i++) @@ -122,22 +122,24 @@ void eiToCSparse(const EigenSparseMatrix& src, cs* &dst) #include #include #include -// #include -// using namespace boost; -// using namespace boost::numeric; -// using namespace boost::numeric::ublas; +typedef boost::numeric::ublas::compressed_matrix UBlasSparse; -typedef boost::numeric::ublas::compressed_matrix UblasMatrix; - -void eiToUblas(const EigenSparseMatrix& src, UblasMatrix& dst) +void eiToUblas(const EigenSparseMatrix& src, UBlasSparse& dst) { + dst.resize(src.rows(), src.cols(), false); for (int j=0; j +void eiToUblasVec(const EigenType& src, UblasType& dst) +{ + dst.resize(src.size()); + for (int j=0; j +#elif defined(__APPLE__) +#include +#include #else # include #endif @@ -76,6 +79,7 @@ public: inline void reset() { m_bests.fill(1e9); + m_worsts.fill(0); m_totals.setZero(); } inline void start() @@ -89,40 +93,52 @@ public: m_times[REAL_TIMER] = getRealTime() - m_starts[REAL_TIMER]; #if EIGEN_VERSION_AT_LEAST(2,90,0) m_bests = m_bests.cwiseMin(m_times); + m_worsts = m_worsts.cwiseMax(m_times); #else 
m_bests(0) = std::min(m_bests(0),m_times(0)); m_bests(1) = std::min(m_bests(1),m_times(1)); + m_worsts(0) = std::max(m_worsts(0),m_times(0)); + m_worsts(1) = std::max(m_worsts(1),m_times(1)); #endif m_totals += m_times; } /** Return the elapsed time in seconds between the last start/stop pair */ - inline double value(int TIMER = CPU_TIMER) + inline double value(int TIMER = CPU_TIMER) const { return m_times[TIMER]; } /** Return the best elapsed time in seconds */ - inline double best(int TIMER = CPU_TIMER) + inline double best(int TIMER = CPU_TIMER) const { return m_bests[TIMER]; } + /** Return the worst elapsed time in seconds + */ + inline double worst(int TIMER = CPU_TIMER) const + { + return m_worsts[TIMER]; + } + /** Return the total elapsed time in seconds. */ - inline double total(int TIMER = CPU_TIMER) + inline double total(int TIMER = CPU_TIMER) const { return m_totals[TIMER]; } - inline double getCpuTime() + inline double getCpuTime() const { #ifdef _WIN32 LARGE_INTEGER query_ticks; QueryPerformanceCounter(&query_ticks); return query_ticks.QuadPart/m_frequency; +#elif __APPLE__ + return double(mach_absolute_time())*1e-9; #else timespec ts; clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts); @@ -130,12 +146,14 @@ public: #endif } - inline double getRealTime() + inline double getRealTime() const { #ifdef _WIN32 SYSTEMTIME st; GetSystemTime(&st); return (double)st.wSecond + 1.e-3 * (double)st.wMilliseconds; +#elif __APPLE__ + return double(mach_absolute_time())*1e-9; #else timespec ts; clock_gettime(CLOCK_REALTIME, &ts); @@ -150,8 +168,11 @@ protected: Vector2d m_starts; Vector2d m_times; Vector2d m_bests; + Vector2d m_worsts; Vector2d m_totals; +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW }; #define BENCH(TIMER,TRIES,REP,CODE) { \ diff --git a/gtsam/3rdparty/Eigen/bench/BenchUtil.h b/gtsam/3rdparty/Eigen/bench/BenchUtil.h index 9798fa385..8883a1380 100644 --- a/gtsam/3rdparty/Eigen/bench/BenchUtil.h +++ b/gtsam/3rdparty/Eigen/bench/BenchUtil.h @@ -69,4 +69,24 @@ void eiToGsl(const EigenMatrixType& src, gsl_matrix** dst) } #endif +#ifdef BENCH_UBLAS +#include +#include +template +void eiToUblas(const EigenMatrixType& src, UblasMatrixType& dst) +{ + dst.resize(src.rows(),src.cols()); + for (int j=0; j +void eiToUblasVec(const EigenType& src, UblasType& dst) +{ + dst.resize(src.size()); + for (int j=0; j(kc, mc, nc); + internal::computeProductBlockingSizes(kc, mc, nc); std::cout << "blocking size (mc x kc) = " << mc << " x " << kc << "\n"; C r = c; @@ -188,23 +189,22 @@ int main(int argc, char ** argv) blas_gemm(a,b,r); c.noalias() += a * b; if(!r.isApprox(c)) std::cerr << "Warning, your product is crap!\n\n"; -// std::cerr << r << "\n\n" << c << "\n\n"; #else gemm(a,b,c); r.noalias() += a.cast() * b.cast(); if(!r.isApprox(c)) std::cerr << "Warning, your product is crap!\n\n"; -// std::cerr << c << "\n\n"; -// std::cerr << r << "\n\n"; #endif #ifdef HAVE_BLAS BenchTimer tblas; + c = rc; BENCH(tblas, tries, rep, blas_gemm(a,b,c)); std::cout << "blas cpu " << tblas.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tblas.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tblas.total(CPU_TIMER) << "s)\n"; std::cout << "blas real " << tblas.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tblas.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tblas.total(REAL_TIMER) << "s)\n"; #endif BenchTimer tmt; + c = rc; BENCH(tmt, tries, rep, gemm(a,b,c)); std::cout << "eigen cpu " << tmt.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(CPU_TIMER) << 
"s)\n"; std::cout << "eigen real " << tmt.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmt.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmt.total(REAL_TIMER) << "s)\n"; @@ -213,8 +213,9 @@ int main(int argc, char ** argv) if(procs>1) { BenchTimer tmono; - //omp_set_num_threads(1); - Eigen::setNbThreads(1); + omp_set_num_threads(1); + Eigen::internal::setNbThreads(1); + c = rc; BENCH(tmono, tries, rep, gemm(a,b,c)); std::cout << "eigen mono cpu " << tmono.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmono.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmono.total(CPU_TIMER) << "s)\n"; std::cout << "eigen mono real " << tmono.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmono.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmono.total(REAL_TIMER) << "s)\n"; diff --git a/gtsam/3rdparty/Eigen/bench/btl/CMakeLists.txt b/gtsam/3rdparty/Eigen/bench/btl/CMakeLists.txt index 95ffbf42a..119b470d9 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/CMakeLists.txt +++ b/gtsam/3rdparty/Eigen/bench/btl/CMakeLists.txt @@ -2,7 +2,7 @@ PROJECT(BTL) CMAKE_MINIMUM_REQUIRED(VERSION 2.6.2) -set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) +set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake ${Eigen_SOURCE_DIR}/cmake) include(MacroOptionalAddSubdirectory) OPTION(BTL_NOVEC "Disable SSE/Altivec optimizations when possible" OFF) @@ -90,17 +90,14 @@ endmacro(btl_add_target_property) ENABLE_TESTING() add_subdirectory(libs/eigen3) -# add_subdirectory(libs/hand_vec) +add_subdirectory(libs/eigen2) +add_subdirectory(libs/BLAS) +add_subdirectory(libs/ublas) add_subdirectory(libs/gmm) add_subdirectory(libs/mtl4) -add_subdirectory(libs/ublas) add_subdirectory(libs/blitz) add_subdirectory(libs/tvmet) -add_subdirectory(libs/C_BLAS) -add_subdirectory(libs/f77) -add_subdirectory(libs/C) add_subdirectory(libs/STL) -add_subdirectory(libs/STL_algo) add_subdirectory(data) diff --git a/gtsam/3rdparty/Eigen/bench/btl/actions/action_aat_product.hh b/gtsam/3rdparty/Eigen/bench/btl/actions/action_aat_product.hh index 92930e219..aa5b35c94 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/actions/action_aat_product.hh +++ b/gtsam/3rdparty/Eigen/bench/btl/actions/action_aat_product.hh @@ -87,7 +87,7 @@ public : } double nb_op_base( void ){ - return 2.0*_size*_size*_size; + return double(_size)*double(_size)*double(_size); } inline void initialize( void ){ diff --git a/gtsam/3rdparty/Eigen/bench/btl/actions/action_hessenberg.hh b/gtsam/3rdparty/Eigen/bench/btl/actions/action_hessenberg.hh index a46964657..2100ebd89 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/actions/action_hessenberg.hh +++ b/gtsam/3rdparty/Eigen/bench/btl/actions/action_hessenberg.hh @@ -136,14 +136,17 @@ public : Action_tridiagonalization( int size ):_size(size) { - MESSAGE("Action_hessenberg Ctor"); + MESSAGE("Action_tridiagonalization Ctor"); // STL vector initialization - typename Interface::stl_matrix tmp; - init_matrix(tmp,_size); - init_matrix(X_stl,_size); - STL_interface::ata_product(tmp,X_stl,_size); - + init_matrix(X_stl,_size); + + for(int i=0; i<_size; ++i) + { + for(int j=0; j(C_stl,_size); init_matrix(resu_stl,_size); @@ -155,9 +158,9 @@ public : _cost = 0; for (int j=0; j<_size-2; ++j) { - int r = std::max(0,_size-j-1); - int b = std::max(0,_size-j-2); - _cost += 6 + 3*b + r*r*8; + double r = std::max(0,_size-j-1); + double b = std::max(0,_size-j-2); + _cost += 6. 
+ 3.*b + r*r*8.; } } diff --git a/gtsam/3rdparty/Eigen/bench/btl/actions/basic_actions.hh b/gtsam/3rdparty/Eigen/bench/btl/actions/basic_actions.hh index 62442f01f..a3333ea26 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/actions/basic_actions.hh +++ b/gtsam/3rdparty/Eigen/bench/btl/actions/basic_actions.hh @@ -6,7 +6,7 @@ #include "action_atv_product.hh" #include "action_matrix_matrix_product.hh" -#include "action_ata_product.hh" +// #include "action_ata_product.hh" #include "action_aat_product.hh" #include "action_trisolve.hh" diff --git a/gtsam/3rdparty/Eigen/bench/btl/cmake/FindEigen3.cmake b/gtsam/3rdparty/Eigen/bench/btl/cmake/FindEigen3.cmake deleted file mode 100644 index 9c546a05d..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/cmake/FindEigen3.cmake +++ /dev/null @@ -1,81 +0,0 @@ -# - Try to find Eigen3 lib -# -# This module supports requiring a minimum version, e.g. you can do -# find_package(Eigen3 3.1.2) -# to require version 3.1.2 or newer of Eigen3. -# -# Once done this will define -# -# EIGEN3_FOUND - system has eigen lib with correct version -# EIGEN3_INCLUDE_DIR - the eigen include directory -# EIGEN3_VERSION - eigen version - -# Copyright (c) 2006, 2007 Montel Laurent, -# Copyright (c) 2008, 2009 Gael Guennebaud, -# Copyright (c) 2009 Benoit Jacob -# Redistribution and use is allowed according to the terms of the 2-clause BSD license. - -if(NOT Eigen3_FIND_VERSION) - if(NOT Eigen3_FIND_VERSION_MAJOR) - set(Eigen3_FIND_VERSION_MAJOR 2) - endif(NOT Eigen3_FIND_VERSION_MAJOR) - if(NOT Eigen3_FIND_VERSION_MINOR) - set(Eigen3_FIND_VERSION_MINOR 91) - endif(NOT Eigen3_FIND_VERSION_MINOR) - if(NOT Eigen3_FIND_VERSION_PATCH) - set(Eigen3_FIND_VERSION_PATCH 0) - endif(NOT Eigen3_FIND_VERSION_PATCH) - - set(Eigen3_FIND_VERSION "${Eigen3_FIND_VERSION_MAJOR}.${Eigen3_FIND_VERSION_MINOR}.${Eigen3_FIND_VERSION_PATCH}") -endif(NOT Eigen3_FIND_VERSION) - -macro(_eigen3_check_version) - file(READ "${EIGEN3_INCLUDE_DIR}/Eigen/src/Core/util/Macros.h" _eigen3_version_header) - - string(REGEX MATCH "define[ \t]+EIGEN_WORLD_VERSION[ \t]+([0-9]+)" _eigen3_world_version_match "${_eigen3_version_header}") - set(EIGEN3_WORLD_VERSION "${CMAKE_MATCH_1}") - string(REGEX MATCH "define[ \t]+EIGEN_MAJOR_VERSION[ \t]+([0-9]+)" _eigen3_major_version_match "${_eigen3_version_header}") - set(EIGEN3_MAJOR_VERSION "${CMAKE_MATCH_1}") - string(REGEX MATCH "define[ \t]+EIGEN_MINOR_VERSION[ \t]+([0-9]+)" _eigen3_minor_version_match "${_eigen3_version_header}") - set(EIGEN3_MINOR_VERSION "${CMAKE_MATCH_1}") - - set(EIGEN3_VERSION ${EIGEN3_WORLD_VERSION}.${EIGEN3_MAJOR_VERSION}.${EIGEN3_MINOR_VERSION}) - if(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION}) - set(EIGEN3_VERSION_OK FALSE) - else(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION}) - set(EIGEN3_VERSION_OK TRUE) - endif(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION}) - - if(NOT EIGEN3_VERSION_OK) - - message(STATUS "Eigen3 version ${EIGEN3_VERSION} found in ${EIGEN3_INCLUDE_DIR}, " - "but at least version ${Eigen3_FIND_VERSION} is required") - endif(NOT EIGEN3_VERSION_OK) -endmacro(_eigen3_check_version) - -if (EIGEN3_INCLUDE_DIR) - - # in cache already - _eigen3_check_version() - set(EIGEN3_FOUND ${EIGEN3_VERSION_OK}) - -else (EIGEN3_INCLUDE_DIR) - - find_path(EIGEN3_INCLUDE_DIR NAMES signature_of_eigen3_matrix_library - PATHS - ${CMAKE_INSTALL_PREFIX}/include - ${KDE4_INCLUDE_DIR} - PATH_SUFFIXES eigen3 eigen - ) - - if(EIGEN3_INCLUDE_DIR) - _eigen3_check_version() - endif(EIGEN3_INCLUDE_DIR) - - include(FindPackageHandleStandardArgs) - 
find_package_handle_standard_args(Eigen3 DEFAULT_MSG EIGEN3_INCLUDE_DIR EIGEN3_VERSION_OK) - - mark_as_advanced(EIGEN3_INCLUDE_DIR) - -endif(EIGEN3_INCLUDE_DIR) - diff --git a/gtsam/3rdparty/Eigen/bench/btl/data/action_settings.txt b/gtsam/3rdparty/Eigen/bench/btl/data/action_settings.txt index da80f56a0..e32213e22 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/data/action_settings.txt +++ b/gtsam/3rdparty/Eigen/bench/btl/data/action_settings.txt @@ -5,6 +5,7 @@ axpby ; "{/*1.5 Y = alpha X + beta Y}" ; "vector size" ; 5:1000000 axpy ; "{/*1.5 Y += alpha X}" ; "vector size" ; 5:1000000 matrix_matrix ; "{/*1.5 matrix matrix product}" ; "matrix size" ; 4:3000 matrix_vector ; "{/*1.5 matrix vector product}" ; "matrix size" ; 4:3000 +trmm ; "{/*1.5 triangular matrix matrix product}" ; "matrix size" ; 4:3000 trisolve_vector ; "{/*1.5 triangular solver - vector (X = inv(L) X)}" ; "size" ; 4:3000 trisolve_matrix ; "{/*1.5 triangular solver - matrix (M = inv(L) M)}" ; "size" ; 4:3000 cholesky ; "{/*1.5 Cholesky decomposition}" ; "matrix size" ; 4:3000 diff --git a/gtsam/3rdparty/Eigen/bench/btl/data/go_mean b/gtsam/3rdparty/Eigen/bench/btl/data/go_mean index 37f29fa17..42338ca27 100755 --- a/gtsam/3rdparty/Eigen/bench/btl/data/go_mean +++ b/gtsam/3rdparty/Eigen/bench/btl/data/go_mean @@ -27,7 +27,7 @@ echo '
<ul>'\
     '<li>'`cat /proc/cpuinfo | grep "model name" | head -n 1`\
     ' ('`uname -m`')</li>'\
     '<li>compiler: '`cat compiler_version.txt`'</li>'\
-    '<li>eigen2: '`svn info $EIGENDIR | grep Revision`'</li>'\
+    '<li>eigen3: '`hg identify -i $EIGENDIR`'</li>'\
     '</ul>' \
     '<p/>
' >> $webpagefilename @@ -37,11 +37,11 @@ source mk_mean_script.sh matrix_vector $1 11 50 300 1000 $mode $prefix source mk_mean_script.sh atv $1 11 50 300 1000 $mode $prefix source mk_mean_script.sh matrix_matrix $1 11 100 300 1000 $mode $prefix source mk_mean_script.sh aat $1 11 100 300 1000 $mode $prefix -source mk_mean_script.sh ata $1 11 100 300 1000 $mode $prefix +# source mk_mean_script.sh ata $1 11 100 300 1000 $mode $prefix +source mk_mean_script.sh trmm $1 11 100 300 1000 $mode $prefix source mk_mean_script.sh trisolve_vector $1 11 100 300 1000 $mode $prefix source mk_mean_script.sh trisolve_matrix $1 11 100 300 1000 $mode $prefix source mk_mean_script.sh cholesky $1 11 100 300 1000 $mode $prefix -source mk_mean_script.sh complete_lu_decomp $1 11 100 300 1000 $mode $prefix source mk_mean_script.sh partial_lu_decomp $1 11 100 300 1000 $mode $prefix source mk_mean_script.sh tridiagonalization $1 11 100 300 1000 $mode $prefix source mk_mean_script.sh hessenberg $1 11 100 300 1000 $mode $prefix @@ -49,6 +49,7 @@ source mk_mean_script.sh symv $1 11 50 300 1000 $mode $prefix source mk_mean_script.sh syr2 $1 11 50 300 1000 $mode $prefix source mk_mean_script.sh ger $1 11 50 300 1000 $mode $prefix source mk_mean_script.sh rot $1 11 2500 100000 250000 $mode $prefix +source mk_mean_script.sh complete_lu_decomp $1 11 100 300 1000 $mode $prefix fi diff --git a/gtsam/3rdparty/Eigen/bench/btl/data/perlib_plot_settings.txt b/gtsam/3rdparty/Eigen/bench/btl/data/perlib_plot_settings.txt index 82b09ce26..6844bab28 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/data/perlib_plot_settings.txt +++ b/gtsam/3rdparty/Eigen/bench/btl/data/perlib_plot_settings.txt @@ -1,10 +1,12 @@ eigen3 ; with lines lw 4 lt 1 lc rgbcolor "black" +eigen2 ; with lines lw 3 lt 1 lc rgbcolor "#999999" +EigenBLAS ; with lines lw 3 lt 3 lc rgbcolor "#999999" eigen3_novec ; with lines lw 2 lt 1 lc rgbcolor "#999999" eigen3_nogccvec ; with lines lw 2 lt 2 lc rgbcolor "#991010" -INTEL_MKL ; with lines lw 3 lt 2 lc rgbcolor "#00b7ff" -ATLAS ; with lines lw 3 lt 1 lc rgbcolor "#52e657" +INTEL_MKL ; with lines lw 3 lt 1 lc rgbcolor "#ff0000" +ATLAS ; with lines lw 3 lt 1 lc rgbcolor "#008000" gmm ; with lines lw 3 lt 1 lc rgbcolor "#0000ff" -ublas ; with lines lw 3 lt 1 lc rgbcolor "#ff0000" +ublas ; with lines lw 3 lt 1 lc rgbcolor "#00b7ff" mtl4 ; with lines lw 3 lt 1 lc rgbcolor "#d18847" blitz ; with lines lw 3 lt 1 lc rgbcolor "#ff00ff" F77 ; with lines lw 3 lt 3 lc rgbcolor "#e6e64c" diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/C_BLAS/CMakeLists.txt b/gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/CMakeLists.txt similarity index 69% rename from gtsam/3rdparty/Eigen/bench/btl/libs/C_BLAS/CMakeLists.txt rename to gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/CMakeLists.txt index 59065cb9b..de42fe047 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/C_BLAS/CMakeLists.txt +++ b/gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/CMakeLists.txt @@ -1,7 +1,6 @@ find_package(ATLAS) if (ATLAS_FOUND) - include_directories(${PROJECT_SOURCE_DIR}/libs/f77) btl_add_bench(btl_atlas main.cpp) if(BUILD_btl_atlas) target_link_libraries(btl_atlas ${ATLAS_LIBRARIES}) @@ -11,7 +10,6 @@ endif (ATLAS_FOUND) find_package(MKL) if (MKL_FOUND) - include_directories(${PROJECT_SOURCE_DIR}/libs/f77) btl_add_bench(btl_mkl main.cpp) if(BUILD_btl_mkl) target_link_libraries(btl_mkl ${MKL_LIBRARIES}) @@ -19,33 +17,44 @@ if (MKL_FOUND) endif(BUILD_btl_mkl) endif (MKL_FOUND) -find_package(GOTO) -if (GOTO_FOUND) - include_directories(${PROJECT_SOURCE_DIR}/libs/f77) - 
btl_add_bench(btl_goto main.cpp) - if(BUILD_btl_goto) - target_link_libraries(btl_goto ${GOTO_LIBRARIES} ) - set_target_properties(btl_goto PROPERTIES COMPILE_FLAGS "-DCBLASNAME=GOTO -DPUREBLAS") - endif(BUILD_btl_goto) -endif (GOTO_FOUND) - find_package(GOTO2) if (GOTO2_FOUND) - include_directories(${PROJECT_SOURCE_DIR}/libs/f77) btl_add_bench(btl_goto2 main.cpp) if(BUILD_btl_goto2) target_link_libraries(btl_goto2 ${GOTO_LIBRARIES} ) - set_target_properties(btl_goto2 PROPERTIES COMPILE_FLAGS "-DCBLASNAME=GOTO2 -DPUREBLAS") + set_target_properties(btl_goto2 PROPERTIES COMPILE_FLAGS "-DCBLASNAME=GOTO2") endif(BUILD_btl_goto2) endif (GOTO2_FOUND) +find_package(GOTO) +if (GOTO_FOUND) + if(GOTO2_FOUND) + btl_add_bench(btl_goto main.cpp OFF) + else() + btl_add_bench(btl_goto main.cpp) + endif() + if(BUILD_btl_goto) + target_link_libraries(btl_goto ${GOTO_LIBRARIES} ) + set_target_properties(btl_goto PROPERTIES COMPILE_FLAGS "-DCBLASNAME=GOTO") + endif(BUILD_btl_goto) +endif (GOTO_FOUND) + find_package(ACML) if (ACML_FOUND) - include_directories(${PROJECT_SOURCE_DIR}/libs/f77) btl_add_bench(btl_acml main.cpp) if(BUILD_btl_acml) target_link_libraries(btl_acml ${ACML_LIBRARIES} ) - set_target_properties(btl_acml PROPERTIES COMPILE_FLAGS "-DCBLASNAME=ACML -DHAS_LAPACK=1 -DPUREBLAS") + set_target_properties(btl_acml PROPERTIES COMPILE_FLAGS "-DCBLASNAME=ACML -DHAS_LAPACK=1") endif(BUILD_btl_acml) endif (ACML_FOUND) + +if(Eigen_SOURCE_DIR AND CMAKE_Fortran_COMPILER_WORKS) + # we are inside Eigen and blas/lapack interface is compilable + include_directories(${Eigen_SOURCE_DIR}) + btl_add_bench(btl_eigenblas main.cpp) + if(BUILD_btl_eigenblas) + target_link_libraries(btl_eigenblas eigen_blas eigen_lapack ) + set_target_properties(btl_eigenblas PROPERTIES COMPILE_FLAGS "-DCBLASNAME=EigenBLAS") + endif() +endif() diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/C_BLAS/blas.h b/gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/blas.h similarity index 100% rename from gtsam/3rdparty/Eigen/bench/btl/libs/C_BLAS/blas.h rename to gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/blas.h diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/blas_interface.hh b/gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/blas_interface.hh new file mode 100644 index 000000000..651054632 --- /dev/null +++ b/gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/blas_interface.hh @@ -0,0 +1,83 @@ +//===================================================== +// File : blas_interface.hh +// Author : L. Plagne +// Copyright (C) EDF R&D, lun sep 30 14:23:28 CEST 2002 +//===================================================== +// +// This program is free software; you can redistribute it and/or +// modify it under the terms of the GNU General Public License +// as published by the Free Software Foundation; either version 2 +// of the License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
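Editorial note, not part of the patch: the new BLAS/blas_interface.hh below had its two #include targets stripped during extraction (upstream BTL most likely pulls in <c_interface_base.h> and <complex>; treat that as an assumption). The wrappers it declares follow the Fortran BLAS calling convention, so every argument, including scalar factors and sizes, is passed by pointer and matrices are column-major. A minimal self-contained sketch of one such call, assuming the usual trailing-underscore symbol mapping (dgemv -> dgemv_), not a definitive interface:

    // Illustrative sketch only; assumes the linked BLAS exports the Fortran symbol dgemv_.
    extern "C" void dgemv_(const char* trans, const int* m, const int* n,
                           const double* alpha, const double* a, const int* lda,
                           const double* x, const int* incx,
                           const double* beta, double* y, const int* incy);

    int dgemv_example()
    {
      const char notrans = 'N';
      const int n = 2, one = 1;
      const double alpha = 1.0, beta = 0.0;
      const double A[4] = { 1.0, 3.0,    // first column of the 2x2 matrix
                            2.0, 4.0 };  // second column (column-major storage)
      const double x[2] = { 1.0, 1.0 };
      double y[2] = { 0.0, 0.0 };
      // y = alpha * A * x + beta * y, so y becomes {3.0, 7.0}
      dgemv_(&notrans, &n, &n, &alpha, A, &n, x, &one, &beta, y, &one);
      return 0;
    }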
+// +#ifndef blas_PRODUIT_MATRICE_VECTEUR_HH +#define blas_PRODUIT_MATRICE_VECTEUR_HH + +#include +#include +extern "C" +{ +#include "blas.h" + + // Cholesky Factorization +// void spotrf_(const char* uplo, const int* n, float *a, const int* ld, int* info); +// void dpotrf_(const char* uplo, const int* n, double *a, const int* ld, int* info); + void ssytrd_(char *uplo, const int *n, float *a, const int *lda, float *d, float *e, float *tau, float *work, int *lwork, int *info ); + void dsytrd_(char *uplo, const int *n, double *a, const int *lda, double *d, double *e, double *tau, double *work, int *lwork, int *info ); + void sgehrd_( const int *n, int *ilo, int *ihi, float *a, const int *lda, float *tau, float *work, int *lwork, int *info ); + void dgehrd_( const int *n, int *ilo, int *ihi, double *a, const int *lda, double *tau, double *work, int *lwork, int *info ); + + // LU row pivoting +// void dgetrf_( int *m, int *n, double *a, int *lda, int *ipiv, int *info ); +// void sgetrf_(const int* m, const int* n, float *a, const int* ld, int* ipivot, int* info); + // LU full pivoting + void sgetc2_(const int* n, float *a, const int *lda, int *ipiv, int *jpiv, int*info ); + void dgetc2_(const int* n, double *a, const int *lda, int *ipiv, int *jpiv, int*info ); +#ifdef HAS_LAPACK +#endif +} + +#define MAKE_STRING2(S) #S +#define MAKE_STRING(S) MAKE_STRING2(S) + +#define CAT2(A,B) A##B +#define CAT(A,B) CAT2(A,B) + + +template class blas_interface; + + +static char notrans = 'N'; +static char trans = 'T'; +static char nonunit = 'N'; +static char lower = 'L'; +static char right = 'R'; +static char left = 'L'; +static int intone = 1; + + + +#define SCALAR float +#define SCALAR_PREFIX s +#include "blas_interface_impl.hh" +#undef SCALAR +#undef SCALAR_PREFIX + + +#define SCALAR double +#define SCALAR_PREFIX d +#include "blas_interface_impl.hh" +#undef SCALAR +#undef SCALAR_PREFIX + +#endif + + + diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/blas_interface_impl.hh b/gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/blas_interface_impl.hh new file mode 100644 index 000000000..0e84df038 --- /dev/null +++ b/gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/blas_interface_impl.hh @@ -0,0 +1,151 @@ + +#define BLAS_FUNC(NAME) CAT(CAT(SCALAR_PREFIX,NAME),_) + +template<> class blas_interface : public c_interface_base +{ + +public : + + static SCALAR fone; + static SCALAR fzero; + + static inline std::string name() + { + return MAKE_STRING(CBLASNAME); + } + + static inline void matrix_vector_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N){ + BLAS_FUNC(gemv)(¬rans,&N,&N,&fone,A,&N,B,&intone,&fzero,X,&intone); + } + + static inline void symv(gene_matrix & A, gene_vector & B, gene_vector & X, int N){ + BLAS_FUNC(symv)(&lower, &N,&fone,A,&N,B,&intone,&fzero,X,&intone); + } + + static inline void syr2(gene_matrix & A, gene_vector & B, gene_vector & X, int N){ + BLAS_FUNC(syr2)(&lower,&N,&fone,B,&intone,X,&intone,A,&N); + } + + static inline void ger(gene_matrix & A, gene_vector & X, gene_vector & Y, int N){ + BLAS_FUNC(ger)(&N,&N,&fone,X,&intone,Y,&intone,A,&N); + } + + static inline void rot(gene_vector & A, gene_vector & B, SCALAR c, SCALAR s, int N){ + BLAS_FUNC(rot)(&N,A,&intone,B,&intone,&c,&s); + } + + static inline void atv_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N){ + BLAS_FUNC(gemv)(&trans,&N,&N,&fone,A,&N,B,&intone,&fzero,X,&intone); + } + + static inline void matrix_matrix_product(gene_matrix & A, gene_matrix & B, gene_matrix & X, int N){ + 
BLAS_FUNC(gemm)(¬rans,¬rans,&N,&N,&N,&fone,A,&N,B,&N,&fzero,X,&N); + } + + static inline void transposed_matrix_matrix_product(gene_matrix & A, gene_matrix & B, gene_matrix & X, int N){ + BLAS_FUNC(gemm)(¬rans,¬rans,&N,&N,&N,&fone,A,&N,B,&N,&fzero,X,&N); + } + +// static inline void ata_product(gene_matrix & A, gene_matrix & X, int N){ +// ssyrk_(&lower,&trans,&N,&N,&fone,A,&N,&fzero,X,&N); +// } + + static inline void aat_product(gene_matrix & A, gene_matrix & X, int N){ + BLAS_FUNC(syrk)(&lower,¬rans,&N,&N,&fone,A,&N,&fzero,X,&N); + } + + static inline void axpy(SCALAR coef, const gene_vector & X, gene_vector & Y, int N){ + BLAS_FUNC(axpy)(&N,&coef,X,&intone,Y,&intone); + } + + static inline void axpby(SCALAR a, const gene_vector & X, SCALAR b, gene_vector & Y, int N){ + BLAS_FUNC(scal)(&N,&b,Y,&intone); + BLAS_FUNC(axpy)(&N,&a,X,&intone,Y,&intone); + } + + static inline void cholesky(const gene_matrix & X, gene_matrix & C, int N){ + int N2 = N*N; + BLAS_FUNC(copy)(&N2, X, &intone, C, &intone); + char uplo = 'L'; + int info = 0; + BLAS_FUNC(potrf)(&uplo, &N, C, &N, &info); + if(info!=0) std::cerr << "potrf_ error " << info << "\n"; + } + + static inline void partial_lu_decomp(const gene_matrix & X, gene_matrix & C, int N){ + int N2 = N*N; + BLAS_FUNC(copy)(&N2, X, &intone, C, &intone); + char uplo = 'L'; + int info = 0; + int * ipiv = (int*)alloca(sizeof(int)*N); + BLAS_FUNC(getrf)(&N, &N, C, &N, ipiv, &info); + if(info!=0) std::cerr << "getrf_ error " << info << "\n"; + } + + static inline void trisolve_lower(const gene_matrix & L, const gene_vector& B, gene_vector & X, int N){ + BLAS_FUNC(copy)(&N, B, &intone, X, &intone); + BLAS_FUNC(trsv)(&lower, ¬rans, &nonunit, &N, L, &N, X, &intone); + } + + static inline void trisolve_lower_matrix(const gene_matrix & L, const gene_matrix& B, gene_matrix & X, int N){ + BLAS_FUNC(copy)(&N, B, &intone, X, &intone); + BLAS_FUNC(trsm)(&right, &lower, ¬rans, &nonunit, &N, &N, &fone, L, &N, X, &N); + } + + static inline void trmm(gene_matrix & A, gene_matrix & B, gene_matrix & X, int N){ + BLAS_FUNC(trmm)(&left, &lower, ¬rans,&nonunit, &N,&N,&fone,A,&N,B,&N); + } + + #ifdef HAS_LAPACK + + static inline void lu_decomp(const gene_matrix & X, gene_matrix & C, int N){ + int N2 = N*N; + BLAS_FUNC(copy)(&N2, X, &intone, C, &intone); + char uplo = 'L'; + int info = 0; + int * ipiv = (int*)alloca(sizeof(int)*N); + int * jpiv = (int*)alloca(sizeof(int)*N); + BLAS_FUNC(getc2)(&N, C, &N, ipiv, jpiv, &info); + } + + + + static inline void hessenberg(const gene_matrix & X, gene_matrix & C, int N){ + { + int N2 = N*N; + int inc = 1; + BLAS_FUNC(copy)(&N2, X, &inc, C, &inc); + } + int info = 0; + int ilo = 1; + int ihi = N; + int bsize = 64; + int worksize = N*bsize; + SCALAR* d = new SCALAR[N+worksize]; + BLAS_FUNC(gehrd)(&N, &ilo, &ihi, C, &N, d, d+N, &worksize, &info); + delete[] d; + } + + static inline void tridiagonalization(const gene_matrix & X, gene_matrix & C, int N){ + { + int N2 = N*N; + int inc = 1; + BLAS_FUNC(copy)(&N2, X, &inc, C, &inc); + } + char uplo = 'U'; + int info = 0; + int ilo = 1; + int ihi = N; + int bsize = 64; + int worksize = N*bsize; + SCALAR* d = new SCALAR[3*N+worksize]; + BLAS_FUNC(sytrd)(&uplo, &N, C, &N, d, d+N, d+2*N, d+3*N, &worksize, &info); + delete[] d; + } + + #endif // HAS_LAPACK + +}; + +SCALAR blas_interface::fone = SCALAR(1); +SCALAR blas_interface::fzero = SCALAR(0); diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/f77_interface_base.hh b/gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/c_interface_base.h similarity index 51% 
rename from gtsam/3rdparty/Eigen/bench/btl/libs/f77/f77_interface_base.hh rename to gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/c_interface_base.h index ab8a18295..515d8dcfc 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/f77_interface_base.hh +++ b/gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/c_interface_base.h @@ -1,38 +1,21 @@ -//===================================================== -// File : f77_interface_base.hh -// Author : L. Plagne -// Copyright (C) EDF R&D, lun sep 30 14:23:25 CEST 2002 -//===================================================== -// -// This program is free software; you can redistribute it and/or -// modify it under the terms of the GNU General Public License -// as published by the Free Software Foundation; either version 2 -// of the License, or (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// You should have received a copy of the GNU General Public License -// along with this program; if not, write to the Free Software -// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -// -#ifndef F77_INTERFACE_BASE_HH -#define F77_INTERFACE_BASE_HH + +#ifndef BTL_C_INTERFACE_BASE_H +#define BTL_C_INTERFACE_BASE_H #include "utilities.h" #include -template -class f77_interface_base{ + +template class c_interface_base +{ public: - typedef real real_type ; - typedef std::vector stl_vector; - typedef std::vector stl_matrix; + typedef real real_type; + typedef std::vector stl_vector; + typedef std::vector stl_matrix; - typedef real * gene_matrix; - typedef real * gene_vector; + typedef real* gene_matrix; + typedef real* gene_vector; static void free_matrix(gene_matrix & A, int N){ delete A; @@ -87,5 +70,4 @@ public: }; - #endif diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/main.cpp b/gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/main.cpp new file mode 100644 index 000000000..8347c9f0b --- /dev/null +++ b/gtsam/3rdparty/Eigen/bench/btl/libs/BLAS/main.cpp @@ -0,0 +1,73 @@ +//===================================================== +// File : main.cpp +// Author : L. Plagne +// Copyright (C) EDF R&D, lun sep 30 14:23:28 CEST 2002 +//===================================================== +// +// This program is free software; you can redistribute it and/or +// modify it under the terms of the GNU General Public License +// as published by the Free Software Foundation; either version 2 +// of the License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
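Editorial note, not part of the patch: in the BLAS/main.cpp body that follows, everything between angle brackets was lost during extraction, so the bench calls appear without their template arguments. In the upstream BTL sources each call instantiates an action over the blas_interface declared above; a hedged reconstruction of the pattern (the exact action list here is an assumption for illustration, not a restore of the original lines) looks like:

    // Sketch of the BTL registration pattern, relying on the headers included just below
    // (utilities.h, blas_interface.hh, bench.hh, basic_actions.hh, action_cholesky.hh).
    BTL_MAIN;

    int main()
    {
      // each bench<> call times one action for problem sizes between the MIN_* and MAX_* bounds
      bench< Action_axpy< blas_interface<REAL_TYPE> > >(MIN_AXPY, MAX_AXPY, NB_POINT);
      bench< Action_matrix_vector_product< blas_interface<REAL_TYPE> > >(MIN_MV, MAX_MV, NB_POINT);
      bench< Action_matrix_matrix_product< blas_interface<REAL_TYPE> > >(MIN_MM, MAX_MM, NB_POINT);
      bench< Action_cholesky< blas_interface<REAL_TYPE> > >(MIN_MM, MAX_MM, NB_POINT);
      return 0;
    }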
+// +#include "utilities.h" +#include "blas_interface.hh" +#include "bench.hh" +#include "basic_actions.hh" + +#include "action_cholesky.hh" +#include "action_lu_decomp.hh" +#include "action_partial_lu.hh" +#include "action_trisolve_matrix.hh" + +#ifdef HAS_LAPACK +#include "action_hessenberg.hh" +#endif + +BTL_MAIN; + +int main() +{ + + bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); + bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); + + bench > >(MIN_MV,MAX_MV,NB_POINT); + bench > >(MIN_MV,MAX_MV,NB_POINT); + bench > >(MIN_MV,MAX_MV,NB_POINT); + bench > >(MIN_MV,MAX_MV,NB_POINT); + + bench > >(MIN_MV,MAX_MV,NB_POINT); + bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); + + bench > >(MIN_MM,MAX_MM,NB_POINT); +// bench > >(MIN_MM,MAX_MM,NB_POINT); + bench > >(MIN_MM,MAX_MM,NB_POINT); + + bench > >(MIN_MM,MAX_MM,NB_POINT); + bench > >(MIN_MM,MAX_MM,NB_POINT); + + bench > >(MIN_MM,MAX_MM,NB_POINT); + + bench > >(MIN_MM,MAX_MM,NB_POINT); + bench > >(MIN_MM,MAX_MM,NB_POINT); + + #ifdef HAS_LAPACK + bench > >(MIN_MM,MAX_MM,NB_POINT); + bench > >(MIN_MM,MAX_MM,NB_POINT); + bench > >(MIN_MM,MAX_MM,NB_POINT); + #endif + + //bench > >(MIN_LU,MAX_LU,NB_POINT); + + return 0; +} + + diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/C/CMakeLists.txt b/gtsam/3rdparty/Eigen/bench/btl/libs/C/CMakeLists.txt deleted file mode 100644 index 3d4d24cee..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/C/CMakeLists.txt +++ /dev/null @@ -1,3 +0,0 @@ -include_directories(${PROJECT_SOURCE_DIR}/libs/f77) -btl_add_bench(btl_C main.cpp OFF) -# set_target_properties(btl_C PROPERTIES COMPILE_FLAGS "-fpeel-loops") \ No newline at end of file diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/C/C_interface.hh b/gtsam/3rdparty/Eigen/bench/btl/libs/C/C_interface.hh deleted file mode 100755 index d6092517d..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/C/C_interface.hh +++ /dev/null @@ -1,117 +0,0 @@ -//===================================================== -// File : C_interface.hh -// Author : L. Plagne -// Copyright (C) EDF R&D, lun sep 30 14:23:23 CEST 2002 -//===================================================== -// -// This program is free software; you can redistribute it and/or -// modify it under the terms of the GNU General Public License -// as published by the Free Software Foundation; either version 2 -// of the License, or (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// You should have received a copy of the GNU General Public License -// along with this program; if not, write to the Free Software -// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-// -#ifndef C_INTERFACE_HH -#define C_INTERFACE_HH - -#include "f77_interface.hh" - -template -class C_interface : public f77_interface_base { - -public : - - typedef typename f77_interface_base::gene_matrix gene_matrix; - typedef typename f77_interface_base::gene_vector gene_vector; - - static inline std::string name() { return "C"; } - - static inline void matrix_vector_product(const gene_matrix & A, const gene_vector & B, gene_vector & X, int N) - { -// for (int i=0;i -// Copyright (C) EDF R&D, lun sep 30 14:23:28 CEST 2002 -//===================================================== -// -// This program is free software; you can redistribute it and/or -// modify it under the terms of the GNU General Public License -// as published by the Free Software Foundation; either version 2 -// of the License, or (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// You should have received a copy of the GNU General Public License -// along with this program; if not, write to the Free Software -// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -// -#ifndef C_BLAS_PRODUIT_MATRICE_VECTEUR_HH -#define C_BLAS_PRODUIT_MATRICE_VECTEUR_HH - -#include "f77_interface.hh" -#include -extern "C" -{ -#include "cblas.h" - -// #ifdef PUREBLAS -#include "blas.h" -// #endif - -// void sgemm_(const char *transa, const char *transb, const int *m, const int *n, const int *k, -// const float *alpha, const float *a, const int *lda, const float *b, const int *ldb, -// const float *beta, float *c, const int *ldc); -// -// void sgemv_(const char *trans, const int *m, const int *n, const float *alpha, -// const float *a, const int *lda, const float *x, const int *incx, -// const float *beta, float *y, const int *incy); -// -// void ssymv_(const char *trans, const char* uplo, -// const int* N, const float* alpha, const float *A, -// const int* lda, const float *X, const int* incX, -// const float* beta, float *Y, const int* incY); -// -// void sscal_(const int *n, const float *alpha, const float *x, const int *incx); -// -// void saxpy_(const int *n, const float *alpha, const float *x, const int *incx, -// float *y, const int *incy); -// -// void strsv_(const char *uplo, const char *trans, const char *diag, const int *n, -// const float *a, const int *lda, float *x, const int *incx); -// -// void scopy_(const int *n, const float *x, const int *incx, float *y, const int *incy); - - // Cholesky Factorization -// #include "mkl_lapack.h" -// void spotrf_(const char* uplo, const int* n, float *a, const int* ld, int* info); -// void dpotrf_(const char* uplo, const int* n, double *a, const int* ld, int* info); - void ssytrd_(char *uplo, const int *n, float *a, const int *lda, float *d, float *e, float *tau, float *work, int *lwork, int *info ); - void sgehrd_( const int *n, int *ilo, int *ihi, float *a, const int *lda, float *tau, float *work, int *lwork, int *info ); - - // LU row pivoting -// void dgetrf_( int *m, int *n, double *a, int *lda, int *ipiv, int *info ); -// void sgetrf_(const int* m, const int* n, float *a, const int* ld, int* ipivot, int* info); - // LU full pivoting - void sgetc2_(const int* n, float *a, const int *lda, int *ipiv, int *jpiv, int*info ); -#ifdef HAS_LAPACK -#endif -} - -#define MAKE_STRING2(S) #S -#define MAKE_STRING(S) MAKE_STRING2(S) - 
-template -class C_BLAS_interface : public f77_interface_base -{ -public : - - typedef typename f77_interface_base::gene_matrix gene_matrix; - typedef typename f77_interface_base::gene_vector gene_vector; - - static inline std::string name( void ) - { - return MAKE_STRING(CBLASNAME); - } - - static inline void matrix_vector_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N) - { - cblas_dgemv(CblasColMajor,CblasNoTrans,N,N,1.0,A,N,B,1,0.0,X,1); - } - - static inline void atv_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N) - { - cblas_dgemv(CblasColMajor,CblasTrans,N,N,1.0,A,N,B,1,0.0,X,1); - } - - static inline void symv(gene_matrix & A, gene_vector & B, gene_vector & X, int N) - { - cblas_dsymv(CblasColMajor,CblasLower,CblasTrans,N,N,1.0,A,N,B,1,0.0,X,1); - } - - static inline void matrix_matrix_product(gene_matrix & A, gene_matrix & B, gene_matrix & X, int N){ - cblas_dgemm(CblasColMajor,CblasNoTrans,CblasNoTrans,N,N,N,1.0,A,N,B,N,0.0,X,N); - } - - static inline void transposed_matrix_matrix_product(gene_matrix & A, gene_matrix & B, gene_matrix & X, int N){ - cblas_dgemm(CblasColMajor,CblasTrans,CblasTrans,N,N,N,1.0,A,N,B,N,0.0,X,N); - } - - static inline void ata_product(gene_matrix & A, gene_matrix & X, int N){ - cblas_dgemm(CblasColMajor,CblasTrans,CblasNoTrans,N,N,N,1.0,A,N,A,N,0.0,X,N); - } - - static inline void aat_product(gene_matrix & A, gene_matrix & X, int N){ - cblas_dgemm(CblasColMajor,CblasNoTrans,CblasTrans,N,N,N,1.0,A,N,A,N,0.0,X,N); - } - - static inline void axpy(real coef, const gene_vector & X, gene_vector & Y, int N){ - cblas_daxpy(N,coef,X,1,Y,1); - } - - static inline void axpby(real a, const gene_vector & X, real b, gene_vector & Y, int N){ - cblas_dscal(N,b,Y,1); - cblas_daxpy(N,a,X,1,Y,1); - } - -}; - -static float fone = 1; -static float fzero = 0; -static char notrans = 'N'; -static char trans = 'T'; -static char nonunit = 'N'; -static char lower = 'L'; -static char right = 'R'; -static char left = 'L'; -static int intone = 1; - -template<> -class C_BLAS_interface : public f77_interface_base -{ - -public : - - static inline std::string name( void ) - { - return MAKE_STRING(CBLASNAME); - } - - static inline void matrix_vector_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N){ - #ifdef PUREBLAS - sgemv_(¬rans,&N,&N,&fone,A,&N,B,&intone,&fzero,X,&intone); - #else - cblas_sgemv(CblasColMajor,CblasNoTrans,N,N,1.0,A,N,B,1,0.0,X,1); - #endif - } - - static inline void symv(gene_matrix & A, gene_vector & B, gene_vector & X, int N){ - #ifdef PUREBLAS - ssymv_(&lower, &N,&fone,A,&N,B,&intone,&fzero,X,&intone); - #else - cblas_ssymv(CblasColMajor,CblasLower,N,1.0,A,N,B,1,0.0,X,1); - #endif - } - - static inline void syr2(gene_matrix & A, gene_vector & B, gene_vector & X, int N){ - #ifdef PUREBLAS - ssyr2_(&lower,&N,&fone,B,&intone,X,&intone,A,&N); - #else - cblas_ssyr2(CblasColMajor,CblasLower,N,1.0,B,1,X,1,A,N); - #endif - } - - static inline void ger(gene_matrix & A, gene_vector & X, gene_vector & Y, int N){ - #ifdef PUREBLAS - sger_(&N,&N,&fone,X,&intone,Y,&intone,A,&N); - #else - cblas_sger(CblasColMajor,N,N,1.0,X,1,Y,1,A,N); - #endif - } - - static inline void rot(gene_vector & A, gene_vector & B, float c, float s, int N){ - #ifdef PUREBLAS - srot_(&N,A,&intone,B,&intone,&c,&s); - #else - cblas_srot(N,A,1,B,1,c,s); - #endif - } - - static inline void atv_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N){ - #ifdef PUREBLAS - sgemv_(&trans,&N,&N,&fone,A,&N,B,&intone,&fzero,X,&intone); - #else - 
cblas_sgemv(CblasColMajor,CblasTrans,N,N,1.0,A,N,B,1,0.0,X,1); - #endif - } - - static inline void matrix_matrix_product(gene_matrix & A, gene_matrix & B, gene_matrix & X, int N){ - #ifdef PUREBLAS - sgemm_(¬rans,¬rans,&N,&N,&N,&fone,A,&N,B,&N,&fzero,X,&N); - #else - cblas_sgemm(CblasColMajor,CblasNoTrans,CblasNoTrans,N,N,N,1.0,A,N,B,N,0.0,X,N); - #endif - } - - static inline void transposed_matrix_matrix_product(gene_matrix & A, gene_matrix & B, gene_matrix & X, int N){ - #ifdef PUREBLAS - sgemm_(¬rans,¬rans,&N,&N,&N,&fone,A,&N,B,&N,&fzero,X,&N); - #else - cblas_sgemm(CblasColMajor,CblasNoTrans,CblasNoTrans,N,N,N,1.0,A,N,B,N,0.0,X,N); - #endif - } - - static inline void ata_product(gene_matrix & A, gene_matrix & X, int N){ - #ifdef PUREBLAS - sgemm_(&trans,¬rans,&N,&N,&N,&fone,A,&N,A,&N,&fzero,X,&N); - #else - cblas_sgemm(CblasColMajor,CblasTrans,CblasNoTrans,N,N,N,1.0,A,N,A,N,0.0,X,N); - #endif - } - - static inline void aat_product(gene_matrix & A, gene_matrix & X, int N){ - #ifdef PUREBLAS - sgemm_(¬rans,&trans,&N,&N,&N,&fone,A,&N,A,&N,&fzero,X,&N); - #else - cblas_sgemm(CblasColMajor,CblasNoTrans,CblasTrans,N,N,N,1.0,A,N,A,N,0.0,X,N); - #endif - } - - static inline void axpy(float coef, const gene_vector & X, gene_vector & Y, int N){ - #ifdef PUREBLAS - saxpy_(&N,&coef,X,&intone,Y,&intone); - #else - cblas_saxpy(N,coef,X,1,Y,1); - #endif - } - - static inline void axpby(float a, const gene_vector & X, float b, gene_vector & Y, int N){ - #ifdef PUREBLAS - sscal_(&N,&b,Y,&intone); - saxpy_(&N,&a,X,&intone,Y,&intone); - #else - cblas_sscal(N,b,Y,1); - cblas_saxpy(N,a,X,1,Y,1); - #endif - } - - static inline void cholesky(const gene_matrix & X, gene_matrix & C, int N){ - int N2 = N*N; - scopy_(&N2, X, &intone, C, &intone); - char uplo = 'L'; - int info = 0; - spotrf_(&uplo, &N, C, &N, &info); - if(info!=0) std::cerr << "spotrf_ error " << info << "\n"; - } - - static inline void partial_lu_decomp(const gene_matrix & X, gene_matrix & C, int N){ - int N2 = N*N; - scopy_(&N2, X, &intone, C, &intone); - char uplo = 'L'; - int info = 0; - int * ipiv = (int*)alloca(sizeof(int)*N); - sgetrf_(&N, &N, C, &N, ipiv, &info); - if(info!=0) std::cerr << "sgetrf_ error " << info << "\n"; - } - - #ifdef HAS_LAPACK - - static inline void lu_decomp(const gene_matrix & X, gene_matrix & C, int N){ - int N2 = N*N; - scopy_(&N2, X, &intone, C, &intone); - char uplo = 'L'; - int info = 0; - int * ipiv = (int*)alloca(sizeof(int)*N); - int * jpiv = (int*)alloca(sizeof(int)*N); - sgetc2_(&N, C, &N, ipiv, jpiv, &info); - } - - - - static inline void hessenberg(const gene_matrix & X, gene_matrix & C, int N){ -#ifdef PUREBLAS - { - int N2 = N*N; - int inc = 1; - scopy_(&N2, X, &inc, C, &inc); - } -#else - cblas_scopy(N*N, X, 1, C, 1); -#endif - int info = 0; - int ilo = 1; - int ihi = N; - int bsize = 64; - int worksize = N*bsize; - float* d = new float[N+worksize]; - sgehrd_(&N, &ilo, &ihi, C, &N, d, d+N, &worksize, &info); - delete[] d; - } - - static inline void tridiagonalization(const gene_matrix & X, gene_matrix & C, int N){ -#ifdef PUREBLAS - { - int N2 = N*N; - int inc = 1; - scopy_(&N2, X, &inc, C, &inc); - } -#else - cblas_scopy(N*N, X, 1, C, 1); -#endif - char uplo = 'U'; - int info = 0; - int ilo = 1; - int ihi = N; - int bsize = 64; - int worksize = N*bsize; - float* d = new float[3*N+worksize]; - ssytrd_(&uplo, &N, C, &N, d, d+N, d+2*N, d+3*N, &worksize, &info); - delete[] d; - } - #endif - - static inline void trisolve_lower(const gene_matrix & L, const gene_vector& B, gene_vector & X, int N){ - #ifdef 
PUREBLAS - scopy_(&N, B, &intone, X, &intone); - strsv_(&lower, ¬rans, &nonunit, &N, L, &N, X, &intone); - #else - cblas_scopy(N, B, 1, X, 1); - cblas_strsv(CblasColMajor, CblasLower, CblasNoTrans, CblasNonUnit, N, L, N, X, 1); - #endif - } - - static inline void trisolve_lower_matrix(const gene_matrix & L, const gene_matrix& B, gene_matrix & X, int N){ - #ifdef PUREBLAS - scopy_(&N, B, &intone, X, &intone); - strsm_(&right, &lower, ¬rans, &nonunit, &N, &N, &fone, L, &N, X, &N); - #else - cblas_scopy(N, B, 1, X, 1); - cblas_strsm(CblasColMajor, CblasRight, CblasLower, CblasNoTrans, CblasNonUnit, N, N, 1, L, N, X, N); - #endif - } - - static inline void trmm(gene_matrix & A, gene_matrix & B, gene_matrix & X, int N){ - #ifdef PUREBLAS - strmm_(&left, &lower, ¬rans,&nonunit, &N,&N,&fone,A,&N,B,&N); - #else - cblas_strmm(CblasColMajor, CblasLeft, CblasLower, CblasNoTrans,CblasNonUnit, N,N,1,A,N,B,N); - #endif - } - -}; - - -#endif - - - diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/C_BLAS/cblas.h b/gtsam/3rdparty/Eigen/bench/btl/libs/C_BLAS/cblas.h deleted file mode 100644 index 4087ffb92..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/C_BLAS/cblas.h +++ /dev/null @@ -1,596 +0,0 @@ -#ifndef CBLAS_H - -#ifndef CBLAS_ENUM_DEFINED_H - #define CBLAS_ENUM_DEFINED_H - enum CBLAS_ORDER {CblasRowMajor=101, CblasColMajor=102 }; - enum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113, - AtlasConj=114}; - enum CBLAS_UPLO {CblasUpper=121, CblasLower=122}; - enum CBLAS_DIAG {CblasNonUnit=131, CblasUnit=132}; - enum CBLAS_SIDE {CblasLeft=141, CblasRight=142}; -#endif - -#ifndef CBLAS_ENUM_ONLY -#define CBLAS_H -#define CBLAS_INDEX int - -int cblas_errprn(int ierr, int info, char *form, ...); - -/* - * =========================================================================== - * Prototypes for level 1 BLAS functions (complex are recast as routines) - * =========================================================================== - */ -float cblas_sdsdot(const int N, const float alpha, const float *X, - const int incX, const float *Y, const int incY); -double cblas_dsdot(const int N, const float *X, const int incX, const float *Y, - const int incY); -float cblas_sdot(const int N, const float *X, const int incX, - const float *Y, const int incY); -double cblas_ddot(const int N, const double *X, const int incX, - const double *Y, const int incY); -/* - * Functions having prefixes Z and C only - */ -void cblas_cdotu_sub(const int N, const void *X, const int incX, - const void *Y, const int incY, void *dotu); -void cblas_cdotc_sub(const int N, const void *X, const int incX, - const void *Y, const int incY, void *dotc); - -void cblas_zdotu_sub(const int N, const void *X, const int incX, - const void *Y, const int incY, void *dotu); -void cblas_zdotc_sub(const int N, const void *X, const int incX, - const void *Y, const int incY, void *dotc); - - -/* - * Functions having prefixes S D SC DZ - */ -float cblas_snrm2(const int N, const float *X, const int incX); -float cblas_sasum(const int N, const float *X, const int incX); - -double cblas_dnrm2(const int N, const double *X, const int incX); -double cblas_dasum(const int N, const double *X, const int incX); - -float cblas_scnrm2(const int N, const void *X, const int incX); -float cblas_scasum(const int N, const void *X, const int incX); - -double cblas_dznrm2(const int N, const void *X, const int incX); -double cblas_dzasum(const int N, const void *X, const int incX); - - -/* - * Functions having standard 4 prefixes (S D C Z) - */ 
-CBLAS_INDEX cblas_isamax(const int N, const float *X, const int incX); -CBLAS_INDEX cblas_idamax(const int N, const double *X, const int incX); -CBLAS_INDEX cblas_icamax(const int N, const void *X, const int incX); -CBLAS_INDEX cblas_izamax(const int N, const void *X, const int incX); - -/* - * =========================================================================== - * Prototypes for level 1 BLAS routines - * =========================================================================== - */ - -/* - * Routines with standard 4 prefixes (s, d, c, z) - */ -void cblas_sswap(const int N, float *X, const int incX, - float *Y, const int incY); -void cblas_scopy(const int N, const float *X, const int incX, - float *Y, const int incY); -void cblas_saxpy(const int N, const float alpha, const float *X, - const int incX, float *Y, const int incY); -void catlas_saxpby(const int N, const float alpha, const float *X, - const int incX, const float beta, float *Y, const int incY); -void catlas_sset - (const int N, const float alpha, float *X, const int incX); - -void cblas_dswap(const int N, double *X, const int incX, - double *Y, const int incY); -void cblas_dcopy(const int N, const double *X, const int incX, - double *Y, const int incY); -void cblas_daxpy(const int N, const double alpha, const double *X, - const int incX, double *Y, const int incY); -void catlas_daxpby(const int N, const double alpha, const double *X, - const int incX, const double beta, double *Y, const int incY); -void catlas_dset - (const int N, const double alpha, double *X, const int incX); - -void cblas_cswap(const int N, void *X, const int incX, - void *Y, const int incY); -void cblas_ccopy(const int N, const void *X, const int incX, - void *Y, const int incY); -void cblas_caxpy(const int N, const void *alpha, const void *X, - const int incX, void *Y, const int incY); -void catlas_caxpby(const int N, const void *alpha, const void *X, - const int incX, const void *beta, void *Y, const int incY); -void catlas_cset - (const int N, const void *alpha, void *X, const int incX); - -void cblas_zswap(const int N, void *X, const int incX, - void *Y, const int incY); -void cblas_zcopy(const int N, const void *X, const int incX, - void *Y, const int incY); -void cblas_zaxpy(const int N, const void *alpha, const void *X, - const int incX, void *Y, const int incY); -void catlas_zaxpby(const int N, const void *alpha, const void *X, - const int incX, const void *beta, void *Y, const int incY); -void catlas_zset - (const int N, const void *alpha, void *X, const int incX); - - -/* - * Routines with S and D prefix only - */ -void cblas_srotg(float *a, float *b, float *c, float *s); -void cblas_srotmg(float *d1, float *d2, float *b1, const float b2, float *P); -void cblas_srot(const int N, float *X, const int incX, - float *Y, const int incY, const float c, const float s); -void cblas_srotm(const int N, float *X, const int incX, - float *Y, const int incY, const float *P); - -void cblas_drotg(double *a, double *b, double *c, double *s); -void cblas_drotmg(double *d1, double *d2, double *b1, const double b2, double *P); -void cblas_drot(const int N, double *X, const int incX, - double *Y, const int incY, const double c, const double s); -void cblas_drotm(const int N, double *X, const int incX, - double *Y, const int incY, const double *P); - - -/* - * Routines with S D C Z CS and ZD prefixes - */ -void cblas_sscal(const int N, const float alpha, float *X, const int incX); -void cblas_dscal(const int N, const double alpha, double *X, const int incX); 
-void cblas_cscal(const int N, const void *alpha, void *X, const int incX); -void cblas_zscal(const int N, const void *alpha, void *X, const int incX); -void cblas_csscal(const int N, const float alpha, void *X, const int incX); -void cblas_zdscal(const int N, const double alpha, void *X, const int incX); - -/* - * Extra reference routines provided by ATLAS, but not mandated by the standard - */ -void cblas_crotg(void *a, void *b, void *c, void *s); -void cblas_zrotg(void *a, void *b, void *c, void *s); -void cblas_csrot(const int N, void *X, const int incX, void *Y, const int incY, - const float c, const float s); -void cblas_zdrot(const int N, void *X, const int incX, void *Y, const int incY, - const double c, const double s); - -/* - * =========================================================================== - * Prototypes for level 2 BLAS - * =========================================================================== - */ - -/* - * Routines with standard 4 prefixes (S, D, C, Z) - */ -void cblas_sgemv(const enum CBLAS_ORDER Order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const float alpha, const float *A, const int lda, - const float *X, const int incX, const float beta, - float *Y, const int incY); -void cblas_sgbmv(const enum CBLAS_ORDER Order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const int KL, const int KU, const float alpha, - const float *A, const int lda, const float *X, - const int incX, const float beta, float *Y, const int incY); -void cblas_strmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const float *A, const int lda, - float *X, const int incX); -void cblas_stbmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const float *A, const int lda, - float *X, const int incX); -void cblas_stpmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const float *Ap, float *X, const int incX); -void cblas_strsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const float *A, const int lda, float *X, - const int incX); -void cblas_stbsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const float *A, const int lda, - float *X, const int incX); -void cblas_stpsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const float *Ap, float *X, const int incX); - -void cblas_dgemv(const enum CBLAS_ORDER Order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const double alpha, const double *A, const int lda, - const double *X, const int incX, const double beta, - double *Y, const int incY); -void cblas_dgbmv(const enum CBLAS_ORDER Order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const int KL, const int KU, const double alpha, - const double *A, const int lda, const double *X, - const int incX, const double beta, double *Y, const int incY); -void cblas_dtrmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const double *A, const int lda, - double *X, const int 
incX); -void cblas_dtbmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const double *A, const int lda, - double *X, const int incX); -void cblas_dtpmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const double *Ap, double *X, const int incX); -void cblas_dtrsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const double *A, const int lda, double *X, - const int incX); -void cblas_dtbsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const double *A, const int lda, - double *X, const int incX); -void cblas_dtpsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const double *Ap, double *X, const int incX); - -void cblas_cgemv(const enum CBLAS_ORDER Order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const void *alpha, const void *A, const int lda, - const void *X, const int incX, const void *beta, - void *Y, const int incY); -void cblas_cgbmv(const enum CBLAS_ORDER Order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const int KL, const int KU, const void *alpha, - const void *A, const int lda, const void *X, - const int incX, const void *beta, void *Y, const int incY); -void cblas_ctrmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *A, const int lda, - void *X, const int incX); -void cblas_ctbmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const void *A, const int lda, - void *X, const int incX); -void cblas_ctpmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *Ap, void *X, const int incX); -void cblas_ctrsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *A, const int lda, void *X, - const int incX); -void cblas_ctbsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const void *A, const int lda, - void *X, const int incX); -void cblas_ctpsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *Ap, void *X, const int incX); - -void cblas_zgemv(const enum CBLAS_ORDER Order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const void *alpha, const void *A, const int lda, - const void *X, const int incX, const void *beta, - void *Y, const int incY); -void cblas_zgbmv(const enum CBLAS_ORDER Order, - const enum CBLAS_TRANSPOSE TransA, const int M, const int N, - const int KL, const int KU, const void *alpha, - const void *A, const int lda, const void *X, - const int incX, const void *beta, void *Y, const int incY); -void cblas_ztrmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const 
void *A, const int lda, - void *X, const int incX); -void cblas_ztbmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const void *A, const int lda, - void *X, const int incX); -void cblas_ztpmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *Ap, void *X, const int incX); -void cblas_ztrsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *A, const int lda, void *X, - const int incX); -void cblas_ztbsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const int K, const void *A, const int lda, - void *X, const int incX); -void cblas_ztpsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag, - const int N, const void *Ap, void *X, const int incX); - - -/* - * Routines with S and D prefixes only - */ -void cblas_ssymv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const float *A, - const int lda, const float *X, const int incX, - const float beta, float *Y, const int incY); -void cblas_ssbmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const int K, const float alpha, const float *A, - const int lda, const float *X, const int incX, - const float beta, float *Y, const int incY); -void cblas_sspmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const float *Ap, - const float *X, const int incX, - const float beta, float *Y, const int incY); -void cblas_sger(const enum CBLAS_ORDER Order, const int M, const int N, - const float alpha, const float *X, const int incX, - const float *Y, const int incY, float *A, const int lda); -void cblas_ssyr(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const float *X, - const int incX, float *A, const int lda); -void cblas_sspr(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const float *X, - const int incX, float *Ap); -void cblas_ssyr2(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const float *X, - const int incX, const float *Y, const int incY, float *A, - const int lda); -void cblas_sspr2(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const float *X, - const int incX, const float *Y, const int incY, float *A); - -void cblas_dsymv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const double *A, - const int lda, const double *X, const int incX, - const double beta, double *Y, const int incY); -void cblas_dsbmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const int K, const double alpha, const double *A, - const int lda, const double *X, const int incX, - const double beta, double *Y, const int incY); -void cblas_dspmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const double *Ap, - const double *X, const int incX, - const double beta, double *Y, const int incY); -void cblas_dger(const enum CBLAS_ORDER Order, const int M, const int N, - const double alpha, const double *X, const int incX, - const 
double *Y, const int incY, double *A, const int lda); -void cblas_dsyr(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const double *X, - const int incX, double *A, const int lda); -void cblas_dspr(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const double *X, - const int incX, double *Ap); -void cblas_dsyr2(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const double *X, - const int incX, const double *Y, const int incY, double *A, - const int lda); -void cblas_dspr2(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const double *X, - const int incX, const double *Y, const int incY, double *A); - - -/* - * Routines with C and Z prefixes only - */ -void cblas_chemv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const void *alpha, const void *A, - const int lda, const void *X, const int incX, - const void *beta, void *Y, const int incY); -void cblas_chbmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const int K, const void *alpha, const void *A, - const int lda, const void *X, const int incX, - const void *beta, void *Y, const int incY); -void cblas_chpmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const void *alpha, const void *Ap, - const void *X, const int incX, - const void *beta, void *Y, const int incY); -void cblas_cgeru(const enum CBLAS_ORDER Order, const int M, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *A, const int lda); -void cblas_cgerc(const enum CBLAS_ORDER Order, const int M, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *A, const int lda); -void cblas_cher(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const void *X, const int incX, - void *A, const int lda); -void cblas_chpr(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const float alpha, const void *X, - const int incX, void *A); -void cblas_cher2(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *A, const int lda); -void cblas_chpr2(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *Ap); - -void cblas_zhemv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const void *alpha, const void *A, - const int lda, const void *X, const int incX, - const void *beta, void *Y, const int incY); -void cblas_zhbmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const int K, const void *alpha, const void *A, - const int lda, const void *X, const int incX, - const void *beta, void *Y, const int incY); -void cblas_zhpmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const void *alpha, const void *Ap, - const void *X, const int incX, - const void *beta, void *Y, const int incY); -void cblas_zgeru(const enum CBLAS_ORDER Order, const int M, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *A, const int lda); -void cblas_zgerc(const enum CBLAS_ORDER Order, const int M, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const 
int incY, void *A, const int lda); -void cblas_zher(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const void *X, const int incX, - void *A, const int lda); -void cblas_zhpr(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const int N, const double alpha, const void *X, - const int incX, void *A); -void cblas_zher2(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *A, const int lda); -void cblas_zhpr2(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N, - const void *alpha, const void *X, const int incX, - const void *Y, const int incY, void *Ap); - -/* - * =========================================================================== - * Prototypes for level 3 BLAS - * =========================================================================== - */ - -/* - * Routines with standard 4 prefixes (S, D, C, Z) - */ -void cblas_sgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_TRANSPOSE TransB, const int M, const int N, - const int K, const float alpha, const float *A, - const int lda, const float *B, const int ldb, - const float beta, float *C, const int ldc); -void cblas_ssymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const int M, const int N, - const float alpha, const float *A, const int lda, - const float *B, const int ldb, const float beta, - float *C, const int ldc); -void cblas_ssyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const float alpha, const float *A, const int lda, - const float beta, float *C, const int ldc); -void cblas_ssyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const float alpha, const float *A, const int lda, - const float *B, const int ldb, const float beta, - float *C, const int ldc); -void cblas_strmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const float alpha, const float *A, const int lda, - float *B, const int ldb); -void cblas_strsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const float alpha, const float *A, const int lda, - float *B, const int ldb); - -void cblas_dgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_TRANSPOSE TransB, const int M, const int N, - const int K, const double alpha, const double *A, - const int lda, const double *B, const int ldb, - const double beta, double *C, const int ldc); -void cblas_dsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const int M, const int N, - const double alpha, const double *A, const int lda, - const double *B, const int ldb, const double beta, - double *C, const int ldc); -void cblas_dsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const double alpha, const double *A, const int lda, - const double beta, double *C, const int ldc); -void cblas_dsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const 
int K, - const double alpha, const double *A, const int lda, - const double *B, const int ldb, const double beta, - double *C, const int ldc); -void cblas_dtrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const double alpha, const double *A, const int lda, - double *B, const int ldb); -void cblas_dtrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const double alpha, const double *A, const int lda, - double *B, const int ldb); - -void cblas_cgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_TRANSPOSE TransB, const int M, const int N, - const int K, const void *alpha, const void *A, - const int lda, const void *B, const int ldb, - const void *beta, void *C, const int ldc); -void cblas_csymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const int M, const int N, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const void *beta, - void *C, const int ldc); -void cblas_csyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const void *alpha, const void *A, const int lda, - const void *beta, void *C, const int ldc); -void cblas_csyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const void *beta, - void *C, const int ldc); -void cblas_ctrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const void *alpha, const void *A, const int lda, - void *B, const int ldb); -void cblas_ctrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const void *alpha, const void *A, const int lda, - void *B, const int ldb); - -void cblas_zgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_TRANSPOSE TransB, const int M, const int N, - const int K, const void *alpha, const void *A, - const int lda, const void *B, const int ldb, - const void *beta, void *C, const int ldc); -void cblas_zsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const int M, const int N, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const void *beta, - void *C, const int ldc); -void cblas_zsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const void *alpha, const void *A, const int lda, - const void *beta, void *C, const int ldc); -void cblas_zsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const void *beta, - void *C, const int ldc); -void cblas_ztrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - 
const void *alpha, const void *A, const int lda, - void *B, const int ldb); -void cblas_ztrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA, - const enum CBLAS_DIAG Diag, const int M, const int N, - const void *alpha, const void *A, const int lda, - void *B, const int ldb); - - -/* - * Routines with prefixes C and Z only - */ -void cblas_chemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const int M, const int N, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const void *beta, - void *C, const int ldc); -void cblas_cherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const float alpha, const void *A, const int lda, - const float beta, void *C, const int ldc); -void cblas_cher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const float beta, - void *C, const int ldc); -void cblas_zhemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side, - const enum CBLAS_UPLO Uplo, const int M, const int N, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const void *beta, - void *C, const int ldc); -void cblas_zherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const double alpha, const void *A, const int lda, - const double beta, void *C, const int ldc); -void cblas_zher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, - const enum CBLAS_TRANSPOSE Trans, const int N, const int K, - const void *alpha, const void *A, const int lda, - const void *B, const int ldb, const double beta, - void *C, const int ldc); - -int cblas_errprn(int ierr, int info, char *form, ...); - -#endif /* end #ifdef CBLAS_ENUM_ONLY */ -#endif diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/C_BLAS/main.cpp b/gtsam/3rdparty/Eigen/bench/btl/libs/C_BLAS/main.cpp deleted file mode 100644 index 99f512f9b..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/C_BLAS/main.cpp +++ /dev/null @@ -1,73 +0,0 @@ -//===================================================== -// File : main.cpp -// Author : L. Plagne -// Copyright (C) EDF R&D, lun sep 30 14:23:28 CEST 2002 -//===================================================== -// -// This program is free software; you can redistribute it and/or -// modify it under the terms of the GNU General Public License -// as published by the Free Software Foundation; either version 2 -// of the License, or (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// You should have received a copy of the GNU General Public License -// along with this program; if not, write to the Free Software -// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-// -#include "utilities.h" -#include "C_BLAS_interface.hh" -#include "bench.hh" -#include "basic_actions.hh" - -#include "action_cholesky.hh" -#include "action_lu_decomp.hh" -#include "action_partial_lu.hh" -#include "action_trisolve_matrix.hh" - -#ifdef HAS_LAPACK -#include "action_hessenberg.hh" -#endif - -BTL_MAIN; - -int main() -{ - - bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); - bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); - - bench > >(MIN_MV,MAX_MV,NB_POINT); - bench > >(MIN_MV,MAX_MV,NB_POINT); - bench > >(MIN_MV,MAX_MV,NB_POINT); - bench > >(MIN_MV,MAX_MV,NB_POINT); - - bench > >(MIN_MV,MAX_MV,NB_POINT); - bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); - - bench > >(MIN_MM,MAX_MM,NB_POINT); - bench > >(MIN_MM,MAX_MM,NB_POINT); - bench > >(MIN_MM,MAX_MM,NB_POINT); - - bench > >(MIN_MM,MAX_MM,NB_POINT); - bench > >(MIN_MM,MAX_MM,NB_POINT); - - bench > >(MIN_MM,MAX_MM,NB_POINT); - - bench > >(MIN_MM,MAX_MM,NB_POINT); - bench > >(MIN_MM,MAX_MM,NB_POINT); - - #ifdef HAS_LAPACK - bench > >(MIN_MM,MAX_MM,NB_POINT); - bench > >(MIN_MM,MAX_MM,NB_POINT); - bench > >(MIN_MM,MAX_MM,NB_POINT); - #endif - - //bench > >(MIN_LU,MAX_LU,NB_POINT); - - return 0; -} - - diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/STL/STL_interface.hh b/gtsam/3rdparty/Eigen/bench/btl/libs/STL/STL_interface.hh index 0b73382f3..93e76bd55 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/STL/STL_interface.hh +++ b/gtsam/3rdparty/Eigen/bench/btl/libs/STL/STL_interface.hh @@ -78,18 +78,18 @@ public : cible[i][j]=source[i][j]; } - static inline void ata_product(const gene_matrix & A, gene_matrix & X, int N) - { - real somme; - for (int j=0;j=j) + { + for (int k=0;k -// Copyright (C) EDF R&D, lun sep 30 14:23:24 CEST 2002 -//===================================================== -// -// This program is free software; you can redistribute it and/or -// modify it under the terms of the GNU General Public License -// as published by the Free Software Foundation; either version 2 -// of the License, or (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// You should have received a copy of the GNU General Public License -// along with this program; if not, write to the Free Software -// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-// -#ifndef STL_ALGO_INTERFACE_HH -#define STL_ALGO_INTERFACE_HH -#include -#include -#include -#include -#include "utilities.h" - - -template -class STL_algo_interface{ - -public : - - typedef real real_type ; - - typedef std::vector stl_vector; - typedef std::vector stl_matrix; - - typedef stl_matrix gene_matrix; - - typedef stl_vector gene_vector; - - static inline std::string name( void ) - { - return "STL_algo"; - } - - static void free_matrix(gene_matrix & A, int N){} - - static void free_vector(gene_vector & B){} - - static inline void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){ - A=A_stl ; - } - - static inline void vector_from_stl(gene_vector & B, stl_vector & B_stl){ - B=B_stl ; - } - - static inline void vector_to_stl(gene_vector & B, stl_vector & B_stl){ - B_stl=B ; - } - - static inline void matrix_to_stl(gene_matrix & A, stl_matrix & A_stl){ - A_stl=A ; - } - - static inline void copy_vector(const gene_vector & source, gene_vector & cible, int N){ - for (int i=0;i -// Copyright (C) EDF R&D, lun sep 30 14:23:25 CEST 2002 +// Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or @@ -18,28 +16,29 @@ // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #include "utilities.h" -#include "f77_interface.hh" -#include "bench.hh" +#include "eigen3_interface.hh" +#include "static/bench_static.hh" #include "action_matrix_vector_product.hh" #include "action_matrix_matrix_product.hh" #include "action_axpy.hh" #include "action_lu_solve.hh" #include "action_ata_product.hh" #include "action_aat_product.hh" +#include "action_atv_product.hh" +#include "action_cholesky.hh" +#include "action_trisolve.hh" BTL_MAIN; int main() { - bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); - bench > >(MIN_MV,MAX_MV,NB_POINT); - - bench > >(MIN_MM,MAX_MM,NB_POINT); - - bench > >(MIN_MM,MAX_MM,NB_POINT); - - bench > >(MIN_MM,MAX_MM,NB_POINT); + bench_static(); + bench_static(); + bench_static(); + bench_static(); + bench_static(); + bench_static(); return 0; } diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/eigen2/eigen2_interface.hh b/gtsam/3rdparty/Eigen/bench/btl/libs/eigen2/eigen2_interface.hh new file mode 100644 index 000000000..47fe58135 --- /dev/null +++ b/gtsam/3rdparty/Eigen/bench/btl/libs/eigen2/eigen2_interface.hh @@ -0,0 +1,168 @@ +//===================================================== +// Copyright (C) 2008 Gael Guennebaud +//===================================================== +// +// This program is free software; you can redistribute it and/or +// modify it under the terms of the GNU General Public License +// as published by the Free Software Foundation; either version 2 +// of the License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+// +#ifndef EIGEN2_INTERFACE_HH +#define EIGEN2_INTERFACE_HH +// #include +#include +#include +#include +#include +#include +#include "btl.hh" + +using namespace Eigen; + +template +class eigen2_interface +{ + +public : + + enum {IsFixedSize = (SIZE!=Dynamic)}; + + typedef real real_type; + + typedef std::vector stl_vector; + typedef std::vector stl_matrix; + + typedef Eigen::Matrix gene_matrix; + typedef Eigen::Matrix gene_vector; + + static inline std::string name( void ) + { + #if defined(EIGEN_VECTORIZE_SSE) + if (SIZE==Dynamic) return "eigen2"; else return "tiny_eigen2"; + #elif defined(EIGEN_VECTORIZE_ALTIVEC) + if (SIZE==Dynamic) return "eigen2"; else return "tiny_eigen2"; + #else + if (SIZE==Dynamic) return "eigen2_novec"; else return "tiny_eigen2_novec"; + #endif + } + + static void free_matrix(gene_matrix & A, int N) {} + + static void free_vector(gene_vector & B) {} + + static BTL_DONT_INLINE void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){ + A.resize(A_stl[0].size(), A_stl.size()); + + for (int j=0; j().solveTriangular(B); + } + + static inline void trisolve_lower_matrix(const gene_matrix & L, const gene_matrix& B, gene_matrix& X, int N){ + X = L.template marked().solveTriangular(B); + } + + static inline void cholesky(const gene_matrix & X, gene_matrix & C, int N){ + C = X.llt().matrixL(); +// C = X; +// Cholesky::computeInPlace(C); +// Cholesky::computeInPlaceBlock(C); + } + + static inline void lu_decomp(const gene_matrix & X, gene_matrix & C, int N){ + C = X.lu().matrixLU(); +// C = X.inverse(); + } + + static inline void tridiagonalization(const gene_matrix & X, gene_matrix & C, int N){ + C = Tridiagonalization(X).packedMatrix(); + } + + static inline void hessenberg(const gene_matrix & X, gene_matrix & C, int N){ + C = HessenbergDecomposition(X).packedMatrix(); + } + + + +}; + +#endif diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/eigen2/main_adv.cpp b/gtsam/3rdparty/Eigen/bench/btl/libs/eigen2/main_adv.cpp new file mode 100644 index 000000000..fe3368925 --- /dev/null +++ b/gtsam/3rdparty/Eigen/bench/btl/libs/eigen2/main_adv.cpp @@ -0,0 +1,44 @@ +//===================================================== +// Copyright (C) 2008 Gael Guennebaud +//===================================================== +// +// This program is free software; you can redistribute it and/or +// modify it under the terms of the GNU General Public License +// as published by the Free Software Foundation; either version 2 +// of the License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+// +#include "utilities.h" +#include "eigen2_interface.hh" +#include "bench.hh" +#include "action_trisolve.hh" +#include "action_trisolve_matrix.hh" +#include "action_cholesky.hh" +#include "action_hessenberg.hh" +#include "action_lu_decomp.hh" +// #include "action_partial_lu.hh" + +BTL_MAIN; + +int main() +{ + bench > >(MIN_MM,MAX_MM,NB_POINT); + bench > >(MIN_MM,MAX_MM,NB_POINT); + bench > >(MIN_MM,MAX_MM,NB_POINT); + bench > >(MIN_MM,MAX_MM,NB_POINT); +// bench > >(MIN_MM,MAX_MM,NB_POINT); + + bench > >(MIN_MM,MAX_MM,NB_POINT); + bench > >(MIN_MM,MAX_MM,NB_POINT); + + return 0; +} + + diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/STL_algo/main.cpp b/gtsam/3rdparty/Eigen/bench/btl/libs/eigen2/main_linear.cpp similarity index 70% rename from gtsam/3rdparty/Eigen/bench/btl/libs/STL_algo/main.cpp rename to gtsam/3rdparty/Eigen/bench/btl/libs/eigen2/main_linear.cpp index 9ce2d947e..c17d16c08 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/STL_algo/main.cpp +++ b/gtsam/3rdparty/Eigen/bench/btl/libs/eigen2/main_linear.cpp @@ -1,7 +1,5 @@ //===================================================== -// File : main.cpp -// Author : L. Plagne -// Copyright (C) EDF R&D, lun sep 30 14:23:23 CEST 2002 +// Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or @@ -18,21 +16,18 @@ // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #include "utilities.h" -#include "STL_algo_interface.hh" +#include "eigen2_interface.hh" #include "bench.hh" -#include "action_atv_product.hh" -#include "action_axpy.hh" +#include "basic_actions.hh" BTL_MAIN; int main() { - - bench > >(MIN_MV,MAX_MV,NB_POINT); - - bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); - + bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); + bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); + return 0; } diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/test_interface.hh b/gtsam/3rdparty/Eigen/bench/btl/libs/eigen2/main_matmat.cpp similarity index 59% rename from gtsam/3rdparty/Eigen/bench/btl/libs/f77/test_interface.hh rename to gtsam/3rdparty/Eigen/bench/btl/libs/eigen2/main_matmat.cpp index 230c8dbc8..cd9dc9cb0 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/test_interface.hh +++ b/gtsam/3rdparty/Eigen/bench/btl/libs/eigen2/main_matmat.cpp @@ -1,14 +1,12 @@ //===================================================== -// File : test_interface.hh -// Author : L. Plagne -// Copyright (C) EDF R&D, lun sep 30 14:23:25 CEST 2002 +// Copyright (C) 2008 Gael Guennebaud //===================================================== -// +// // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. -// +// // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the @@ -16,21 +14,22 @@ // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-// -#ifndef TEST_INTERFACE_HH -#define TEST_INTERFACE_HH - -template< class Interface > -void test_interface( void ){ - - Interface::interface_name(); - - typename Interface::gene_matrix A; - - +// +#include "utilities.h" +#include "eigen2_interface.hh" +#include "bench.hh" +#include "basic_actions.hh" +BTL_MAIN; +int main() +{ + bench > >(MIN_MM,MAX_MM,NB_POINT); +// bench > >(MIN_MM,MAX_MM,NB_POINT); + bench > >(MIN_MM,MAX_MM,NB_POINT); +// bench > >(MIN_MM,MAX_MM,NB_POINT); + return 0; } -#endif + diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/C/main.cpp b/gtsam/3rdparty/Eigen/bench/btl/libs/eigen2/main_vecmat.cpp similarity index 50% rename from gtsam/3rdparty/Eigen/bench/btl/libs/C/main.cpp rename to gtsam/3rdparty/Eigen/bench/btl/libs/eigen2/main_vecmat.cpp index f0a0e5c65..8b66cd2d9 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/C/main.cpp +++ b/gtsam/3rdparty/Eigen/bench/btl/libs/eigen2/main_vecmat.cpp @@ -1,7 +1,5 @@ //===================================================== -// File : main.cpp -// Author : L. Plagne -// Copyright (C) EDF R&D, lun sep 30 14:23:23 CEST 2002 +// Copyright (C) 2008 Gael Guennebaud //===================================================== // // This program is free software; you can redistribute it and/or @@ -18,29 +16,19 @@ // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. // #include "utilities.h" +#include "eigen2_interface.hh" #include "bench.hh" -#include "C_interface.hh" -#include "action_matrix_vector_product.hh" -#include "action_atv_product.hh" -#include "action_matrix_matrix_product.hh" -#include "action_axpy.hh" -#include "action_ata_product.hh" -#include "action_aat_product.hh" -//#include "action_lu_solve.hh" -#include "timers/mixed_perf_analyzer.hh" +#include "basic_actions.hh" BTL_MAIN; int main() { - - bench > >(MIN_MV,MAX_MV,NB_POINT); - bench > >(MIN_MV,MAX_MV,NB_POINT); - bench > >(MIN_MM,MAX_MM,NB_POINT); - bench > >(MIN_MM,MAX_MM,NB_POINT); - bench > >(MIN_MM,MAX_MM,NB_POINT); - bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); - + bench > >(MIN_MV,MAX_MV,NB_POINT); + bench > >(MIN_MV,MAX_MV,NB_POINT); +// bench > >(MIN_MV,MAX_MV,NB_POINT); +// bench > >(MIN_MV,MAX_MV,NB_POINT); +// bench > >(MIN_MV,MAX_MV,NB_POINT); return 0; } diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/eigen3/CMakeLists.txt b/gtsam/3rdparty/Eigen/bench/btl/libs/eigen3/CMakeLists.txt index 334eb14a8..00cae23d3 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/eigen3/CMakeLists.txt +++ b/gtsam/3rdparty/Eigen/bench/btl/libs/eigen3/CMakeLists.txt @@ -1,5 +1,13 @@ -find_package(Eigen3) + +if((NOT EIGEN3_INCLUDE_DIR) AND Eigen_SOURCE_DIR) + # unless EIGEN3_INCLUDE_DIR is defined, let's use current Eigen version + set(EIGEN3_INCLUDE_DIR ${Eigen_SOURCE_DIR}) + set(EIGEN3_FOUND TRUE) +else() + find_package(Eigen3) +endif() + if (EIGEN3_FOUND) include_directories(${EIGEN3_INCLUDE_DIR}) @@ -28,10 +36,10 @@ if (EIGEN3_FOUND) if(NOT BTL_NOVEC) - btl_add_bench(btl_eigen3_novec_linear main_linear.cpp) - btl_add_bench(btl_eigen3_novec_vecmat main_vecmat.cpp) - btl_add_bench(btl_eigen3_novec_matmat main_matmat.cpp) - btl_add_bench(btl_eigen3_novec_adv main_adv.cpp ) + btl_add_bench(btl_eigen3_novec_linear main_linear.cpp OFF) + btl_add_bench(btl_eigen3_novec_vecmat main_vecmat.cpp OFF) + btl_add_bench(btl_eigen3_novec_matmat main_matmat.cpp OFF) + btl_add_bench(btl_eigen3_novec_adv main_adv.cpp OFF) btl_add_target_property(btl_eigen3_novec_linear COMPILE_FLAGS "-fno-exceptions -DEIGEN_DONT_VECTORIZE -DBTL_PREFIX=eigen3_novec") 
btl_add_target_property(btl_eigen3_novec_vecmat COMPILE_FLAGS "-fno-exceptions -DEIGEN_DONT_VECTORIZE -DBTL_PREFIX=eigen3_novec") btl_add_target_property(btl_eigen3_novec_matmat COMPILE_FLAGS "-fno-exceptions -DEIGEN_DONT_VECTORIZE -DBTL_PREFIX=eigen3_novec") diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/eigen3/eigen3_interface.hh b/gtsam/3rdparty/Eigen/bench/btl/libs/eigen3/eigen3_interface.hh index bd5eb4b6b..31bcc1f93 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/eigen3/eigen3_interface.hh +++ b/gtsam/3rdparty/Eigen/bench/btl/libs/eigen3/eigen3_interface.hh @@ -92,12 +92,13 @@ public : X.noalias() = A.transpose()*B.transpose(); } - static inline void ata_product(const gene_matrix & A, gene_matrix & X, int N){ - X.noalias() = A.transpose()*A; - } +// static inline void ata_product(const gene_matrix & A, gene_matrix & X, int N){ +// X.noalias() = A.transpose()*A; +// } static inline void aat_product(const gene_matrix & A, gene_matrix & X, int N){ - X.noalias() = A*A.transpose(); + X.template triangularView().setZero(); + X.template selfadjointView().rankUpdate(A); } static inline void matrix_vector_product(const gene_matrix & A, const gene_vector & B, gene_vector & X, int N){ @@ -194,16 +195,16 @@ public : } static inline void trisolve_lower_matrix(const gene_matrix & L, const gene_matrix& B, gene_matrix& X, int N){ - X = L.template triangularView().solve(B); + X = L.template triangularView().solve(B); } static inline void trmm(const gene_matrix & L, const gene_matrix& B, gene_matrix& X, int N){ - X = L.template triangularView() * B; + X.noalias() = L.template triangularView() * B; } static inline void cholesky(const gene_matrix & X, gene_matrix & C, int N){ C = X; - internal::llt_inplace::blocked(C); + internal::llt_inplace::blocked(C); //C = X.llt().matrixL(); // C = X; // Cholesky::computeInPlace(C); diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/eigen3/main_matmat.cpp b/gtsam/3rdparty/Eigen/bench/btl/libs/eigen3/main_matmat.cpp index 052810a16..926fa2b01 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/eigen3/main_matmat.cpp +++ b/gtsam/3rdparty/Eigen/bench/btl/libs/eigen3/main_matmat.cpp @@ -25,7 +25,7 @@ BTL_MAIN; int main() { bench > >(MIN_MM,MAX_MM,NB_POINT); - bench > >(MIN_MM,MAX_MM,NB_POINT); +// bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/CMakeLists.txt b/gtsam/3rdparty/Eigen/bench/btl/libs/f77/CMakeLists.txt deleted file mode 100644 index cecb9160c..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/CMakeLists.txt +++ /dev/null @@ -1,6 +0,0 @@ -if(CMAKE_MINOR_VERSION GREATER 4) - if(NOT MSVC) - enable_language(Fortran) - endif(NOT MSVC) - btl_add_bench(btl_f77 main.cpp dmxv.f smxv.f dmxm.f smxm.f daxpy.f saxpy.f data.f sata.f daat.f saat.f OFF) -endif(CMAKE_MINOR_VERSION GREATER 4) \ No newline at end of file diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/daat.f b/gtsam/3rdparty/Eigen/bench/btl/libs/f77/daat.f deleted file mode 100644 index a50329a66..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/daat.f +++ /dev/null @@ -1,14 +0,0 @@ - SUBROUTINE DAAT(A,X,N) -** -** X = AT * A - REAL*8 A(N,N),X(N,N),R - DO 20 I=1,N - DO 20 J=1,N - R=0. 
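The eigen3_interface above computes A*A^T through a symmetric rank update restricted to one triangle, routes the triangular kernels through triangularView, and marks the triangular product with noalias(). A self-contained sketch of those idioms on plain dynamic matrices, assuming Eigen 3:

    #include <Eigen/Dense>
    using namespace Eigen;

    int main()
    {
      const int n = 200;
      MatrixXd A = MatrixXd::Random(n, n), B = MatrixXd::Random(n, n);

      // S = A * A^T, accumulated into the lower triangle only.
      MatrixXd S = MatrixXd::Zero(n, n);
      S.selfadjointView<Lower>().rankUpdate(A);

      // Cholesky factor of S (LLT reads only the lower triangle by default;
      // S is almost surely positive definite for random A).
      MatrixXd L = S.llt().matrixL();

      // Triangular solve, and triangular product without a temporary.
      MatrixXd X = L.triangularView<Lower>().solve(B);
      MatrixXd Y(n, n);
      Y.noalias() = L.triangularView<Lower>() * B;

      return 0;
    }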
- DO 10 K=1,N - R=R+A(I,K)*A(J,K) - 10 CONTINUE - X(I,J)=R - 20 CONTINUE - RETURN - END diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/data.f b/gtsam/3rdparty/Eigen/bench/btl/libs/f77/data.f deleted file mode 100644 index 709211ca5..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/data.f +++ /dev/null @@ -1,14 +0,0 @@ - SUBROUTINE DATA(A,X,N) -** -** X = AT * A - REAL*8 A(N,N),X(N,N),R - DO 20 I=1,N - DO 20 J=1,N - R=0. - DO 10 K=1,N - R=R+A(K,I)*A(K,J) - 10 CONTINUE - X(I,J)=R - 20 CONTINUE - RETURN - END diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/daxpy.f b/gtsam/3rdparty/Eigen/bench/btl/libs/f77/daxpy.f deleted file mode 100644 index 29514a222..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/daxpy.f +++ /dev/null @@ -1,18 +0,0 @@ - SUBROUTINE DAXPYF(N,A,X,Y) -** *************************************** -** CALCULE Y = Y + A*X -** *************************************** -*>N NOMBRE D'OPERATIONS A FAIRE -*>A CONSTANTE MULTIPLICATIVE -*>X TABLEAU -*=Y TABLEAU DES RESULTATS -*A R. SANCHEZ ( EARLY WINTER 1987 ) -*V M.F. ROBEAU - REAL*8 X(1),Y(1) - REAL*8 A - DO 10 I=1,N - Y(I)=Y(I)+A*X(I) - 10 CONTINUE - RETURN - END - diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/dmxm.f b/gtsam/3rdparty/Eigen/bench/btl/libs/f77/dmxm.f deleted file mode 100644 index eb7ef9006..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/dmxm.f +++ /dev/null @@ -1,32 +0,0 @@ - SUBROUTINE DMXM(A,N,B,M,C,L) -** -** C = A * B -** A ET B MATRICES A(N,M) B(M,L) ==> C(N,L) -** -*>A PREMIERE MATRICE -*>N PREMIERE DIMENSION DE A ET DE C -*>B DEUXIEME MATRICE -*>M DEUXIEME DIMENSION DE A ET PERMIERE DE B -*L DEUXIEME DIMENSION DE B ET DE C -*A R. SANCHEZ ( EARLY WINTER 1987 ) -*V M.F. ROBEAU -*M AM BAUDRON - AVRIL 94 -*: ERREUR DANS L'APPEL A L'UTILITAIRE SGEMM -*: APPEL A L'UTILITAIRE SGEMM DE LA LIBRAIRIE BLAS SUR HP -*M AM BAUDRON - NOVEMBRE 1991 -*: ERREUR ( SOMME SUR LES TERMES PAS FAITE ) -*: APPEL A L'UTILITAIRE SGEMM DE LA LIBRAIRIE BLAS SUR RISC -*M AM BAUDRON - MAI 1993 -*: CHANGEMENT DES %IF LOCAL SUN MIPS SUITE A INTRODUCTION VERSION IBM - REAL*8 A(N,M),B(M,L),C(N,L),R - DO 20 I=1,N - DO 20 J=1,L - R=0. - DO 10 K=1,M - R=R+A(I,K)*B(K,J) - 10 CONTINUE - C(I,J)=R - 20 CONTINUE - RETURN - END diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/dmxm.f.mfr b/gtsam/3rdparty/Eigen/bench/btl/libs/f77/dmxm.f.mfr deleted file mode 100644 index 82ccac9a5..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/dmxm.f.mfr +++ /dev/null @@ -1,36 +0,0 @@ - - SUBROUTINE DMXM(A,N,B,M,C,L) -** -** C = A * B -** A ET B MATRICES A(N,M) B(M,L) ==> C(N,L) -** -*>A PREMIERE MATRICE -*>N PREMIERE DIMENSION DE A ET DE C -*>B DEUXIEME MATRICE -*>M DEUXIEME DIMENSION DE A ET PERMIERE DE B -*L DEUXIEME DIMENSION DE B ET DE C -*A R. SANCHEZ ( EARLY WINTER 1987 ) -*V M.F. ROBEAU -*M AM BAUDRON - AVRIL 94 -*: ERREUR DANS L'APPEL A L'UTILITAIRE SGEMM -*: APPEL A L'UTILITAIRE SGEMM DE LA LIBRAIRIE BLAS SUR HP -*M AM BAUDRON - NOVEMBRE 1991 -*: ERREUR ( SOMME SUR LES TERMES PAS FAITE ) -*: APPEL A L'UTILITAIRE SGEMM DE LA LIBRAIRIE BLAS SUR RISC -*M AM BAUDRON - MAI 1993 -*: CHANGEMENT DES %IF LOCAL SUN MIPS SUITE A INTRODUCTION VERSION IBM - REAL*8 A(N,M),B(M,L),C(N,L),R - DO 5 J=1,L - DO 5 I=1,N - C(I,J)=0. 
- 5 CONTINUE - DO 10 K=1,M - DO 20 J=1,L - R=B(K,J) - DO 20 I=1,N - C(I,J)=C(I,J)+A(I,K)*R - 20 CONTINUE - 10 CONTINUE - RETURN - END diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/dmxv.f b/gtsam/3rdparty/Eigen/bench/btl/libs/f77/dmxv.f deleted file mode 100644 index bd7e4d550..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/dmxv.f +++ /dev/null @@ -1,39 +0,0 @@ - SUBROUTINE DMXV(A,N,X,M,R) -C -** -** VERSION DOUBLE PRECISION DE MXV -** R = A * X -** A MATRICE A(N,M) -** R ET X VECTEURS -** -*>A PREMIERE MATRICE -*>N PREMIERE DIMENSION DE A -*>X VECTEUR -*>M DEUXIEME DIMENSION DE A -* -// Copyright (C) EDF R&D, lun sep 30 14:23:24 CEST 2002 -//===================================================== -// -// This program is free software; you can redistribute it and/or -// modify it under the terms of the GNU General Public License -// as published by the Free Software Foundation; either version 2 -// of the License, or (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// You should have received a copy of the GNU General Public License -// along with this program; if not, write to the Free Software -// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -// -#ifndef F77_INTERFACE_HH -#define F77_INTERFACE_HH -#include "f77_interface_base.hh" -#include - -extern "C" -{ - void dmxv_(double * A, int * N, double * X, int * M, double *R); - void smxv_(float * A, int * N, float * X, int * M, float *R); - - void dmxm_(double * A, int * N, double * B, int * M, double *C, int * K); - void smxm_(float * A, int * N, float * B, int * M, float *C, int * K); - - void data_(double * A, double *X, int * N); - void sata_(float * A, float *X, int * N); - - void daat_(double * A, double *X, int * N); - void saat_(float * A, float *X, int * N); - - void saxpyf_(int * N, float * coef, float * X, float *Y); - void daxpyf_(int * N, double * coef, double * X, double *Y); -} - -template -class f77_interface : public f77_interface_base -{ -public : - - typedef typename f77_interface_base::gene_matrix gene_matrix; - typedef typename f77_interface_base::gene_vector gene_vector; - - static inline std::string name( void ) - { - return "f77"; - } - - static inline void matrix_vector_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N) - { - dmxv_(A,&N,B,&N,X); - } - - static inline void matrix_matrix_product(gene_matrix & A, gene_matrix & B, gene_matrix & X, int N) - { - dmxm_(A,&N,B,&N,X,&N); - } - - static inline void ata_product(gene_matrix & A, gene_matrix & X, int N) - { - data_(A,X,&N); - } - - static inline void aat_product(gene_matrix & A, gene_matrix & X, int N) - { - daat_(A,X,&N); - } - - static inline void axpy(real coef, const gene_vector & X, gene_vector & Y, int N) - { - int one=1; - daxpyf_(&N,&coef,X,Y); - } - - -}; - - -template<> -class f77_interface : public f77_interface_base -{ -public : - - static inline std::string name( void ) - { - return "F77"; - } - - - static inline void matrix_vector_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N) - { - smxv_(A,&N,B,&N,X); - } - - static inline void matrix_matrix_product(gene_matrix & A, gene_matrix & B, gene_matrix & X, int N) - { - smxm_(A,&N,B,&N,X,&N); - } - - static inline void ata_product(gene_matrix & A, gene_matrix & X, int N) - { - sata_(A,X,&N); - 
} - - static inline void aat_product(gene_matrix & A, gene_matrix & X, int N) - { - saat_(A,X,&N); - } - - - static inline void axpy(float coef, const gene_vector & X, gene_vector & Y, int N) - { - saxpyf_(&N,&coef,X,Y); - } - -}; - - -#endif - - - diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/saat.f b/gtsam/3rdparty/Eigen/bench/btl/libs/f77/saat.f deleted file mode 100644 index 5d1855d2c..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/saat.f +++ /dev/null @@ -1,14 +0,0 @@ - SUBROUTINE SAAT(A,X,N) -** -** X = AT * A - REAL*4 A(N,N),X(N,N) - DO 20 I=1,N - DO 20 J=1,N - R=0. - DO 10 K=1,N - R=R+A(I,K)*A(J,K) - 10 CONTINUE - X(I,J)=R - 20 CONTINUE - RETURN - END diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/sata.f b/gtsam/3rdparty/Eigen/bench/btl/libs/f77/sata.f deleted file mode 100644 index 3ab83d958..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/sata.f +++ /dev/null @@ -1,14 +0,0 @@ - SUBROUTINE SATA(A,X,N) -** -** X = AT * A - REAL*4 A(N,N),X(N,N) - DO 20 I=1,N - DO 20 J=1,N - R=0. - DO 10 K=1,N - R=R+A(K,I)*A(K,J) - 10 CONTINUE - X(I,J)=R - 20 CONTINUE - RETURN - END diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/saxpy.f b/gtsam/3rdparty/Eigen/bench/btl/libs/f77/saxpy.f deleted file mode 100644 index d0f74fd70..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/saxpy.f +++ /dev/null @@ -1,16 +0,0 @@ - SUBROUTINE SAXPYF(N,A,X,Y) -** *************************************** -** CALCULE Y = Y + A*X -** *************************************** -*>N NOMBRE D'OPERATIONS A FAIRE -*>A CONSTANTE MULTIPLICATIVE -*>X TABLEAU -*=Y TABLEAU DES RESULTATS -*A R. SANCHEZ ( EARLY WINTER 1987 ) -*V M.F. ROBEAU - DIMENSION X(1),Y(1) - DO 10 I=1,N - Y(I)=Y(I)+A*X(I) - 10 CONTINUE - RETURN - END diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/smxm.f b/gtsam/3rdparty/Eigen/bench/btl/libs/f77/smxm.f deleted file mode 100644 index a1e63adca..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/smxm.f +++ /dev/null @@ -1,32 +0,0 @@ - SUBROUTINE SMXM(A,N,B,M,C,L) -** -** C = A * B -** A ET B MATRICES A(N,M) B(M,L) ==> C(N,L) -** -*>A PREMIERE MATRICE -*>N PREMIERE DIMENSION DE A ET DE C -*>B DEUXIEME MATRICE -*>M DEUXIEME DIMENSION DE A ET PERMIERE DE B -*L DEUXIEME DIMENSION DE B ET DE C -*A R. SANCHEZ ( EARLY WINTER 1987 ) -*V M.F. ROBEAU -*M AM BAUDRON - AVRIL 94 -*: ERREUR DANS L'APPEL A L'UTILITAIRE SGEMM -*: APPEL A L'UTILITAIRE SGEMM DE LA LIBRAIRIE BLAS SUR HP -*M AM BAUDRON - NOVEMBRE 1991 -*: ERREUR ( SOMME SUR LES TERMES PAS FAITE ) -*: APPEL A L'UTILITAIRE SGEMM DE LA LIBRAIRIE BLAS SUR RISC -*M AM BAUDRON - MAI 1993 -*: CHANGEMENT DES %IF LOCAL SUN MIPS SUITE A INTRODUCTION VERSION IBM - DIMENSION A(N,M),B(M,L),C(N,L) - DO 20 I=1,N - DO 20 J=1,L - R=0. 
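The removed f77 back-end above reaches its Fortran kernels through extern "C" declarations: with most Fortran compilers a SUBROUTINE FOO becomes the C symbol foo_ and every argument is passed by address. A minimal sketch of that binding, assuming the daxpyf routine listed above is linked into the same program:

    #include <vector>

    // Fortran: SUBROUTINE DAXPYF(N,A,X,Y) computes Y = Y + A*X.
    extern "C" void daxpyf_(int* n, double* a, double* x, double* y);

    void axpy(double a, const std::vector<double>& x, std::vector<double>& y)
    {
      int n = static_cast<int>(y.size());
      // Everything is passed by pointer; X is read-only on the Fortran side.
      daxpyf_(&n, &a, const_cast<double*>(&x[0]), &y[0]);
    }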
- DO 10 K=1,M - R=R+A(I,K)*B(K,J) - 10 CONTINUE - C(I,J)=R - 20 CONTINUE - RETURN - END diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/smxv.f b/gtsam/3rdparty/Eigen/bench/btl/libs/f77/smxv.f deleted file mode 100644 index d2f7ed24e..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/f77/smxv.f +++ /dev/null @@ -1,38 +0,0 @@ - SUBROUTINE SMXV(A,N,X,M,R) -C -** -** VERSION DOUBLE PRECISION DE MXV -** R = A * X -** A MATRICE A(N,M) -** R ET X VECTEURS -** -*>A PREMIERE MATRICE -*>N PREMIERE DIMENSION DE A -*>X VECTEUR -*>M DEUXIEME DIMENSION DE A -* ipvt(N); gmm::lu_factor(R, ipvt); diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/gmm/main.cpp b/gtsam/3rdparty/Eigen/bench/btl/libs/gmm/main.cpp index b1f51edb6..1f0c051eb 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/gmm/main.cpp +++ b/gtsam/3rdparty/Eigen/bench/btl/libs/gmm/main.cpp @@ -20,7 +20,7 @@ #include "bench.hh" #include "basic_actions.hh" #include "action_hessenberg.hh" -#include "action_lu_decomp.hh" +#include "action_partial_lu.hh" BTL_MAIN; @@ -34,13 +34,13 @@ int main() bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); - bench > >(MIN_MM,MAX_MM,NB_POINT); - bench > >(MIN_MM,MAX_MM,NB_POINT); +// bench > >(MIN_MM,MAX_MM,NB_POINT); +// bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); //bench > >(MIN_LU,MAX_LU,NB_POINT); - bench > >(MIN_MM,MAX_MM,NB_POINT); + bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/hand_vec/CMakeLists.txt b/gtsam/3rdparty/Eigen/bench/btl/libs/hand_vec/CMakeLists.txt deleted file mode 100644 index 3fffbfe8a..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/hand_vec/CMakeLists.txt +++ /dev/null @@ -1,12 +0,0 @@ -find_package(Eigen2) -if (EIGEN2_FOUND) - - include_directories(${EIGEN2_INCLUDE_DIR} ${PROJECT_SOURCE_DIR}/libs/f77) - btl_add_bench(btl_hand_vec main.cpp OFF) - - btl_add_bench(btl_hand_peeling main.cpp OFF) - if (BUILD_btl_hand_peeling) - set_target_properties(btl_hand_peeling PROPERTIES COMPILE_FLAGS "-DPEELING") - endif (BUILD_btl_hand_peeling) - -endif (EIGEN2_FOUND) \ No newline at end of file diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/hand_vec/hand_vec_interface.hh b/gtsam/3rdparty/Eigen/bench/btl/libs/hand_vec/hand_vec_interface.hh deleted file mode 100755 index 0bb4b64ca..000000000 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/hand_vec/hand_vec_interface.hh +++ /dev/null @@ -1,886 +0,0 @@ -//===================================================== -// File : hand_vec_interface.hh -// Copyright (C) 2008 Gael Guennebaud -//===================================================== -// -// This program is free software; you can redistribute it and/or -// modify it under the terms of the GNU General Public License -// as published by the Free Software Foundation; either version 2 -// of the License, or (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// You should have received a copy of the GNU General Public License -// along with this program; if not, write to the Free Software -// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-// -#ifndef HAND_VEC_INTERFACE_HH -#define HAND_VEC_INTERFACE_HH - -#include -#include "f77_interface.hh" - -using namespace Eigen; - -template -class hand_vec_interface : public f77_interface_base { - -public : - - typedef typename internal::packet_traits::type Packet; - static const int PacketSize = internal::packet_traits::size; - - typedef typename f77_interface_base::stl_matrix stl_matrix; - typedef typename f77_interface_base::stl_vector stl_vector; - typedef typename f77_interface_base::gene_matrix gene_matrix; - typedef typename f77_interface_base::gene_vector gene_vector; - - static void free_matrix(gene_matrix & A, int N){ - internal::aligned_free(A); - } - - static void free_vector(gene_vector & B){ - internal::aligned_free(B); - } - - static inline void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){ - int N = A_stl.size(); - A = (real*)internal::aligned_malloc(N*N*sizeof(real)); - for (int j=0;j0) - { -// for (size_t j = 0;j0) - { - bool aligned0 = (iN0 % PacketSize) == 0; - if (aligned0) - for (int j = 0;j0) -// { -// // int aligned0 = (iN0 % PacketSize); -// int aligned1 = (iN1 % PacketSize); -// -// if (aligned1==0) -// { -// for (int j = 0;j0) -// { -// bool aligned0 = (iN0 % PacketSize) == 0; -// if (aligned0) -// for (int j = 0;j0) -// { -// bool aligned0 = (iN0 % PacketSize) == 0; -// bool aligned1 = (iN1 % PacketSize) == 0; -// -// if (aligned0 && aligned1) -// { -// for (int j = 0;j0) -// { -// bool aligned0 = (iN0 % PacketSize) == 0; -// if (aligned0) -// for (int j = 0;j0) -// { -// bool aligned = (iN % PacketSize) == 0; -// if (aligned) -// { -// #ifdef PEELING -// Packet A0, A1, A2, X0, X1, X2; -// int ANP = (AN/(8*PacketSize))*8*PacketSize; -// for (int j = 0;j0) - { - int align1 = (iN1 % PacketSize); - if (align1==0) - { - for (int j = 0;j0) - { - if (iN0 % PacketSize==0) - for (int j = 0;j0) -// { -// bool aligned = (iN % PacketSize) == 0; -// if (aligned) -// { -// #ifdef PEELING -// int ANP = (AN/(8*PacketSize))*8*PacketSize; -// for (int j = 0;j0) - { - Packet pcoef = internal::pset1(coef); - #ifdef PEELING - const int peelSize = 3; - int ANP = (AN/(peelSize*PacketSize))*peelSize*PacketSize; - float* X1 = X + PacketSize; - float* Y1 = Y + PacketSize; - float* X2 = X + 2*PacketSize; - float* Y2 = Y + 2*PacketSize; - Packet x0,x1,x2,y0,y1,y2; - for (int j = 0;j -// Copyright (C) EDF R&D, lun sep 30 14:23:23 CEST 2002 -//===================================================== -// -// This program is free software; you can redistribute it and/or -// modify it under the terms of the GNU General Public License -// as published by the Free Software Foundation; either version 2 -// of the License, or (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// You should have received a copy of the GNU General Public License -// along with this program; if not, write to the Free Software -// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
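The removed hand_vec back-end above hand-vectorizes its kernels with Eigen's packet primitives plus explicit loop peeling. In the same spirit, a minimal sketch of a 4-wide SSE axpy (y += a*x) with a scalar tail, written with raw intrinsics rather than the internal packet API and assuming 16-byte aligned inputs:

    #include <xmmintrin.h>

    void axpy_sse(float a, const float* x, float* y, int n)
    {
      const __m128 pa = _mm_set1_ps(a);          // broadcast the coefficient
      int i = 0;
      for (; i + 4 <= n; i += 4)                 // vectorized body, 4 floats at a time
      {
        __m128 px = _mm_load_ps(x + i);
        __m128 py = _mm_load_ps(y + i);
        _mm_store_ps(y + i, _mm_add_ps(py, _mm_mul_ps(pa, px)));
      }
      for (; i < n; ++i)                         // scalar tail for the remainder
        y[i] += a * x[i];
    }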
-// -#include "utilities.h" -#include "bench.hh" -#include "hand_vec_interface.hh" -#include "action_matrix_vector_product.hh" -#include "action_atv_product.hh" -#include "action_matrix_matrix_product.hh" -#include "action_axpy.hh" -#include "action_ata_product.hh" -#include "action_aat_product.hh" -#include "basic_actions.hh" -//#include "action_lu_solve.hh" -// #include "timers/mixed_perf_analyzer.hh" - -BTL_MAIN; - -int main() -{ - - bench > >(MIN_MV,MAX_MV,NB_POINT); - bench > >(MIN_MV,MAX_MV,NB_POINT); -// bench > >(MIN_MM,MAX_MM,NB_POINT); -// bench > >(MIN_MM,MAX_MM,NB_POINT); -// bench > >(MIN_MM,MAX_MM,NB_POINT); - bench > >(MIN_MM,MAX_MM,NB_POINT); - bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); - - - return 0; -} - - diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/mtl4/mtl4_interface.hh b/gtsam/3rdparty/Eigen/bench/btl/libs/mtl4/mtl4_interface.hh index a2f067f73..3795ac61e 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/mtl4/mtl4_interface.hh +++ b/gtsam/3rdparty/Eigen/bench/btl/libs/mtl4/mtl4_interface.hh @@ -92,9 +92,9 @@ public : X = (trans(A)*trans(B)); } - static inline void ata_product(const gene_matrix & A, gene_matrix & X, int N){ - X = (trans(A)*A); - } +// static inline void ata_product(const gene_matrix & A, gene_matrix & X, int N){ +// X = (trans(A)*A); +// } static inline void aat_product(const gene_matrix & A, gene_matrix & X, int N){ X = (A*trans(A)); diff --git a/gtsam/3rdparty/Eigen/bench/btl/libs/ublas/main.cpp b/gtsam/3rdparty/Eigen/bench/btl/libs/ublas/main.cpp index 22d697225..e2e77ee1f 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/libs/ublas/main.cpp +++ b/gtsam/3rdparty/Eigen/bench/btl/libs/ublas/main.cpp @@ -33,8 +33,8 @@ int main() bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); - bench > >(MIN_MM,MAX_MM,NB_POINT); - bench > >(MIN_MM,MAX_MM,NB_POINT); +// bench > >(MIN_MM,MAX_MM,NB_POINT); +// bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); diff --git a/gtsam/3rdparty/Eigen/bench/sparse_dense_product.cpp b/gtsam/3rdparty/Eigen/bench/sparse_dense_product.cpp index bfe46122d..f3f519406 100644 --- a/gtsam/3rdparty/Eigen/bench/sparse_dense_product.cpp +++ b/gtsam/3rdparty/Eigen/bench/sparse_dense_product.cpp @@ -4,7 +4,7 @@ // -DNOGMM -DNOMTL -DCSPARSE // -I /home/gael/Coding/LinearAlgebra/CSparse/Include/ /home/gael/Coding/LinearAlgebra/CSparse/Lib/libcsparse.a #ifndef SIZE -#define SIZE 10000 +#define SIZE 650000 #endif #ifndef DENSITY @@ -62,7 +62,8 @@ int main(int argc, char *argv[]) BenchTimer timer; for (float density = DENSITY; density>=MINDENSITY; density*=0.5) { - fillMatrix(density, rows, cols, sm1); + //fillMatrix(density, rows, cols, sm1); + fillMatrix2(7, rows, cols, sm1); // dense matrices #ifdef DENSEMATRIX @@ -76,14 +77,14 @@ int main(int argc, char *argv[]) for (int k=0; k uv1, uv2; + eiToUblasVec(v1,uv1); + eiToUblasVec(v2,uv2); + +// std::vector gmmV1(cols), gmmV2(cols); +// Map >(&gmmV1[0], cols) = v1; +// Map >(&gmmV2[0], cols) = v2; + + BENCH( uv2 = boost::numeric::ublas::prod(m1, uv1); ) + std::cout << " a * v:\t" << timer.value() << endl; + +// BENCH( boost::ublas::prod(gmm::transposed(m1), gmmV1, gmmV2); ) +// std::cout << " a' * v:\t" << timer.value() << endl; + } + #endif // MTL4 #ifndef NOMTL diff --git a/gtsam/3rdparty/Eigen/bench/sparse_product.cpp b/gtsam/3rdparty/Eigen/bench/sparse_product.cpp index 0b5558b89..d2fc44f0d 100644 --- a/gtsam/3rdparty/Eigen/bench/sparse_product.cpp +++ b/gtsam/3rdparty/Eigen/bench/sparse_product.cpp @@ -20,6 +20,7 @@ #include #include "BenchTimer.h" +#include 
"BenchUtil.h" #include "BenchSparseUtil.h" #ifndef NBTRIES @@ -228,16 +229,12 @@ int main(int argc, char *argv[]) eiToCSparse(sm1, m1); eiToCSparse(sm2, m2); -// timer.reset(); -// timer.start(); -// for (int k=0; k + +void bench_printhelp() +{ + cout<< " \nbenchsolver : performs a benchmark of all the solvers available in Eigen \n\n"; + cout<< " MATRIX FOLDER : \n"; + cout<< " The matrices for the benchmark should be collected in a folder specified with an environment variable EIGEN_MATRIXDIR \n"; + cout<< " This folder should contain the subfolders real/ and complex/ : \n"; + cout<< " The matrices are stored using the matrix market coordinate format \n"; + cout<< " The matrix and associated right-hand side (rhs) files are named respectively \n"; + cout<< " as MatrixName.mtx and MatrixName_b.mtx. If the rhs does not exist, a random one is generated. \n"; + cout<< " If a matrix is SPD, the matrix should be named as MatrixName_SPD.mtx \n"; + cout<< " If a true solution exists, it should be named as MatrixName_x.mtx; \n" ; + cout<< " it will be used to compute the norm of the error relative to the computed solutions\n\n"; + cout<< " OPTIONS : \n"; + cout<< " -h or --help \n print this help and return\n\n"; + cout<< " -d matrixdir \n Use matrixdir as the matrix folder instead of the one specified in the environment variable EIGEN_MATRIXDIR\n\n"; + cout<< " -o outputfile.html \n Output the statistics to a html file \n\n"; + cout<< " --eps Sets the relative tolerance for iterative solvers (default 1e-08) \n\n"; + cout<< " --maxits Sets the maximum number of iterations (default 1000) \n\n"; + +} +int main(int argc, char ** args) +{ + + bool help = ( get_options(argc, args, "-h") || get_options(argc, args, "--help") ); + if(help) { + bench_printhelp(); + return 0; + } + + // Get the location of the test matrices + string matrix_dir; + if (!get_options(argc, args, "-d", &matrix_dir)) + { + if(getenv("EIGEN_MATRIXDIR") == NULL){ + std::cerr << "Please, specify the location of the matrices with -d mat_folder or the environment variable EIGEN_MATRIXDIR \n"; + std::cerr << " Run with --help to see the list of all the available options \n"; + return -1; + } + matrix_dir = getenv("EIGEN_MATRIXDIR"); + } + + std::ofstream statbuf; + string statFile ; + + // Get the file to write the statistics + bool statFileExists = get_options(argc, args, "-o", &statFile); + if(statFileExists) + { + statbuf.open(statFile.c_str(), std::ios::out); + if(statbuf.good()){ + statFileExists = true; + printStatheader(statbuf); + statbuf.close(); + } + else + std::cerr << "Unable to open the provided file for writting... 
\n"; + } + + // Get the maximum number of iterations and the tolerance + int maxiters = 1000; + double tol = 1e-08; + string inval; + if (get_options(argc, args, "--eps", &inval)) + tol = atof(inval.c_str()); + if(get_options(argc, args, "--maxits", &inval)) + maxiters = atoi(inval.c_str()); + + string current_dir; + // Test the matrices in %EIGEN_MATRIXDIR/real + current_dir = matrix_dir + "/real"; + Browse_Matrices(current_dir, statFileExists, statFile,maxiters, tol); + + // Test the matrices in %EIGEN_MATRIXDIR/complex + current_dir = matrix_dir + "/complex"; + Browse_Matrices >(current_dir, statFileExists, statFile, maxiters, tol); + + if(statFileExists) + { + statbuf.open(statFile.c_str(), std::ios::app); + statbuf << " \n"; + cout << "\n Output written in " << statFile << " ...\n"; + statbuf.close(); + } + + return 0; +} + + diff --git a/gtsam/3rdparty/Eigen/bench/spbench/spbenchsolver.h b/gtsam/3rdparty/Eigen/bench/spbench/spbenchsolver.h new file mode 100644 index 000000000..6d765a997 --- /dev/null +++ b/gtsam/3rdparty/Eigen/bench/spbench/spbenchsolver.h @@ -0,0 +1,548 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
+ + +#include +#include +#include "Eigen/SparseCore" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef EIGEN_CHOLMOD_SUPPORT +#include +#endif + +#ifdef EIGEN_UMFPACK_SUPPORT +#include +#endif + +#ifdef EIGEN_PARDISO_SUPPORT +#include +#endif + +#ifdef EIGEN_SUPERLU_SUPPORT +#include +#endif + +#ifdef EIGEN_PASTIX_SUPPORT +#include +#endif + +// CONSTANTS +#define EIGEN_UMFPACK 0 +#define EIGEN_SUPERLU 1 +#define EIGEN_PASTIX 2 +#define EIGEN_PARDISO 3 +#define EIGEN_BICGSTAB 4 +#define EIGEN_BICGSTAB_ILUT 5 +#define EIGEN_GMRES 6 +#define EIGEN_GMRES_ILUT 7 +#define EIGEN_SIMPLICIAL_LDLT 8 +#define EIGEN_CHOLMOD_LDLT 9 +#define EIGEN_PASTIX_LDLT 10 +#define EIGEN_PARDISO_LDLT 11 +#define EIGEN_SIMPLICIAL_LLT 12 +#define EIGEN_CHOLMOD_SUPERNODAL_LLT 13 +#define EIGEN_CHOLMOD_SIMPLICIAL_LLT 14 +#define EIGEN_PASTIX_LLT 15 +#define EIGEN_PARDISO_LLT 16 +#define EIGEN_CG 17 +#define EIGEN_CG_PRECOND 18 +#define EIGEN_ALL_SOLVERS 19 + +using namespace Eigen; +using namespace std; + +struct Stats{ + ComputationInfo info; + double total_time; + double compute_time; + double solve_time; + double rel_error; + int memory_used; + int iterations; + int isavail; + int isIterative; +}; + +// Global variables for input parameters +int MaximumIters; // Maximum number of iterations +double RelErr; // Relative error of the computed solution + +template inline typename NumTraits::Real test_precision() { return NumTraits::dummy_precision(); } +template<> inline float test_precision() { return 1e-3f; } +template<> inline double test_precision() { return 1e-6; } +template<> inline float test_precision >() { return test_precision(); } +template<> inline double test_precision >() { return test_precision(); } + +void printStatheader(std::ofstream& out) +{ + int LUcnt = 0; + string LUlist =" ", LLTlist = " LLT", LDLTlist = " LDLT "; + +#ifdef EIGEN_UMFPACK_SUPPORT + LUlist += " UMFPACK "; LUcnt++; +#endif +#ifdef EIGEN_SUPERLU_SUPPORT + LUlist += " SUPERLU "; LUcnt++; +#endif +#ifdef EIGEN_CHOLMOD_SUPPORT + LLTlist += " CHOLMOD SP LLT CHOLMOD LLT"; + LDLTlist += "CHOLMOD LDLT"; +#endif +#ifdef EIGEN_PARDISO_SUPPORT + LUlist += " PARDISO LU"; LUcnt++; + LLTlist += " PARDISO LLT"; + LDLTlist += " PARDISO LDLT"; +#endif +#ifdef EIGEN_PASTIX_SUPPORT + LUlist += " PASTIX LU"; LUcnt++; + LLTlist += " PASTIX LLT"; + LDLTlist += " PASTIX LDLT"; +#endif + + out << "\n "; + out << "
Matrix N NNZ "; + if (LUcnt) out << LUlist; + out << " BiCGSTAB BiCGSTAB+ILUT"<< "GMRES+ILUT" < CG "<< std::endl; +} + + +template +Stats call_solver(Solver &solver, const typename Solver::MatrixType& A, const Matrix& b, const Matrix& refX) +{ + Stats stat; + Matrix x; + BenchTimer timer; + timer.reset(); + timer.start(); + solver.compute(A); + if (solver.info() != Success) + { + stat.info = NumericalIssue; + std::cerr << "Solver failed ... \n"; + return stat; + } + timer.stop(); + stat.compute_time = timer.value(); + + timer.reset(); + timer.start(); + x = solver.solve(b); + if (solver.info() == NumericalIssue) + { + stat.info = NumericalIssue; + std::cerr << "Solver failed ... \n"; + return stat; + } + + timer.stop(); + stat.solve_time = timer.value(); + stat.total_time = stat.solve_time + stat.compute_time; + stat.memory_used = 0; + // Verify the relative error + if(refX.size() != 0) + stat.rel_error = (refX - x).norm()/refX.norm(); + else + { + // Compute the relative residual norm + Matrix temp; + temp = A * x; + stat.rel_error = (b-temp).norm()/b.norm(); + } + if ( stat.rel_error > RelErr ) + { + stat.info = NoConvergence; + return stat; + } + else + { + stat.info = Success; + return stat; + } +} + +template +Stats call_directsolver(Solver& solver, const typename Solver::MatrixType& A, const Matrix& b, const Matrix& refX) +{ + Stats stat; + stat = call_solver(solver, A, b, refX); + return stat; +} + +template +Stats call_itersolver(Solver &solver, const typename Solver::MatrixType& A, const Matrix& b, const Matrix& refX) +{ + Stats stat; + solver.setTolerance(RelErr); + solver.setMaxIterations(MaximumIters); + + stat = call_solver(solver, A, b, refX); + stat.iterations = solver.iterations(); + return stat; +} + +inline void printStatItem(Stats *stat, int solver_id, int& best_time_id, double& best_time_val) +{ + stat[solver_id].isavail = 1; + + if (stat[solver_id].info == NumericalIssue) + { + cout << " SOLVER FAILED ... Probably a numerical issue \n"; + return; + } + if (stat[solver_id].info == NoConvergence){ + cout << "REL. ERROR " << stat[solver_id].rel_error; + if(stat[solver_id].isIterative == 1) + cout << " (" << stat[solver_id].iterations << ") \n"; + return; + } + + // Record the best CPU time + if (!best_time_val) + { + best_time_val = stat[solver_id].total_time; + best_time_id = solver_id; + } + else if (stat[solver_id].total_time < best_time_val) + { + best_time_val = stat[solver_id].total_time; + best_time_id = solver_id; + } + // Print statistics to standard output + if (stat[solver_id].info == Success){ + cout<< "COMPUTE TIME : " << stat[solver_id].compute_time<< " \n"; + cout<< "SOLVE TIME : " << stat[solver_id].solve_time<< " \n"; + cout<< "TOTAL TIME : " << stat[solver_id].total_time<< " \n"; + cout << "REL. 
ERROR : " << stat[solver_id].rel_error ; + if(stat[solver_id].isIterative == 1) { + cout << " (" << stat[solver_id].iterations << ") "; + } + cout << std::endl; + } + +} + + +/* Print the results from all solvers corresponding to a particular matrix + * The best CPU time is printed in bold + */ +inline void printHtmlStatLine(Stats *stat, int best_time_id, string& statline) +{ + + string markup; + ostringstream compute,solve,total,error; + for (int i = 0; i < EIGEN_ALL_SOLVERS; i++) + { + if (stat[i].isavail == 0) continue; + if(i == best_time_id) + markup = ""; + else + markup = ""; + + if (stat[i].info == Success){ + compute << markup << stat[i].compute_time; + solve << markup << stat[i].solve_time; + total << markup << stat[i].total_time; + error << " " << stat[i].rel_error; + if(stat[i].isIterative == 1) { + error << " (" << stat[i].iterations << ") "; + } + } + else { + compute << " -" ; + solve << " -" ; + total << " -" ; + if(stat[i].info == NoConvergence){ + error << " "<< stat[i].rel_error ; + if(stat[i].isIterative == 1) + error << " (" << stat[i].iterations << ") "; + } + else error << " - "; + } + } + + statline = "Compute Time " + compute.str() + "\n" + + "
Solve Time " + solve.str() + "\n" + + "
Total Time " + total.str() + "\n" + +"
Error(Iter)" + error.str() + "\n"; + +} + +template +int SelectSolvers(const SparseMatrix&A, unsigned int sym, Matrix& b, const Matrix& refX, Stats *stat) +{ + typedef SparseMatrix SpMat; + // First, deal with Nonsymmetric and symmetric matrices + int best_time_id = 0; + double best_time_val = 0.0; + //UMFPACK + #ifdef EIGEN_UMFPACK_SUPPORT + { + cout << "Solving with UMFPACK LU ... \n"; + UmfPackLU solver; + stat[EIGEN_UMFPACK] = call_directsolver(solver, A, b, refX); + printStatItem(stat, EIGEN_UMFPACK, best_time_id, best_time_val); + } + #endif + //SuperLU + #ifdef EIGEN_SUPERLU_SUPPORT + { + cout << "\nSolving with SUPERLU ... \n"; + SuperLU solver; + stat[EIGEN_SUPERLU] = call_directsolver(solver, A, b, refX); + printStatItem(stat, EIGEN_SUPERLU, best_time_id, best_time_val); + } + #endif + + // PaStix LU + #ifdef EIGEN_PASTIX_SUPPORT + { + cout << "\nSolving with PASTIX LU ... \n"; + PastixLU solver; + stat[EIGEN_PASTIX] = call_directsolver(solver, A, b, refX) ; + printStatItem(stat, EIGEN_PASTIX, best_time_id, best_time_val); + } + #endif + + //PARDISO LU + #ifdef EIGEN_PARDISO_SUPPORT + { + cout << "\nSolving with PARDISO LU ... \n"; + PardisoLU solver; + stat[EIGEN_PARDISO] = call_directsolver(solver, A, b, refX); + printStatItem(stat, EIGEN_PARDISO, best_time_id, best_time_val); + } + #endif + + + + //BiCGSTAB + { + cout << "\nSolving with BiCGSTAB ... \n"; + BiCGSTAB solver; + stat[EIGEN_BICGSTAB] = call_itersolver(solver, A, b, refX); + stat[EIGEN_BICGSTAB].isIterative = 1; + printStatItem(stat, EIGEN_BICGSTAB, best_time_id, best_time_val); + } + //BiCGSTAB+ILUT + { + cout << "\nSolving with BiCGSTAB and ILUT ... \n"; + BiCGSTAB > solver; + stat[EIGEN_BICGSTAB_ILUT] = call_itersolver(solver, A, b, refX); + stat[EIGEN_BICGSTAB_ILUT].isIterative = 1; + printStatItem(stat, EIGEN_BICGSTAB_ILUT, best_time_id, best_time_val); + } + + + //GMRES +// { +// cout << "\nSolving with GMRES ... \n"; +// GMRES solver; +// stat[EIGEN_GMRES] = call_itersolver(solver, A, b, refX); +// stat[EIGEN_GMRES].isIterative = 1; +// printStatItem(stat, EIGEN_GMRES, best_time_id, best_time_val); +// } + //GMRES+ILUT + { + cout << "\nSolving with GMRES and ILUT ... \n"; + GMRES > solver; + stat[EIGEN_GMRES_ILUT] = call_itersolver(solver, A, b, refX); + stat[EIGEN_GMRES_ILUT].isIterative = 1; + printStatItem(stat, EIGEN_GMRES_ILUT, best_time_id, best_time_val); + } + + // Hermitian and not necessarily positive-definites + if (sym != NonSymmetric) + { + // Internal Cholesky + { + cout << "\nSolving with Simplicial LDLT ... \n"; + SimplicialLDLT solver; + stat[EIGEN_SIMPLICIAL_LDLT] = call_directsolver(solver, A, b, refX); + printStatItem(stat, EIGEN_SIMPLICIAL_LDLT, best_time_id, best_time_val); + } + + // CHOLMOD + #ifdef EIGEN_CHOLMOD_SUPPORT + { + cout << "\nSolving with CHOLMOD LDLT ... \n"; + CholmodDecomposition solver; + solver.setMode(CholmodLDLt); + stat[EIGEN_CHOLMOD_LDLT] = call_directsolver(solver, A, b, refX); + printStatItem(stat,EIGEN_CHOLMOD_LDLT, best_time_id, best_time_val); + } + #endif + + //PASTIX LLT + #ifdef EIGEN_PASTIX_SUPPORT + { + cout << "\nSolving with PASTIX LDLT ... \n"; + PastixLDLT solver; + stat[EIGEN_PASTIX_LDLT] = call_directsolver(solver, A, b, refX); + printStatItem(stat,EIGEN_PASTIX_LDLT, best_time_id, best_time_val); + } + #endif + + //PARDISO LLT + #ifdef EIGEN_PARDISO_SUPPORT + { + cout << "\nSolving with PARDISO LDLT ... 
\n"; + PardisoLDLT solver; + stat[EIGEN_PARDISO_LDLT] = call_directsolver(solver, A, b, refX); + printStatItem(stat,EIGEN_PARDISO_LDLT, best_time_id, best_time_val); + } + #endif + } + + // Now, symmetric POSITIVE DEFINITE matrices + if (sym == SPD) + { + + //Internal Sparse Cholesky + { + cout << "\nSolving with SIMPLICIAL LLT ... \n"; + SimplicialLLT solver; + stat[EIGEN_SIMPLICIAL_LLT] = call_directsolver(solver, A, b, refX); + printStatItem(stat,EIGEN_SIMPLICIAL_LLT, best_time_id, best_time_val); + } + + // CHOLMOD + #ifdef EIGEN_CHOLMOD_SUPPORT + { + // CholMOD SuperNodal LLT + cout << "\nSolving with CHOLMOD LLT (Supernodal)... \n"; + CholmodDecomposition solver; + solver.setMode(CholmodSupernodalLLt); + stat[EIGEN_CHOLMOD_SUPERNODAL_LLT] = call_directsolver(solver, A, b, refX); + printStatItem(stat,EIGEN_CHOLMOD_SUPERNODAL_LLT, best_time_id, best_time_val); + // CholMod Simplicial LLT + cout << "\nSolving with CHOLMOD LLT (Simplicial) ... \n"; + solver.setMode(CholmodSimplicialLLt); + stat[EIGEN_CHOLMOD_SIMPLICIAL_LLT] = call_directsolver(solver, A, b, refX); + printStatItem(stat,EIGEN_CHOLMOD_SIMPLICIAL_LLT, best_time_id, best_time_val); + } + #endif + + //PASTIX LLT + #ifdef EIGEN_PASTIX_SUPPORT + { + cout << "\nSolving with PASTIX LLT ... \n"; + PastixLLT solver; + stat[EIGEN_PASTIX_LLT] = call_directsolver(solver, A, b, refX); + printStatItem(stat,EIGEN_PASTIX_LLT, best_time_id, best_time_val); + } + #endif + + //PARDISO LLT + #ifdef EIGEN_PARDISO_SUPPORT + { + cout << "\nSolving with PARDISO LLT ... \n"; + PardisoLLT solver; + stat[EIGEN_PARDISO_LLT] = call_directsolver(solver, A, b, refX); + printStatItem(stat,EIGEN_PARDISO_LLT, best_time_id, best_time_val); + } + #endif + + // Internal CG + { + cout << "\nSolving with CG ... \n"; + ConjugateGradient solver; + stat[EIGEN_CG] = call_itersolver(solver, A, b, refX); + stat[EIGEN_CG].isIterative = 1; + printStatItem(stat,EIGEN_CG, best_time_id, best_time_val); + } + //CG+IdentityPreconditioner +// { +// cout << "\nSolving with CG and IdentityPreconditioner ... \n"; +// ConjugateGradient solver; +// stat[EIGEN_CG_PRECOND] = call_itersolver(solver, A, b, refX); +// stat[EIGEN_CG_PRECOND].isIterative = 1; +// printStatItem(stat,EIGEN_CG_PRECOND, best_time_id, best_time_val); +// } + } // End SPD matrices + + return best_time_id; +} + +/* Browse all the matrices available in the specified folder + * and solve the associated linear system. 
+ * The results of each solve are printed in the standard output + * and optionally in the provided html file + */ +template +void Browse_Matrices(const string folder, bool statFileExists, std::string& statFile, int maxiters, double tol) +{ + MaximumIters = maxiters; // Maximum number of iterations, global variable + RelErr = tol; //Relative residual error as stopping criterion for iterative solvers + MatrixMarketIterator it(folder); + Stats stat[EIGEN_ALL_SOLVERS]; + for ( ; it; ++it) + { + for (int i = 0; i < EIGEN_ALL_SOLVERS; i++) + { + stat[i].isavail = 0; + stat[i].isIterative = 0; + } + + int best_time_id; + cout<< "\n\n===================================================== \n"; + cout<< " ====== SOLVING WITH MATRIX " << it.matname() << " ====\n"; + cout<< " =================================================== \n\n"; + Matrix refX; + if(it.hasrefX()) refX = it.refX(); + best_time_id = SelectSolvers(it.matrix(), it.sym(), it.rhs(), refX, &stat[0]); + + if(statFileExists) + { + string statline; + printHtmlStatLine(&stat[0], best_time_id, statline); + std::ofstream statbuf(statFile.c_str(), std::ios::app); + statbuf << "
" << it.matname() << " " + << it.matrix().rows() << " " << it.matrix().nonZeros()<< " "<< statline ; + statbuf.close(); + } + } +} + +bool get_options(int argc, char **args, string option, string* value=0) +{ + int idx = 1, found=false; + while (idx +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_BAND_TRIANGULARSOLVER_H +#define EIGEN_BAND_TRIANGULARSOLVER_H + +namespace internal { + + /* \internal + * Solve Ax=b with A a band triangular matrix + * TODO: extend it to matrices for x abd b */ +template +struct band_solve_triangular_selector; + + +template +struct band_solve_triangular_selector +{ + typedef Map, 0, OuterStride<> > LhsMap; + typedef Map > RhsMap; + enum { IsLower = (Mode&Lower) ? 1 : 0 }; + static void run(Index size, Index k, const LhsScalar* _lhs, Index lhsStride, RhsScalar* _other) + { + const LhsMap lhs(_lhs,size,k+1,OuterStride<>(lhsStride)); + RhsMap other(_other,size,1); + typename internal::conditional< + ConjLhs, + const CwiseUnaryOp,LhsMap>, + const LhsMap&> + ::type cjLhs(lhs); + + for(int col=0 ; col0) + other.coeffRef(i,col) -= cjLhs.row(i).segment(actual_start,actual_k).transpose() + .cwiseProduct(other.col(col).segment(IsLower ? i-actual_k : i+1,actual_k)).sum(); + + if((Mode&UnitDiag)==0) + other.coeffRef(i,col) /= cjLhs(i,IsLower ? k : 0); + } + } + } + +}; + +template +struct band_solve_triangular_selector +{ + typedef Map, 0, OuterStride<> > LhsMap; + typedef Map > RhsMap; + enum { IsLower = (Mode&Lower) ? 1 : 0 }; + static void run(Index size, Index k, const LhsScalar* _lhs, Index lhsStride, RhsScalar* _other) + { + const LhsMap lhs(_lhs,k+1,size,OuterStride<>(lhsStride)); + RhsMap other(_other,size,1); + typename internal::conditional< + ConjLhs, + const CwiseUnaryOp,LhsMap>, + const LhsMap&> + ::type cjLhs(lhs); + + for(int col=0 ; col0) + other.col(col).segment(IsLower ? 
i+1 : i-actual_k, actual_k) + -= other.coeff(i,col) * cjLhs.col(i).segment(actual_start,actual_k); + + } + } + } +}; + + +} // end namespace internal + +#endif // EIGEN_BAND_TRIANGULARSOLVER_H diff --git a/gtsam/3rdparty/Eigen/blas/CMakeLists.txt b/gtsam/3rdparty/Eigen/blas/CMakeLists.txt index 5f2e3cd32..453d5874c 100644 --- a/gtsam/3rdparty/Eigen/blas/CMakeLists.txt +++ b/gtsam/3rdparty/Eigen/blas/CMakeLists.txt @@ -1,27 +1,34 @@ project(EigenBlas CXX) -if( NOT DEFINED EIGEN_Fortran_COMPILER_WORKS OR EIGEN_Fortran_COMPILER_WORKS) +include("../cmake/language_support.cmake") +workaround_9220(Fortran EIGEN_Fortran_COMPILER_WORKS) + +if(EIGEN_Fortran_COMPILER_WORKS) enable_language(Fortran OPTIONAL) - - if(CMAKE_Fortran_COMPILER_WORKS) - set(EIGEN_Fortran_COMPILER_WORKS TRUE CACHE INTERNAL "workaround cmake's enable_language issue") - else() - set(EIGEN_Fortran_COMPILER_WORKS FALSE CACHE INTERNAL "workaround cmake's enable_language issue") - endif() - endif() -if(CMAKE_Fortran_COMPILER_WORKS) - add_custom_target(blas) -set(EigenBlas_SRCS single.cpp double.cpp complex_single.cpp complex_double.cpp xerbla.cpp +set(EigenBlas_SRCS single.cpp double.cpp complex_single.cpp complex_double.cpp xerbla.cpp) + +if(EIGEN_Fortran_COMPILER_WORKS) + +set(EigenBlas_SRCS ${EigenBlas_SRCS} complexdots.f srotm.f srotmg.f drotm.f drotmg.f - lsame.f chpr2.f ctbsv.f dspmv.f dtbmv.f dtpsv.f ssbmv.f sspr.f stpmv.f zhpr2.f ztbsv.f chbmv.f chpr.f ctpmv.f dspr2.f dtbsv.f sspmv.f stbmv.f stpsv.f zhbmv.f zhpr.f ztpmv.f chpmv.f ctbmv.f ctpsv.f dsbmv.f dspr.f dtpmv.f sspr2.f stbsv.f zhpmv.f ztbmv.f ztpsv.f + lsame.f chpr2.f dspmv.f dtpsv.f ssbmv.f sspr.f stpmv.f + zhpr2.f chbmv.f chpr.f ctpmv.f dspr2.f sspmv.f stpsv.f + zhbmv.f zhpr.f ztpmv.f chpmv.f ctpsv.f dsbmv.f dspr.f dtpmv.f sspr2.f + zhpmv.f ztpsv.f + dtbmv.f stbmv.f ctbmv.f ztbmv.f ) +else() + +message(WARNING " No fortran compiler has been detected, the blas build will be incomplete.") + +endif() add_library(eigen_blas_static ${EigenBlas_SRCS}) add_library(eigen_blas SHARED ${EigenBlas_SRCS}) @@ -33,11 +40,12 @@ endif() add_dependencies(blas eigen_blas eigen_blas_static) -install(TARGETS eigen_blas +install(TARGETS eigen_blas eigen_blas_static RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib) +if(EIGEN_Fortran_COMPILER_WORKS) if(EIGEN_LEAVE_TEST_IN_ALL_TARGET) add_subdirectory(testing) # can't do EXCLUDE_FROM_ALL here, breaks CTest @@ -45,4 +53,5 @@ else() add_subdirectory(testing EXCLUDE_FROM_ALL) endif() -endif(CMAKE_Fortran_COMPILER_WORKS) +endif() + diff --git a/gtsam/3rdparty/Eigen/blas/common.h b/gtsam/3rdparty/Eigen/blas/common.h index ed93642f9..ada833a90 100644 --- a/gtsam/3rdparty/Eigen/blas/common.h +++ b/gtsam/3rdparty/Eigen/blas/common.h @@ -32,16 +32,8 @@ #error the token SCALAR must be defined to compile this file #endif -#ifdef __cplusplus -extern "C" -{ -#endif +#include -#include "../bench/btl/libs/C_BLAS/blas.h" - -#ifdef __cplusplus -} -#endif #define NOTR 0 #define TR 1 @@ -93,6 +85,12 @@ inline bool check_uplo(const char* uplo) #include #include + + +namespace Eigen { +#include "BandTriangularSolver.h" +} + using namespace Eigen; typedef SCALAR Scalar; diff --git a/gtsam/3rdparty/Eigen/blas/ctbsv.f b/gtsam/3rdparty/Eigen/blas/ctbsv.f deleted file mode 100644 index 853b9d75e..000000000 --- a/gtsam/3rdparty/Eigen/blas/ctbsv.f +++ /dev/null @@ -1,370 +0,0 @@ - SUBROUTINE CTBSV(UPLO,TRANS,DIAG,N,K,A,LDA,X,INCX) -* .. Scalar Arguments .. - INTEGER INCX,K,LDA,N - CHARACTER DIAG,TRANS,UPLO -* .. -* .. Array Arguments .. 
- COMPLEX A(LDA,*),X(*) -* .. -* -* Purpose -* ======= -* -* CTBSV solves one of the systems of equations -* -* A*x = b, or A'*x = b, or conjg( A' )*x = b, -* -* where b and x are n element vectors and A is an n by n unit, or -* non-unit, upper or lower triangular band matrix, with ( k + 1 ) -* diagonals. -* -* No test for singularity or near-singularity is included in this -* routine. Such tests must be performed before calling this routine. -* -* Arguments -* ========== -* -* UPLO - CHARACTER*1. -* On entry, UPLO specifies whether the matrix is an upper or -* lower triangular matrix as follows: -* -* UPLO = 'U' or 'u' A is an upper triangular matrix. -* -* UPLO = 'L' or 'l' A is a lower triangular matrix. -* -* Unchanged on exit. -* -* TRANS - CHARACTER*1. -* On entry, TRANS specifies the equations to be solved as -* follows: -* -* TRANS = 'N' or 'n' A*x = b. -* -* TRANS = 'T' or 't' A'*x = b. -* -* TRANS = 'C' or 'c' conjg( A' )*x = b. -* -* Unchanged on exit. -* -* DIAG - CHARACTER*1. -* On entry, DIAG specifies whether or not A is unit -* triangular as follows: -* -* DIAG = 'U' or 'u' A is assumed to be unit triangular. -* -* DIAG = 'N' or 'n' A is not assumed to be unit -* triangular. -* -* Unchanged on exit. -* -* N - INTEGER. -* On entry, N specifies the order of the matrix A. -* N must be at least zero. -* Unchanged on exit. -* -* K - INTEGER. -* On entry with UPLO = 'U' or 'u', K specifies the number of -* super-diagonals of the matrix A. -* On entry with UPLO = 'L' or 'l', K specifies the number of -* sub-diagonals of the matrix A. -* K must satisfy 0 .le. K. -* Unchanged on exit. -* -* A - COMPLEX array of DIMENSION ( LDA, n ). -* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) -* by n part of the array A must contain the upper triangular -* band part of the matrix of coefficients, supplied column by -* column, with the leading diagonal of the matrix in row -* ( k + 1 ) of the array, the first super-diagonal starting at -* position 2 in row k, and so on. The top left k by k triangle -* of the array A is not referenced. -* The following program segment will transfer an upper -* triangular band matrix from conventional full matrix storage -* to band storage: -* -* DO 20, J = 1, N -* M = K + 1 - J -* DO 10, I = MAX( 1, J - K ), J -* A( M + I, J ) = matrix( I, J ) -* 10 CONTINUE -* 20 CONTINUE -* -* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) -* by n part of the array A must contain the lower triangular -* band part of the matrix of coefficients, supplied column by -* column, with the leading diagonal of the matrix in row 1 of -* the array, the first sub-diagonal starting at position 1 in -* row 2, and so on. The bottom right k by k triangle of the -* array A is not referenced. -* The following program segment will transfer a lower -* triangular band matrix from conventional full matrix storage -* to band storage: -* -* DO 20, J = 1, N -* M = 1 - J -* DO 10, I = J, MIN( N, J + K ) -* A( M + I, J ) = matrix( I, J ) -* 10 CONTINUE -* 20 CONTINUE -* -* Note that when DIAG = 'U' or 'u' the elements of the array A -* corresponding to the diagonal elements of the matrix are not -* referenced, but are assumed to be unity. -* Unchanged on exit. -* -* LDA - INTEGER. -* On entry, LDA specifies the first dimension of A as declared -* in the calling (sub) program. LDA must be at least -* ( k + 1 ). -* Unchanged on exit. -* -* X - COMPLEX array of dimension at least -* ( 1 + ( n - 1 )*abs( INCX ) ). 
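The reference ctbsv/dtbsv routines being removed here document the BLAS band storage convention: an n-by-n triangular matrix with k off-diagonals is packed, diagonal by diagonal, into a (k+1)-by-n array. The packing loop given in the comments above, translated to zero-based C++ for the upper-triangular case (a sketch; Eigen is used only for convenience):

    #include <Eigen/Core>
    #include <algorithm>

    // band(k + i - j, j) holds full(i, j): row k of 'band' carries the main
    // diagonal, row k-1 the first super-diagonal, and so on.
    Eigen::MatrixXd pack_upper_band(const Eigen::MatrixXd& full, int k)
    {
      const int n = int(full.cols());
      Eigen::MatrixXd band = Eigen::MatrixXd::Zero(k + 1, n);
      for (int j = 0; j < n; ++j)
        for (int i = std::max(0, j - k); i <= j; ++i)
          band(k + i - j, j) = full(i, j);
      return band;
    }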
-* Before entry, the incremented array X must contain the n -* element right-hand side vector b. On exit, X is overwritten -* with the solution vector x. -* -* INCX - INTEGER. -* On entry, INCX specifies the increment for the elements of -* X. INCX must not be zero. -* Unchanged on exit. -* -* Further Details -* =============== -* -* Level 2 Blas routine. -* -* -- Written on 22-October-1986. -* Jack Dongarra, Argonne National Lab. -* Jeremy Du Croz, Nag Central Office. -* Sven Hammarling, Nag Central Office. -* Richard Hanson, Sandia National Labs. -* -* ===================================================================== -* -* .. Parameters .. - COMPLEX ZERO - PARAMETER (ZERO= (0.0E+0,0.0E+0)) -* .. -* .. Local Scalars .. - COMPLEX TEMP - INTEGER I,INFO,IX,J,JX,KPLUS1,KX,L - LOGICAL NOCONJ,NOUNIT -* .. -* .. External Functions .. - LOGICAL LSAME - EXTERNAL LSAME -* .. -* .. External Subroutines .. - EXTERNAL XERBLA -* .. -* .. Intrinsic Functions .. - INTRINSIC CONJG,MAX,MIN -* .. -* -* Test the input parameters. -* - INFO = 0 - IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN - INFO = 1 - ELSE IF (.NOT.LSAME(TRANS,'N') .AND. .NOT.LSAME(TRANS,'T') .AND. - + .NOT.LSAME(TRANS,'C')) THEN - INFO = 2 - ELSE IF (.NOT.LSAME(DIAG,'U') .AND. .NOT.LSAME(DIAG,'N')) THEN - INFO = 3 - ELSE IF (N.LT.0) THEN - INFO = 4 - ELSE IF (K.LT.0) THEN - INFO = 5 - ELSE IF (LDA.LT. (K+1)) THEN - INFO = 7 - ELSE IF (INCX.EQ.0) THEN - INFO = 9 - END IF - IF (INFO.NE.0) THEN - CALL XERBLA('CTBSV ',INFO) - RETURN - END IF -* -* Quick return if possible. -* - IF (N.EQ.0) RETURN -* - NOCONJ = LSAME(TRANS,'T') - NOUNIT = LSAME(DIAG,'N') -* -* Set up the start point in X if the increment is not unity. This -* will be ( N - 1 )*INCX too small for descending loops. -* - IF (INCX.LE.0) THEN - KX = 1 - (N-1)*INCX - ELSE IF (INCX.NE.1) THEN - KX = 1 - END IF -* -* Start the operations. In this version the elements of A are -* accessed by sequentially with one pass through A. -* - IF (LSAME(TRANS,'N')) THEN -* -* Form x := inv( A )*x. -* - IF (LSAME(UPLO,'U')) THEN - KPLUS1 = K + 1 - IF (INCX.EQ.1) THEN - DO 20 J = N,1,-1 - IF (X(J).NE.ZERO) THEN - L = KPLUS1 - J - IF (NOUNIT) X(J) = X(J)/A(KPLUS1,J) - TEMP = X(J) - DO 10 I = J - 1,MAX(1,J-K),-1 - X(I) = X(I) - TEMP*A(L+I,J) - 10 CONTINUE - END IF - 20 CONTINUE - ELSE - KX = KX + (N-1)*INCX - JX = KX - DO 40 J = N,1,-1 - KX = KX - INCX - IF (X(JX).NE.ZERO) THEN - IX = KX - L = KPLUS1 - J - IF (NOUNIT) X(JX) = X(JX)/A(KPLUS1,J) - TEMP = X(JX) - DO 30 I = J - 1,MAX(1,J-K),-1 - X(IX) = X(IX) - TEMP*A(L+I,J) - IX = IX - INCX - 30 CONTINUE - END IF - JX = JX - INCX - 40 CONTINUE - END IF - ELSE - IF (INCX.EQ.1) THEN - DO 60 J = 1,N - IF (X(J).NE.ZERO) THEN - L = 1 - J - IF (NOUNIT) X(J) = X(J)/A(1,J) - TEMP = X(J) - DO 50 I = J + 1,MIN(N,J+K) - X(I) = X(I) - TEMP*A(L+I,J) - 50 CONTINUE - END IF - 60 CONTINUE - ELSE - JX = KX - DO 80 J = 1,N - KX = KX + INCX - IF (X(JX).NE.ZERO) THEN - IX = KX - L = 1 - J - IF (NOUNIT) X(JX) = X(JX)/A(1,J) - TEMP = X(JX) - DO 70 I = J + 1,MIN(N,J+K) - X(IX) = X(IX) - TEMP*A(L+I,J) - IX = IX + INCX - 70 CONTINUE - END IF - JX = JX + INCX - 80 CONTINUE - END IF - END IF - ELSE -* -* Form x := inv( A' )*x or x := inv( conjg( A') )*x. 
-* - IF (LSAME(UPLO,'U')) THEN - KPLUS1 = K + 1 - IF (INCX.EQ.1) THEN - DO 110 J = 1,N - TEMP = X(J) - L = KPLUS1 - J - IF (NOCONJ) THEN - DO 90 I = MAX(1,J-K),J - 1 - TEMP = TEMP - A(L+I,J)*X(I) - 90 CONTINUE - IF (NOUNIT) TEMP = TEMP/A(KPLUS1,J) - ELSE - DO 100 I = MAX(1,J-K),J - 1 - TEMP = TEMP - CONJG(A(L+I,J))*X(I) - 100 CONTINUE - IF (NOUNIT) TEMP = TEMP/CONJG(A(KPLUS1,J)) - END IF - X(J) = TEMP - 110 CONTINUE - ELSE - JX = KX - DO 140 J = 1,N - TEMP = X(JX) - IX = KX - L = KPLUS1 - J - IF (NOCONJ) THEN - DO 120 I = MAX(1,J-K),J - 1 - TEMP = TEMP - A(L+I,J)*X(IX) - IX = IX + INCX - 120 CONTINUE - IF (NOUNIT) TEMP = TEMP/A(KPLUS1,J) - ELSE - DO 130 I = MAX(1,J-K),J - 1 - TEMP = TEMP - CONJG(A(L+I,J))*X(IX) - IX = IX + INCX - 130 CONTINUE - IF (NOUNIT) TEMP = TEMP/CONJG(A(KPLUS1,J)) - END IF - X(JX) = TEMP - JX = JX + INCX - IF (J.GT.K) KX = KX + INCX - 140 CONTINUE - END IF - ELSE - IF (INCX.EQ.1) THEN - DO 170 J = N,1,-1 - TEMP = X(J) - L = 1 - J - IF (NOCONJ) THEN - DO 150 I = MIN(N,J+K),J + 1,-1 - TEMP = TEMP - A(L+I,J)*X(I) - 150 CONTINUE - IF (NOUNIT) TEMP = TEMP/A(1,J) - ELSE - DO 160 I = MIN(N,J+K),J + 1,-1 - TEMP = TEMP - CONJG(A(L+I,J))*X(I) - 160 CONTINUE - IF (NOUNIT) TEMP = TEMP/CONJG(A(1,J)) - END IF - X(J) = TEMP - 170 CONTINUE - ELSE - KX = KX + (N-1)*INCX - JX = KX - DO 200 J = N,1,-1 - TEMP = X(JX) - IX = KX - L = 1 - J - IF (NOCONJ) THEN - DO 180 I = MIN(N,J+K),J + 1,-1 - TEMP = TEMP - A(L+I,J)*X(IX) - IX = IX - INCX - 180 CONTINUE - IF (NOUNIT) TEMP = TEMP/A(1,J) - ELSE - DO 190 I = MIN(N,J+K),J + 1,-1 - TEMP = TEMP - CONJG(A(L+I,J))*X(IX) - IX = IX - INCX - 190 CONTINUE - IF (NOUNIT) TEMP = TEMP/CONJG(A(1,J)) - END IF - X(JX) = TEMP - JX = JX - INCX - IF ((N-J).GE.K) KX = KX - INCX - 200 CONTINUE - END IF - END IF - END IF -* - RETURN -* -* End of CTBSV . -* - END diff --git a/gtsam/3rdparty/Eigen/blas/dtbsv.f b/gtsam/3rdparty/Eigen/blas/dtbsv.f deleted file mode 100644 index cfeb0b82b..000000000 --- a/gtsam/3rdparty/Eigen/blas/dtbsv.f +++ /dev/null @@ -1,339 +0,0 @@ - SUBROUTINE DTBSV(UPLO,TRANS,DIAG,N,K,A,LDA,X,INCX) -* .. Scalar Arguments .. - INTEGER INCX,K,LDA,N - CHARACTER DIAG,TRANS,UPLO -* .. -* .. Array Arguments .. - DOUBLE PRECISION A(LDA,*),X(*) -* .. -* -* Purpose -* ======= -* -* DTBSV solves one of the systems of equations -* -* A*x = b, or A'*x = b, -* -* where b and x are n element vectors and A is an n by n unit, or -* non-unit, upper or lower triangular band matrix, with ( k + 1 ) -* diagonals. -* -* No test for singularity or near-singularity is included in this -* routine. Such tests must be performed before calling this routine. -* -* Arguments -* ========== -* -* UPLO - CHARACTER*1. -* On entry, UPLO specifies whether the matrix is an upper or -* lower triangular matrix as follows: -* -* UPLO = 'U' or 'u' A is an upper triangular matrix. -* -* UPLO = 'L' or 'l' A is a lower triangular matrix. -* -* Unchanged on exit. -* -* TRANS - CHARACTER*1. -* On entry, TRANS specifies the equations to be solved as -* follows: -* -* TRANS = 'N' or 'n' A*x = b. -* -* TRANS = 'T' or 't' A'*x = b. -* -* TRANS = 'C' or 'c' A'*x = b. -* -* Unchanged on exit. -* -* DIAG - CHARACTER*1. -* On entry, DIAG specifies whether or not A is unit -* triangular as follows: -* -* DIAG = 'U' or 'u' A is assumed to be unit triangular. -* -* DIAG = 'N' or 'n' A is not assumed to be unit -* triangular. -* -* Unchanged on exit. -* -* N - INTEGER. -* On entry, N specifies the order of the matrix A. -* N must be at least zero. -* Unchanged on exit. -* -* K - INTEGER. 
-* On entry with UPLO = 'U' or 'u', K specifies the number of -* super-diagonals of the matrix A. -* On entry with UPLO = 'L' or 'l', K specifies the number of -* sub-diagonals of the matrix A. -* K must satisfy 0 .le. K. -* Unchanged on exit. -* -* A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). -* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) -* by n part of the array A must contain the upper triangular -* band part of the matrix of coefficients, supplied column by -* column, with the leading diagonal of the matrix in row -* ( k + 1 ) of the array, the first super-diagonal starting at -* position 2 in row k, and so on. The top left k by k triangle -* of the array A is not referenced. -* The following program segment will transfer an upper -* triangular band matrix from conventional full matrix storage -* to band storage: -* -* DO 20, J = 1, N -* M = K + 1 - J -* DO 10, I = MAX( 1, J - K ), J -* A( M + I, J ) = matrix( I, J ) -* 10 CONTINUE -* 20 CONTINUE -* -* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) -* by n part of the array A must contain the lower triangular -* band part of the matrix of coefficients, supplied column by -* column, with the leading diagonal of the matrix in row 1 of -* the array, the first sub-diagonal starting at position 1 in -* row 2, and so on. The bottom right k by k triangle of the -* array A is not referenced. -* The following program segment will transfer a lower -* triangular band matrix from conventional full matrix storage -* to band storage: -* -* DO 20, J = 1, N -* M = 1 - J -* DO 10, I = J, MIN( N, J + K ) -* A( M + I, J ) = matrix( I, J ) -* 10 CONTINUE -* 20 CONTINUE -* -* Note that when DIAG = 'U' or 'u' the elements of the array A -* corresponding to the diagonal elements of the matrix are not -* referenced, but are assumed to be unity. -* Unchanged on exit. -* -* LDA - INTEGER. -* On entry, LDA specifies the first dimension of A as declared -* in the calling (sub) program. LDA must be at least -* ( k + 1 ). -* Unchanged on exit. -* -* X - DOUBLE PRECISION array of dimension at least -* ( 1 + ( n - 1 )*abs( INCX ) ). -* Before entry, the incremented array X must contain the n -* element right-hand side vector b. On exit, X is overwritten -* with the solution vector x. -* -* INCX - INTEGER. -* On entry, INCX specifies the increment for the elements of -* X. INCX must not be zero. -* Unchanged on exit. -* -* Further Details -* =============== -* -* Level 2 Blas routine. -* -* -- Written on 22-October-1986. -* Jack Dongarra, Argonne National Lab. -* Jeremy Du Croz, Nag Central Office. -* Sven Hammarling, Nag Central Office. -* Richard Hanson, Sandia National Labs. -* -* ===================================================================== -* -* .. Parameters .. - DOUBLE PRECISION ZERO - PARAMETER (ZERO=0.0D+0) -* .. -* .. Local Scalars .. - DOUBLE PRECISION TEMP - INTEGER I,INFO,IX,J,JX,KPLUS1,KX,L - LOGICAL NOUNIT -* .. -* .. External Functions .. - LOGICAL LSAME - EXTERNAL LSAME -* .. -* .. External Subroutines .. - EXTERNAL XERBLA -* .. -* .. Intrinsic Functions .. - INTRINSIC MAX,MIN -* .. -* -* Test the input parameters. -* - INFO = 0 - IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN - INFO = 1 - ELSE IF (.NOT.LSAME(TRANS,'N') .AND. .NOT.LSAME(TRANS,'T') .AND. - + .NOT.LSAME(TRANS,'C')) THEN - INFO = 2 - ELSE IF (.NOT.LSAME(DIAG,'U') .AND. .NOT.LSAME(DIAG,'N')) THEN - INFO = 3 - ELSE IF (N.LT.0) THEN - INFO = 4 - ELSE IF (K.LT.0) THEN - INFO = 5 - ELSE IF (LDA.LT. 
(K+1)) THEN - INFO = 7 - ELSE IF (INCX.EQ.0) THEN - INFO = 9 - END IF - IF (INFO.NE.0) THEN - CALL XERBLA('DTBSV ',INFO) - RETURN - END IF -* -* Quick return if possible. -* - IF (N.EQ.0) RETURN -* - NOUNIT = LSAME(DIAG,'N') -* -* Set up the start point in X if the increment is not unity. This -* will be ( N - 1 )*INCX too small for descending loops. -* - IF (INCX.LE.0) THEN - KX = 1 - (N-1)*INCX - ELSE IF (INCX.NE.1) THEN - KX = 1 - END IF -* -* Start the operations. In this version the elements of A are -* accessed by sequentially with one pass through A. -* - IF (LSAME(TRANS,'N')) THEN -* -* Form x := inv( A )*x. -* - IF (LSAME(UPLO,'U')) THEN - KPLUS1 = K + 1 - IF (INCX.EQ.1) THEN - DO 20 J = N,1,-1 - IF (X(J).NE.ZERO) THEN - L = KPLUS1 - J - IF (NOUNIT) X(J) = X(J)/A(KPLUS1,J) - TEMP = X(J) - DO 10 I = J - 1,MAX(1,J-K),-1 - X(I) = X(I) - TEMP*A(L+I,J) - 10 CONTINUE - END IF - 20 CONTINUE - ELSE - KX = KX + (N-1)*INCX - JX = KX - DO 40 J = N,1,-1 - KX = KX - INCX - IF (X(JX).NE.ZERO) THEN - IX = KX - L = KPLUS1 - J - IF (NOUNIT) X(JX) = X(JX)/A(KPLUS1,J) - TEMP = X(JX) - DO 30 I = J - 1,MAX(1,J-K),-1 - X(IX) = X(IX) - TEMP*A(L+I,J) - IX = IX - INCX - 30 CONTINUE - END IF - JX = JX - INCX - 40 CONTINUE - END IF - ELSE - IF (INCX.EQ.1) THEN - DO 60 J = 1,N - IF (X(J).NE.ZERO) THEN - L = 1 - J - IF (NOUNIT) X(J) = X(J)/A(1,J) - TEMP = X(J) - DO 50 I = J + 1,MIN(N,J+K) - X(I) = X(I) - TEMP*A(L+I,J) - 50 CONTINUE - END IF - 60 CONTINUE - ELSE - JX = KX - DO 80 J = 1,N - KX = KX + INCX - IF (X(JX).NE.ZERO) THEN - IX = KX - L = 1 - J - IF (NOUNIT) X(JX) = X(JX)/A(1,J) - TEMP = X(JX) - DO 70 I = J + 1,MIN(N,J+K) - X(IX) = X(IX) - TEMP*A(L+I,J) - IX = IX + INCX - 70 CONTINUE - END IF - JX = JX + INCX - 80 CONTINUE - END IF - END IF - ELSE -* -* Form x := inv( A')*x. -* - IF (LSAME(UPLO,'U')) THEN - KPLUS1 = K + 1 - IF (INCX.EQ.1) THEN - DO 100 J = 1,N - TEMP = X(J) - L = KPLUS1 - J - DO 90 I = MAX(1,J-K),J - 1 - TEMP = TEMP - A(L+I,J)*X(I) - 90 CONTINUE - IF (NOUNIT) TEMP = TEMP/A(KPLUS1,J) - X(J) = TEMP - 100 CONTINUE - ELSE - JX = KX - DO 120 J = 1,N - TEMP = X(JX) - IX = KX - L = KPLUS1 - J - DO 110 I = MAX(1,J-K),J - 1 - TEMP = TEMP - A(L+I,J)*X(IX) - IX = IX + INCX - 110 CONTINUE - IF (NOUNIT) TEMP = TEMP/A(KPLUS1,J) - X(JX) = TEMP - JX = JX + INCX - IF (J.GT.K) KX = KX + INCX - 120 CONTINUE - END IF - ELSE - IF (INCX.EQ.1) THEN - DO 140 J = N,1,-1 - TEMP = X(J) - L = 1 - J - DO 130 I = MIN(N,J+K),J + 1,-1 - TEMP = TEMP - A(L+I,J)*X(I) - 130 CONTINUE - IF (NOUNIT) TEMP = TEMP/A(1,J) - X(J) = TEMP - 140 CONTINUE - ELSE - KX = KX + (N-1)*INCX - JX = KX - DO 160 J = N,1,-1 - TEMP = X(JX) - IX = KX - L = 1 - J - DO 150 I = MIN(N,J+K),J + 1,-1 - TEMP = TEMP - A(L+I,J)*X(IX) - IX = IX - INCX - 150 CONTINUE - IF (NOUNIT) TEMP = TEMP/A(1,J) - X(JX) = TEMP - JX = JX - INCX - IF ((N-J).GE.K) KX = KX - INCX - 160 CONTINUE - END IF - END IF - END IF -* - RETURN -* -* End of DTBSV . 
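Note: the deleted reference routines (ctbsv, dtbsv here, and stbsv/ztbsv further below) all document and implement the same banded back-substitution over the column-major band storage described in their headers. The following is a minimal C++ sketch of that algorithm for the simplest case only (real scalars, UPLO = 'U', TRANS = 'N', non-unit diagonal, INCX = 1); it illustrates the storage scheme and loop structure, and is not the replacement code this patch introduces.

    #include <algorithm>

    // Solve A*x = b in place, where A is an n-by-n upper triangular band
    // matrix with k super-diagonals stored column by column in BLAS band
    // format: element A(i,j) lives in a[(k + i - j) + j*lda] (0-based), so
    // the main diagonal occupies row k of the (k+1)-by-n array. Assumes a
    // non-unit diagonal and unit stride in x.
    void upper_band_solve(int n, int k, const double* a, int lda, double* x)
    {
      for (int j = n - 1; j >= 0; --j) {
        if (x[j] == 0.0) continue;
        x[j] /= a[k + j * lda];                       // divide by A(j,j)
        const double temp = x[j];
        for (int i = j - 1; i >= std::max(0, j - k); --i)
          x[i] -= temp * a[(k + i - j) + j * lda];    // eliminate A(i,j)
      }
    }

As in the Fortran, the inner loop touches at most k entries of column j, which is why the solve completes in a single pass over the band.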
-* - END diff --git a/gtsam/3rdparty/Eigen/blas/level2_impl.h b/gtsam/3rdparty/Eigen/blas/level2_impl.h index 8cbc2f424..46a3e7005 100644 --- a/gtsam/3rdparty/Eigen/blas/level2_impl.h +++ b/gtsam/3rdparty/Eigen/blas/level2_impl.h @@ -151,21 +151,21 @@ int EIGEN_BLAS_FUNC(trmv)(char *uplo, char *opa, char *diag, int *n, RealScalar for(int k=0; k<16; ++k) func[k] = 0; - func[NOTR | (UP << 2) | (NUNIT << 3)] = (internal::product_triangular_matrix_vector::run); - func[TR | (UP << 2) | (NUNIT << 3)] = (internal::product_triangular_matrix_vector::run); - func[ADJ | (UP << 2) | (NUNIT << 3)] = (internal::product_triangular_matrix_vector::run); + func[NOTR | (UP << 2) | (NUNIT << 3)] = (internal::triangular_matrix_vector_product::run); + func[TR | (UP << 2) | (NUNIT << 3)] = (internal::triangular_matrix_vector_product::run); + func[ADJ | (UP << 2) | (NUNIT << 3)] = (internal::triangular_matrix_vector_product::run); - func[NOTR | (LO << 2) | (NUNIT << 3)] = (internal::product_triangular_matrix_vector::run); - func[TR | (LO << 2) | (NUNIT << 3)] = (internal::product_triangular_matrix_vector::run); - func[ADJ | (LO << 2) | (NUNIT << 3)] = (internal::product_triangular_matrix_vector::run); + func[NOTR | (LO << 2) | (NUNIT << 3)] = (internal::triangular_matrix_vector_product::run); + func[TR | (LO << 2) | (NUNIT << 3)] = (internal::triangular_matrix_vector_product::run); + func[ADJ | (LO << 2) | (NUNIT << 3)] = (internal::triangular_matrix_vector_product::run); - func[NOTR | (UP << 2) | (UNIT << 3)] = (internal::product_triangular_matrix_vector::run); - func[TR | (UP << 2) | (UNIT << 3)] = (internal::product_triangular_matrix_vector::run); - func[ADJ | (UP << 2) | (UNIT << 3)] = (internal::product_triangular_matrix_vector::run); + func[NOTR | (UP << 2) | (UNIT << 3)] = (internal::triangular_matrix_vector_product::run); + func[TR | (UP << 2) | (UNIT << 3)] = (internal::triangular_matrix_vector_product::run); + func[ADJ | (UP << 2) | (UNIT << 3)] = (internal::triangular_matrix_vector_product::run); - func[NOTR | (LO << 2) | (UNIT << 3)] = (internal::product_triangular_matrix_vector::run); - func[TR | (LO << 2) | (UNIT << 3)] = (internal::product_triangular_matrix_vector::run); - func[ADJ | (LO << 2) | (UNIT << 3)] = (internal::product_triangular_matrix_vector::run); + func[NOTR | (LO << 2) | (UNIT << 3)] = (internal::triangular_matrix_vector_product::run); + func[TR | (LO << 2) | (UNIT << 3)] = (internal::triangular_matrix_vector_product::run); + func[ADJ | (LO << 2) | (UNIT << 3)] = (internal::triangular_matrix_vector_product::run); init = true; } @@ -271,6 +271,7 @@ int EIGEN_BLAS_FUNC(gbmv)(char *trans, int *m, int *n, int *kl, int *ku, RealSca return 0; } +#if 0 /** TBMV performs one of the matrix-vector operations * * x := A*x, or x := A'*x, @@ -278,10 +279,56 @@ int EIGEN_BLAS_FUNC(gbmv)(char *trans, int *m, int *n, int *kl, int *ku, RealSca * where x is an n element vector and A is an n by n unit, or non-unit, * upper or lower triangular band matrix, with ( k + 1 ) diagonals. 
*/ -// int EIGEN_BLAS_FUNC(tbmv)(char *uplo, char *trans, char *diag, int *n, int *k, RealScalar *a, int *lda, RealScalar *x, int *incx) -// { -// return 1; -// } +int EIGEN_BLAS_FUNC(tbmv)(char *uplo, char *opa, char *diag, int *n, int *k, RealScalar *pa, int *lda, RealScalar *px, int *incx) +{ + Scalar* a = reinterpret_cast(pa); + Scalar* x = reinterpret_cast(px); + int coeff_rows = *k + 1; + + int info = 0; + if(UPLO(*uplo)==INVALID) info = 1; + else if(OP(*opa)==INVALID) info = 2; + else if(DIAG(*diag)==INVALID) info = 3; + else if(*n<0) info = 4; + else if(*k<0) info = 5; + else if(*lda::run); + func[TR | (UP << 2) | (NUNIT << 3)] = (internal::band_solve_triangular_selector::run); + func[ADJ | (UP << 2) | (NUNIT << 3)] = (internal::band_solve_triangular_selector::run); + + func[NOTR | (LO << 2) | (NUNIT << 3)] = (internal::band_solve_triangular_selector::run); + func[TR | (LO << 2) | (NUNIT << 3)] = (internal::band_solve_triangular_selector::run); + func[ADJ | (LO << 2) | (NUNIT << 3)] = (internal::band_solve_triangular_selector::run); + + func[NOTR | (UP << 2) | (UNIT << 3)] = (internal::band_solve_triangular_selector::run); + func[TR | (UP << 2) | (UNIT << 3)] = (internal::band_solve_triangular_selector::run); + func[ADJ | (UP << 2) | (UNIT << 3)] = (internal::band_solve_triangular_selector::run); + + func[NOTR | (LO << 2) | (UNIT << 3)] = (internal::band_solve_triangular_selector::run); + func[TR | (LO << 2) | (UNIT << 3)] = (internal::band_solve_triangular_selector::run); + func[ADJ | (LO << 2) | (UNIT << 3)] = (internal::band_solve_triangular_selector::run); + + init = true; + } + + Scalar* a = reinterpret_cast(pa); + Scalar* x = reinterpret_cast(px); + int coeff_rows = *k+1; + + int info = 0; + if(UPLO(*uplo)==INVALID) info = 1; + else if(OP(*op)==INVALID) info = 2; + else if(DIAG(*diag)==INVALID) info = 3; + else if(*n<0) info = 4; + else if(*k<0) info = 5; + else if(*lda=16 || func[code]==0) + return 0; + + func[code](*n, *k, a, *lda, actual_x); + + if(actual_x!=x) delete[] copy_back(actual_x,x,actual_n,*incx); + + return 0; +} /** DTPMV performs one of the matrix-vector operations * diff --git a/gtsam/3rdparty/Eigen/blas/level3_impl.h b/gtsam/3rdparty/Eigen/blas/level3_impl.h index 0a3aa98b8..4f4f39080 100644 --- a/gtsam/3rdparty/Eigen/blas/level3_impl.h +++ b/gtsam/3rdparty/Eigen/blas/level3_impl.h @@ -81,7 +81,7 @@ int EIGEN_BLAS_FUNC(gemm)(char *opa, char *opb, int *m, int *n, int *k, RealScal int EIGEN_BLAS_FUNC(trsm)(char *side, char *uplo, char *opa, char *diag, int *m, int *n, RealScalar *palpha, RealScalar *pa, int *lda, RealScalar *pb, int *ldb) { // std::cerr << "in trsm " << *side << " " << *uplo << " " << *opa << " " << *diag << " " << *m << "," << *n << " " << *palpha << " " << *lda << " " << *ldb<< "\n"; - typedef void (*functype)(DenseIndex, DenseIndex, const Scalar *, DenseIndex, Scalar *, DenseIndex); + typedef void (*functype)(DenseIndex, DenseIndex, const Scalar *, DenseIndex, Scalar *, DenseIndex, internal::level3_blocking&); static functype func[32]; static bool init = false; @@ -143,11 +143,17 @@ int EIGEN_BLAS_FUNC(trsm)(char *side, char *uplo, char *opa, char *diag, int *m, return xerbla_(SCALAR_SUFFIX_UP"TRSM ",&info,6); int code = OP(*opa) | (SIDE(*side) << 2) | (UPLO(*uplo) << 3) | (DIAG(*diag) << 4); - + if(SIDE(*side)==LEFT) - func[code](*m, *n, a, *lda, b, *ldb); + { + internal::gemm_blocking_space blocking(*m,*n,*m); + func[code](*m, *n, a, *lda, b, *ldb, blocking); + } else - func[code](*n, *m, a, *lda, b, *ldb); + { + 
internal::gemm_blocking_space blocking(*m,*n,*n); + func[code](*n, *m, a, *lda, b, *ldb, blocking); + } if(alpha!=Scalar(1)) matrix(b,*m,*n,*ldb) *= alpha; diff --git a/gtsam/3rdparty/Eigen/blas/stbsv.f b/gtsam/3rdparty/Eigen/blas/stbsv.f deleted file mode 100644 index b846be85c..000000000 --- a/gtsam/3rdparty/Eigen/blas/stbsv.f +++ /dev/null @@ -1,339 +0,0 @@ - SUBROUTINE STBSV(UPLO,TRANS,DIAG,N,K,A,LDA,X,INCX) -* .. Scalar Arguments .. - INTEGER INCX,K,LDA,N - CHARACTER DIAG,TRANS,UPLO -* .. -* .. Array Arguments .. - REAL A(LDA,*),X(*) -* .. -* -* Purpose -* ======= -* -* STBSV solves one of the systems of equations -* -* A*x = b, or A'*x = b, -* -* where b and x are n element vectors and A is an n by n unit, or -* non-unit, upper or lower triangular band matrix, with ( k + 1 ) -* diagonals. -* -* No test for singularity or near-singularity is included in this -* routine. Such tests must be performed before calling this routine. -* -* Arguments -* ========== -* -* UPLO - CHARACTER*1. -* On entry, UPLO specifies whether the matrix is an upper or -* lower triangular matrix as follows: -* -* UPLO = 'U' or 'u' A is an upper triangular matrix. -* -* UPLO = 'L' or 'l' A is a lower triangular matrix. -* -* Unchanged on exit. -* -* TRANS - CHARACTER*1. -* On entry, TRANS specifies the equations to be solved as -* follows: -* -* TRANS = 'N' or 'n' A*x = b. -* -* TRANS = 'T' or 't' A'*x = b. -* -* TRANS = 'C' or 'c' A'*x = b. -* -* Unchanged on exit. -* -* DIAG - CHARACTER*1. -* On entry, DIAG specifies whether or not A is unit -* triangular as follows: -* -* DIAG = 'U' or 'u' A is assumed to be unit triangular. -* -* DIAG = 'N' or 'n' A is not assumed to be unit -* triangular. -* -* Unchanged on exit. -* -* N - INTEGER. -* On entry, N specifies the order of the matrix A. -* N must be at least zero. -* Unchanged on exit. -* -* K - INTEGER. -* On entry with UPLO = 'U' or 'u', K specifies the number of -* super-diagonals of the matrix A. -* On entry with UPLO = 'L' or 'l', K specifies the number of -* sub-diagonals of the matrix A. -* K must satisfy 0 .le. K. -* Unchanged on exit. -* -* A - REAL array of DIMENSION ( LDA, n ). -* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) -* by n part of the array A must contain the upper triangular -* band part of the matrix of coefficients, supplied column by -* column, with the leading diagonal of the matrix in row -* ( k + 1 ) of the array, the first super-diagonal starting at -* position 2 in row k, and so on. The top left k by k triangle -* of the array A is not referenced. -* The following program segment will transfer an upper -* triangular band matrix from conventional full matrix storage -* to band storage: -* -* DO 20, J = 1, N -* M = K + 1 - J -* DO 10, I = MAX( 1, J - K ), J -* A( M + I, J ) = matrix( I, J ) -* 10 CONTINUE -* 20 CONTINUE -* -* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) -* by n part of the array A must contain the lower triangular -* band part of the matrix of coefficients, supplied column by -* column, with the leading diagonal of the matrix in row 1 of -* the array, the first sub-diagonal starting at position 1 in -* row 2, and so on. The bottom right k by k triangle of the -* array A is not referenced. 
-* The following program segment will transfer a lower -* triangular band matrix from conventional full matrix storage -* to band storage: -* -* DO 20, J = 1, N -* M = 1 - J -* DO 10, I = J, MIN( N, J + K ) -* A( M + I, J ) = matrix( I, J ) -* 10 CONTINUE -* 20 CONTINUE -* -* Note that when DIAG = 'U' or 'u' the elements of the array A -* corresponding to the diagonal elements of the matrix are not -* referenced, but are assumed to be unity. -* Unchanged on exit. -* -* LDA - INTEGER. -* On entry, LDA specifies the first dimension of A as declared -* in the calling (sub) program. LDA must be at least -* ( k + 1 ). -* Unchanged on exit. -* -* X - REAL array of dimension at least -* ( 1 + ( n - 1 )*abs( INCX ) ). -* Before entry, the incremented array X must contain the n -* element right-hand side vector b. On exit, X is overwritten -* with the solution vector x. -* -* INCX - INTEGER. -* On entry, INCX specifies the increment for the elements of -* X. INCX must not be zero. -* Unchanged on exit. -* -* Further Details -* =============== -* -* Level 2 Blas routine. -* -* -- Written on 22-October-1986. -* Jack Dongarra, Argonne National Lab. -* Jeremy Du Croz, Nag Central Office. -* Sven Hammarling, Nag Central Office. -* Richard Hanson, Sandia National Labs. -* -* ===================================================================== -* -* .. Parameters .. - REAL ZERO - PARAMETER (ZERO=0.0E+0) -* .. -* .. Local Scalars .. - REAL TEMP - INTEGER I,INFO,IX,J,JX,KPLUS1,KX,L - LOGICAL NOUNIT -* .. -* .. External Functions .. - LOGICAL LSAME - EXTERNAL LSAME -* .. -* .. External Subroutines .. - EXTERNAL XERBLA -* .. -* .. Intrinsic Functions .. - INTRINSIC MAX,MIN -* .. -* -* Test the input parameters. -* - INFO = 0 - IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN - INFO = 1 - ELSE IF (.NOT.LSAME(TRANS,'N') .AND. .NOT.LSAME(TRANS,'T') .AND. - + .NOT.LSAME(TRANS,'C')) THEN - INFO = 2 - ELSE IF (.NOT.LSAME(DIAG,'U') .AND. .NOT.LSAME(DIAG,'N')) THEN - INFO = 3 - ELSE IF (N.LT.0) THEN - INFO = 4 - ELSE IF (K.LT.0) THEN - INFO = 5 - ELSE IF (LDA.LT. (K+1)) THEN - INFO = 7 - ELSE IF (INCX.EQ.0) THEN - INFO = 9 - END IF - IF (INFO.NE.0) THEN - CALL XERBLA('STBSV ',INFO) - RETURN - END IF -* -* Quick return if possible. -* - IF (N.EQ.0) RETURN -* - NOUNIT = LSAME(DIAG,'N') -* -* Set up the start point in X if the increment is not unity. This -* will be ( N - 1 )*INCX too small for descending loops. -* - IF (INCX.LE.0) THEN - KX = 1 - (N-1)*INCX - ELSE IF (INCX.NE.1) THEN - KX = 1 - END IF -* -* Start the operations. In this version the elements of A are -* accessed by sequentially with one pass through A. -* - IF (LSAME(TRANS,'N')) THEN -* -* Form x := inv( A )*x. 
-* - IF (LSAME(UPLO,'U')) THEN - KPLUS1 = K + 1 - IF (INCX.EQ.1) THEN - DO 20 J = N,1,-1 - IF (X(J).NE.ZERO) THEN - L = KPLUS1 - J - IF (NOUNIT) X(J) = X(J)/A(KPLUS1,J) - TEMP = X(J) - DO 10 I = J - 1,MAX(1,J-K),-1 - X(I) = X(I) - TEMP*A(L+I,J) - 10 CONTINUE - END IF - 20 CONTINUE - ELSE - KX = KX + (N-1)*INCX - JX = KX - DO 40 J = N,1,-1 - KX = KX - INCX - IF (X(JX).NE.ZERO) THEN - IX = KX - L = KPLUS1 - J - IF (NOUNIT) X(JX) = X(JX)/A(KPLUS1,J) - TEMP = X(JX) - DO 30 I = J - 1,MAX(1,J-K),-1 - X(IX) = X(IX) - TEMP*A(L+I,J) - IX = IX - INCX - 30 CONTINUE - END IF - JX = JX - INCX - 40 CONTINUE - END IF - ELSE - IF (INCX.EQ.1) THEN - DO 60 J = 1,N - IF (X(J).NE.ZERO) THEN - L = 1 - J - IF (NOUNIT) X(J) = X(J)/A(1,J) - TEMP = X(J) - DO 50 I = J + 1,MIN(N,J+K) - X(I) = X(I) - TEMP*A(L+I,J) - 50 CONTINUE - END IF - 60 CONTINUE - ELSE - JX = KX - DO 80 J = 1,N - KX = KX + INCX - IF (X(JX).NE.ZERO) THEN - IX = KX - L = 1 - J - IF (NOUNIT) X(JX) = X(JX)/A(1,J) - TEMP = X(JX) - DO 70 I = J + 1,MIN(N,J+K) - X(IX) = X(IX) - TEMP*A(L+I,J) - IX = IX + INCX - 70 CONTINUE - END IF - JX = JX + INCX - 80 CONTINUE - END IF - END IF - ELSE -* -* Form x := inv( A')*x. -* - IF (LSAME(UPLO,'U')) THEN - KPLUS1 = K + 1 - IF (INCX.EQ.1) THEN - DO 100 J = 1,N - TEMP = X(J) - L = KPLUS1 - J - DO 90 I = MAX(1,J-K),J - 1 - TEMP = TEMP - A(L+I,J)*X(I) - 90 CONTINUE - IF (NOUNIT) TEMP = TEMP/A(KPLUS1,J) - X(J) = TEMP - 100 CONTINUE - ELSE - JX = KX - DO 120 J = 1,N - TEMP = X(JX) - IX = KX - L = KPLUS1 - J - DO 110 I = MAX(1,J-K),J - 1 - TEMP = TEMP - A(L+I,J)*X(IX) - IX = IX + INCX - 110 CONTINUE - IF (NOUNIT) TEMP = TEMP/A(KPLUS1,J) - X(JX) = TEMP - JX = JX + INCX - IF (J.GT.K) KX = KX + INCX - 120 CONTINUE - END IF - ELSE - IF (INCX.EQ.1) THEN - DO 140 J = N,1,-1 - TEMP = X(J) - L = 1 - J - DO 130 I = MIN(N,J+K),J + 1,-1 - TEMP = TEMP - A(L+I,J)*X(I) - 130 CONTINUE - IF (NOUNIT) TEMP = TEMP/A(1,J) - X(J) = TEMP - 140 CONTINUE - ELSE - KX = KX + (N-1)*INCX - JX = KX - DO 160 J = N,1,-1 - TEMP = X(JX) - IX = KX - L = 1 - J - DO 150 I = MIN(N,J+K),J + 1,-1 - TEMP = TEMP - A(L+I,J)*X(IX) - IX = IX - INCX - 150 CONTINUE - IF (NOUNIT) TEMP = TEMP/A(1,J) - X(JX) = TEMP - JX = JX - INCX - IF ((N-J).GE.K) KX = KX - INCX - 160 CONTINUE - END IF - END IF - END IF -* - RETURN -* -* End of STBSV . 
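Note: the level2_impl.h and level3_impl.h hunks earlier in this patch replace these Fortran entry points with C++ dispatchers: the UPLO/TRANS/DIAG characters are folded into a small integer code that indexes a statically initialised table of function pointers (the same pattern already used for trmv, and extended for trsm with a level3_blocking argument). Below is a simplified, self-contained sketch of that pattern; the enum values and solver signature are stand-ins, not Eigen's internal API.

    // Stand-in option codes mirroring the NOTR/TR/ADJ, UP/LO and UNIT/NUNIT
    // constants used by the Eigen BLAS layer (values are illustrative).
    enum Op   { NOTR = 0, TR = 1, ADJ = 2 };
    enum Uplo { UP = 0, LO = 1 };
    enum Diag { NUNIT = 0, UNIT = 1 };

    typedef void (*solve_func)(int n, int k, const double* a, int lda, double* x);

    static void solve_upper_notrans(int, int, const double*, int, double*)
    {
      // ... one concrete kernel per supported combination would go here ...
    }

    int tbsv_dispatch(char uplo, char trans, char diag,
                      int n, int k, const double* a, int lda, double* x)
    {
      static solve_func func[16];
      static bool init = false;
      if (!init) {
        for (int i = 0; i < 16; ++i) func[i] = 0;
        func[NOTR | (UP << 2) | (NUNIT << 3)] = solve_upper_notrans;
        // ... the remaining eleven combinations registered the same way ...
        init = true;
      }
      const int code = (trans == 'N' ? NOTR : trans == 'T' ? TR : ADJ)
                     | ((uplo == 'U' ? UP : LO) << 2)
                     | ((diag == 'N' ? NUNIT : UNIT) << 3);
      if (code >= 16 || func[code] == 0)   // unsupported combination
        return 0;
      func[code](n, k, a, lda, x);
      return 0;
    }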
-* - END diff --git a/gtsam/3rdparty/Eigen/blas/testing/runblastest.sh b/gtsam/3rdparty/Eigen/blas/testing/runblastest.sh index aa634a2ce..4ffaf0111 100755 --- a/gtsam/3rdparty/Eigen/blas/testing/runblastest.sh +++ b/gtsam/3rdparty/Eigen/blas/testing/runblastest.sh @@ -27,11 +27,17 @@ else if [ -f $1.summ ]; then if [ `grep "FATAL ERROR" $1.summ | wc -l` -gt 0 ]; then echo -e $red "Test $1 failed (FATAL ERROR, read the file $1.summ for details)" $black + echo -e $blue + cat .runtest.log + echo -e $black exit 1; fi if [ `grep "FAILED THE TESTS OF ERROR-EXITS" $1.summ | wc -l` -gt 0 ]; then echo -e $red "Test $1 failed (FAILED THE TESTS OF ERROR-EXITS, read the file $1.summ for details)" $black + echo -e $blue + cat .runtest.log + echo -e $black exit 1; fi fi diff --git a/gtsam/3rdparty/Eigen/blas/xerbla.cpp b/gtsam/3rdparty/Eigen/blas/xerbla.cpp index bda1d2f46..0d57710fe 100644 --- a/gtsam/3rdparty/Eigen/blas/xerbla.cpp +++ b/gtsam/3rdparty/Eigen/blas/xerbla.cpp @@ -1,12 +1,18 @@ #include +#if (defined __GNUC__) +#define EIGEN_WEAK_LINKING __attribute__ ((weak)) +#else +#define EIGEN_WEAK_LINKING +#endif + #ifdef __cplusplus extern "C" { #endif -int xerbla_(const char * msg, int *info, int) +EIGEN_WEAK_LINKING int xerbla_(const char * msg, int *info, int) { std::cerr << "Eigen BLAS ERROR #" << *info << ": " << msg << "\n"; return 0; diff --git a/gtsam/3rdparty/Eigen/blas/ztbsv.f b/gtsam/3rdparty/Eigen/blas/ztbsv.f deleted file mode 100644 index 42b234a77..000000000 --- a/gtsam/3rdparty/Eigen/blas/ztbsv.f +++ /dev/null @@ -1,370 +0,0 @@ - SUBROUTINE ZTBSV(UPLO,TRANS,DIAG,N,K,A,LDA,X,INCX) -* .. Scalar Arguments .. - INTEGER INCX,K,LDA,N - CHARACTER DIAG,TRANS,UPLO -* .. -* .. Array Arguments .. - DOUBLE COMPLEX A(LDA,*),X(*) -* .. -* -* Purpose -* ======= -* -* ZTBSV solves one of the systems of equations -* -* A*x = b, or A'*x = b, or conjg( A' )*x = b, -* -* where b and x are n element vectors and A is an n by n unit, or -* non-unit, upper or lower triangular band matrix, with ( k + 1 ) -* diagonals. -* -* No test for singularity or near-singularity is included in this -* routine. Such tests must be performed before calling this routine. -* -* Arguments -* ========== -* -* UPLO - CHARACTER*1. -* On entry, UPLO specifies whether the matrix is an upper or -* lower triangular matrix as follows: -* -* UPLO = 'U' or 'u' A is an upper triangular matrix. -* -* UPLO = 'L' or 'l' A is a lower triangular matrix. -* -* Unchanged on exit. -* -* TRANS - CHARACTER*1. -* On entry, TRANS specifies the equations to be solved as -* follows: -* -* TRANS = 'N' or 'n' A*x = b. -* -* TRANS = 'T' or 't' A'*x = b. -* -* TRANS = 'C' or 'c' conjg( A' )*x = b. -* -* Unchanged on exit. -* -* DIAG - CHARACTER*1. -* On entry, DIAG specifies whether or not A is unit -* triangular as follows: -* -* DIAG = 'U' or 'u' A is assumed to be unit triangular. -* -* DIAG = 'N' or 'n' A is not assumed to be unit -* triangular. -* -* Unchanged on exit. -* -* N - INTEGER. -* On entry, N specifies the order of the matrix A. -* N must be at least zero. -* Unchanged on exit. -* -* K - INTEGER. -* On entry with UPLO = 'U' or 'u', K specifies the number of -* super-diagonals of the matrix A. -* On entry with UPLO = 'L' or 'l', K specifies the number of -* sub-diagonals of the matrix A. -* K must satisfy 0 .le. K. -* Unchanged on exit. -* -* A - COMPLEX*16 array of DIMENSION ( LDA, n ). 
-* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) -* by n part of the array A must contain the upper triangular -* band part of the matrix of coefficients, supplied column by -* column, with the leading diagonal of the matrix in row -* ( k + 1 ) of the array, the first super-diagonal starting at -* position 2 in row k, and so on. The top left k by k triangle -* of the array A is not referenced. -* The following program segment will transfer an upper -* triangular band matrix from conventional full matrix storage -* to band storage: -* -* DO 20, J = 1, N -* M = K + 1 - J -* DO 10, I = MAX( 1, J - K ), J -* A( M + I, J ) = matrix( I, J ) -* 10 CONTINUE -* 20 CONTINUE -* -* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) -* by n part of the array A must contain the lower triangular -* band part of the matrix of coefficients, supplied column by -* column, with the leading diagonal of the matrix in row 1 of -* the array, the first sub-diagonal starting at position 1 in -* row 2, and so on. The bottom right k by k triangle of the -* array A is not referenced. -* The following program segment will transfer a lower -* triangular band matrix from conventional full matrix storage -* to band storage: -* -* DO 20, J = 1, N -* M = 1 - J -* DO 10, I = J, MIN( N, J + K ) -* A( M + I, J ) = matrix( I, J ) -* 10 CONTINUE -* 20 CONTINUE -* -* Note that when DIAG = 'U' or 'u' the elements of the array A -* corresponding to the diagonal elements of the matrix are not -* referenced, but are assumed to be unity. -* Unchanged on exit. -* -* LDA - INTEGER. -* On entry, LDA specifies the first dimension of A as declared -* in the calling (sub) program. LDA must be at least -* ( k + 1 ). -* Unchanged on exit. -* -* X - COMPLEX*16 array of dimension at least -* ( 1 + ( n - 1 )*abs( INCX ) ). -* Before entry, the incremented array X must contain the n -* element right-hand side vector b. On exit, X is overwritten -* with the solution vector x. -* -* INCX - INTEGER. -* On entry, INCX specifies the increment for the elements of -* X. INCX must not be zero. -* Unchanged on exit. -* -* Further Details -* =============== -* -* Level 2 Blas routine. -* -* -- Written on 22-October-1986. -* Jack Dongarra, Argonne National Lab. -* Jeremy Du Croz, Nag Central Office. -* Sven Hammarling, Nag Central Office. -* Richard Hanson, Sandia National Labs. -* -* ===================================================================== -* -* .. Parameters .. - DOUBLE COMPLEX ZERO - PARAMETER (ZERO= (0.0D+0,0.0D+0)) -* .. -* .. Local Scalars .. - DOUBLE COMPLEX TEMP - INTEGER I,INFO,IX,J,JX,KPLUS1,KX,L - LOGICAL NOCONJ,NOUNIT -* .. -* .. External Functions .. - LOGICAL LSAME - EXTERNAL LSAME -* .. -* .. External Subroutines .. - EXTERNAL XERBLA -* .. -* .. Intrinsic Functions .. - INTRINSIC DCONJG,MAX,MIN -* .. -* -* Test the input parameters. -* - INFO = 0 - IF (.NOT.LSAME(UPLO,'U') .AND. .NOT.LSAME(UPLO,'L')) THEN - INFO = 1 - ELSE IF (.NOT.LSAME(TRANS,'N') .AND. .NOT.LSAME(TRANS,'T') .AND. - + .NOT.LSAME(TRANS,'C')) THEN - INFO = 2 - ELSE IF (.NOT.LSAME(DIAG,'U') .AND. .NOT.LSAME(DIAG,'N')) THEN - INFO = 3 - ELSE IF (N.LT.0) THEN - INFO = 4 - ELSE IF (K.LT.0) THEN - INFO = 5 - ELSE IF (LDA.LT. (K+1)) THEN - INFO = 7 - ELSE IF (INCX.EQ.0) THEN - INFO = 9 - END IF - IF (INFO.NE.0) THEN - CALL XERBLA('ZTBSV ',INFO) - RETURN - END IF -* -* Quick return if possible. -* - IF (N.EQ.0) RETURN -* - NOCONJ = LSAME(TRANS,'T') - NOUNIT = LSAME(DIAG,'N') -* -* Set up the start point in X if the increment is not unity. 
This -* will be ( N - 1 )*INCX too small for descending loops. -* - IF (INCX.LE.0) THEN - KX = 1 - (N-1)*INCX - ELSE IF (INCX.NE.1) THEN - KX = 1 - END IF -* -* Start the operations. In this version the elements of A are -* accessed by sequentially with one pass through A. -* - IF (LSAME(TRANS,'N')) THEN -* -* Form x := inv( A )*x. -* - IF (LSAME(UPLO,'U')) THEN - KPLUS1 = K + 1 - IF (INCX.EQ.1) THEN - DO 20 J = N,1,-1 - IF (X(J).NE.ZERO) THEN - L = KPLUS1 - J - IF (NOUNIT) X(J) = X(J)/A(KPLUS1,J) - TEMP = X(J) - DO 10 I = J - 1,MAX(1,J-K),-1 - X(I) = X(I) - TEMP*A(L+I,J) - 10 CONTINUE - END IF - 20 CONTINUE - ELSE - KX = KX + (N-1)*INCX - JX = KX - DO 40 J = N,1,-1 - KX = KX - INCX - IF (X(JX).NE.ZERO) THEN - IX = KX - L = KPLUS1 - J - IF (NOUNIT) X(JX) = X(JX)/A(KPLUS1,J) - TEMP = X(JX) - DO 30 I = J - 1,MAX(1,J-K),-1 - X(IX) = X(IX) - TEMP*A(L+I,J) - IX = IX - INCX - 30 CONTINUE - END IF - JX = JX - INCX - 40 CONTINUE - END IF - ELSE - IF (INCX.EQ.1) THEN - DO 60 J = 1,N - IF (X(J).NE.ZERO) THEN - L = 1 - J - IF (NOUNIT) X(J) = X(J)/A(1,J) - TEMP = X(J) - DO 50 I = J + 1,MIN(N,J+K) - X(I) = X(I) - TEMP*A(L+I,J) - 50 CONTINUE - END IF - 60 CONTINUE - ELSE - JX = KX - DO 80 J = 1,N - KX = KX + INCX - IF (X(JX).NE.ZERO) THEN - IX = KX - L = 1 - J - IF (NOUNIT) X(JX) = X(JX)/A(1,J) - TEMP = X(JX) - DO 70 I = J + 1,MIN(N,J+K) - X(IX) = X(IX) - TEMP*A(L+I,J) - IX = IX + INCX - 70 CONTINUE - END IF - JX = JX + INCX - 80 CONTINUE - END IF - END IF - ELSE -* -* Form x := inv( A' )*x or x := inv( conjg( A') )*x. -* - IF (LSAME(UPLO,'U')) THEN - KPLUS1 = K + 1 - IF (INCX.EQ.1) THEN - DO 110 J = 1,N - TEMP = X(J) - L = KPLUS1 - J - IF (NOCONJ) THEN - DO 90 I = MAX(1,J-K),J - 1 - TEMP = TEMP - A(L+I,J)*X(I) - 90 CONTINUE - IF (NOUNIT) TEMP = TEMP/A(KPLUS1,J) - ELSE - DO 100 I = MAX(1,J-K),J - 1 - TEMP = TEMP - DCONJG(A(L+I,J))*X(I) - 100 CONTINUE - IF (NOUNIT) TEMP = TEMP/DCONJG(A(KPLUS1,J)) - END IF - X(J) = TEMP - 110 CONTINUE - ELSE - JX = KX - DO 140 J = 1,N - TEMP = X(JX) - IX = KX - L = KPLUS1 - J - IF (NOCONJ) THEN - DO 120 I = MAX(1,J-K),J - 1 - TEMP = TEMP - A(L+I,J)*X(IX) - IX = IX + INCX - 120 CONTINUE - IF (NOUNIT) TEMP = TEMP/A(KPLUS1,J) - ELSE - DO 130 I = MAX(1,J-K),J - 1 - TEMP = TEMP - DCONJG(A(L+I,J))*X(IX) - IX = IX + INCX - 130 CONTINUE - IF (NOUNIT) TEMP = TEMP/DCONJG(A(KPLUS1,J)) - END IF - X(JX) = TEMP - JX = JX + INCX - IF (J.GT.K) KX = KX + INCX - 140 CONTINUE - END IF - ELSE - IF (INCX.EQ.1) THEN - DO 170 J = N,1,-1 - TEMP = X(J) - L = 1 - J - IF (NOCONJ) THEN - DO 150 I = MIN(N,J+K),J + 1,-1 - TEMP = TEMP - A(L+I,J)*X(I) - 150 CONTINUE - IF (NOUNIT) TEMP = TEMP/A(1,J) - ELSE - DO 160 I = MIN(N,J+K),J + 1,-1 - TEMP = TEMP - DCONJG(A(L+I,J))*X(I) - 160 CONTINUE - IF (NOUNIT) TEMP = TEMP/DCONJG(A(1,J)) - END IF - X(J) = TEMP - 170 CONTINUE - ELSE - KX = KX + (N-1)*INCX - JX = KX - DO 200 J = N,1,-1 - TEMP = X(JX) - IX = KX - L = 1 - J - IF (NOCONJ) THEN - DO 180 I = MIN(N,J+K),J + 1,-1 - TEMP = TEMP - A(L+I,J)*X(IX) - IX = IX - INCX - 180 CONTINUE - IF (NOUNIT) TEMP = TEMP/A(1,J) - ELSE - DO 190 I = MIN(N,J+K),J + 1,-1 - TEMP = TEMP - DCONJG(A(L+I,J))*X(IX) - IX = IX - INCX - 190 CONTINUE - IF (NOUNIT) TEMP = TEMP/DCONJG(A(1,J)) - END IF - X(JX) = TEMP - JX = JX - INCX - IF ((N-J).GE.K) KX = KX - INCX - 200 CONTINUE - END IF - END IF - END IF -* - RETURN -* -* End of ZTBSV . 
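Note: the xerbla.cpp hunk above gives the default BLAS error handler weak linkage under GCC, so a program linking this BLAS can install its own handler simply by providing a strong definition of xerbla_. A hedged example of such an override (user code, not part of the patch):

    #include <cstdio>

    // A strong definition like this one takes precedence over the weak
    // default from Eigen's xerbla.cpp (which only prints to std::cerr) when
    // both are linked into the same program. The signature matches the one
    // declared in the patch.
    extern "C" int xerbla_(const char* msg, int* info, int /*unused*/)
    {
      std::fprintf(stderr, "BLAS error: argument %d of %s is invalid\n", *info, msg);
      return 0;
    }

Under compilers other than GCC the EIGEN_WEAK_LINKING macro expands to nothing, so the library's definition stays strong and such an override is not available this way.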
-* - END diff --git a/gtsam/3rdparty/Eigen/cmake/CMakeDetermineVSServicePack.cmake b/gtsam/3rdparty/Eigen/cmake/CMakeDetermineVSServicePack.cmake new file mode 100644 index 000000000..b89462308 --- /dev/null +++ b/gtsam/3rdparty/Eigen/cmake/CMakeDetermineVSServicePack.cmake @@ -0,0 +1,103 @@ +# - Includes a public function for assisting users in trying to determine the +# Visual Studio service pack in use. +# +# Sets the passed in variable to one of the following values or an empty +# string if unknown. +# vc80 +# vc80sp1 +# vc90 +# vc90sp1 +# +# Usage: +# =========================== +# +# if(MSVC) +# include(CMakeDetermineVSServicePack) +# DetermineVSServicePack( my_service_pack ) +# +# if( my_service_pack ) +# message(STATUS "Detected: ${my_service_pack}") +# endif() +# endif() +# +# =========================== + +#============================================================================= +# Copyright 2009-2010 Kitware, Inc. +# Copyright 2009-2010 Philip Lowman +# +# Distributed under the OSI-approved BSD License (the "License"); +# see accompanying file Copyright.txt for details. +# +# This software is distributed WITHOUT ANY WARRANTY; without even the +# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the License for more information. +#============================================================================= +# (To distribute this file outside of CMake, substitute the full +# License text for the above reference.) + +# [INTERNAL] +# Please do not call this function directly +function(_DetermineVSServicePackFromCompiler _OUT_VAR _cl_version) + if (${_cl_version} VERSION_EQUAL "14.00.50727.42") + set(_version "vc80") + elseif(${_cl_version} VERSION_EQUAL "14.00.50727.762") + set(_version "vc80sp1") + elseif(${_cl_version} VERSION_EQUAL "15.00.21022.08") + set(_version "vc90") + elseif(${_cl_version} VERSION_EQUAL "15.00.30729.01") + set(_version "vc90sp1") + elseif(${_cl_version} VERSION_EQUAL "16.00.30319.01") + set(_version "vc100") + else() + set(_version "") + endif() + set(${_OUT_VAR} ${_version} PARENT_SCOPE) +endfunction() + +# +# A function to call to determine the Visual Studio service pack +# in use. See documentation above. +function(DetermineVSServicePack _pack) + if(NOT DETERMINED_VS_SERVICE_PACK OR NOT ${_pack}) + if(${CMAKE_BUILD_TOOL} STREQUAL "nmake") + EXECUTE_PROCESS(COMMAND ${CMAKE_CXX_COMPILER} "/?" 
+ ERROR_VARIABLE _output) + set(DETERMINED_VS_SERVICE_PACK ${_output}) + else() + file(WRITE "${CMAKE_BINARY_DIR}/return0.cc" + "int main() { return 0; }\n") + + try_compile(DETERMINED_VS_SERVICE_PACK + "${CMAKE_BINARY_DIR}" + "${CMAKE_BINARY_DIR}/return0.cc" + OUTPUT_VARIABLE _output + COPY_FILE "${CMAKE_BINARY_DIR}/return0.cc") + + file(REMOVE "${CMAKE_BINARY_DIR}/return0.cc") + endif() + + if(DETERMINED_VS_SERVICE_PACK AND _output) + string(REGEX MATCH "Compiler Version [0-9]+.[0-9]+.[0-9]+.[0-9]+" + _cl_version "${_output}") + if(_cl_version) + string(REGEX MATCHALL "[0-9]+" + _cl_version_list "${_cl_version}") + list(GET _cl_version_list 0 _major) + list(GET _cl_version_list 1 _minor) + list(GET _cl_version_list 2 _patch) + list(GET _cl_version_list 3 _tweak) + + set(_cl_version_string ${_major}.${_minor}.${_patch}.${_tweak}) + + # Call helper function to determine VS version + _DetermineVSServicePackFromCompiler(_sp "${_cl_version_string}") + if(_sp) + #set(${_pack} "${_sp}(${_cl_version_string})" CACHE INTERNAL + set(${_pack} "${_sp}" CACHE INTERNAL + "The Visual Studio Release with Service Pack") + endif() + endif() + endif() + endif() +endfunction() diff --git a/gtsam/3rdparty/Eigen/cmake/EigenConfigureTesting.cmake b/gtsam/3rdparty/Eigen/cmake/EigenConfigureTesting.cmake new file mode 100644 index 000000000..cf8f32c01 --- /dev/null +++ b/gtsam/3rdparty/Eigen/cmake/EigenConfigureTesting.cmake @@ -0,0 +1,79 @@ +include(EigenTesting) +include(CheckCXXSourceCompiles) + +# configure the "site" and "buildname" +ei_set_sitename() + +# retrieve and store the build string +ei_set_build_string() + +add_custom_target(buildtests) +add_custom_target(check COMMAND "ctest") +add_dependencies(check buildtests) + +# check whether /bin/bash exists +find_file(EIGEN_BIN_BASH_EXISTS "/bin/bash" PATHS "/" NO_DEFAULT_PATH) + +# CMake/Ctest does not allow us to change the build command, +# so we have to workaround by directly editing the generated DartConfiguration.tcl file +# save CMAKE_MAKE_PROGRAM +set(CMAKE_MAKE_PROGRAM_SAVE ${CMAKE_MAKE_PROGRAM}) +# and set a fake one +set(CMAKE_MAKE_PROGRAM "@EIGEN_MAKECOMMAND_PLACEHOLDER@") + +# This call activates testing and generates the DartConfiguration.tcl +include(CTest) + +# overwrite default DartConfiguration.tcl +# The worarounds are different for each version of the MSVC IDE +if(MSVC_IDE) + if(MSVC_VERSION EQUAL 1600) # MSVC 2010 + set(EIGEN_MAKECOMMAND_PLACEHOLDER "${CMAKE_MAKE_PROGRAM_SAVE} buildtests.vcxproj /p:Configuration=\${CTEST_CONFIGURATION_TYPE} \n# ") + else() # MSVC 2008 (TODO check MSVC 2005) + set(EIGEN_MAKECOMMAND_PLACEHOLDER "${CMAKE_MAKE_PROGRAM_SAVE} Eigen.sln /build \"Release\" /project buildtests \n# ") + endif() +else() + # for make and nmake + set(EIGEN_MAKECOMMAND_PLACEHOLDER "${CMAKE_MAKE_PROGRAM_SAVE} buildtests") +endif() + +# copy ctest properties, which currently +# o raise the warning levels +configure_file(${CMAKE_BINARY_DIR}/DartConfiguration.tcl ${CMAKE_BINARY_DIR}/DartConfiguration.tcl) + +# restore default CMAKE_MAKE_PROGRAM +set(CMAKE_MAKE_PROGRAM ${CMAKE_MAKE_PROGRAM_SAVE}) +# un-set temporary variables so that it is like they never existed. +# CMake 2.6.3 introduces the more logical unset() syntax for this. 
+set(CMAKE_MAKE_PROGRAM_SAVE) +set(EIGEN_MAKECOMMAND_PLACEHOLDER) + +configure_file(${CMAKE_SOURCE_DIR}/CTestCustom.cmake.in ${CMAKE_BINARY_DIR}/CTestCustom.cmake) + +# some documentation of this function would be nice +ei_init_testing() + +# configure Eigen related testing options +option(EIGEN_NO_ASSERTION_CHECKING "Disable checking of assertions using exceptions" OFF) +option(EIGEN_DEBUG_ASSERTS "Enable advanced debuging of assertions" OFF) + +if(CMAKE_COMPILER_IS_GNUCXX) + option(EIGEN_COVERAGE_TESTING "Enable/disable gcov" OFF) + if(EIGEN_COVERAGE_TESTING) + set(COVERAGE_FLAGS "-fprofile-arcs -ftest-coverage") + set(CTEST_CUSTOM_COVERAGE_EXCLUDE "/test/") + else(EIGEN_COVERAGE_TESTING) + set(COVERAGE_FLAGS "") + endif(EIGEN_COVERAGE_TESTING) + if(EIGEN_TEST_C++0x) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=gnu++0x") + endif(EIGEN_TEST_C++0x) + if(CMAKE_SYSTEM_NAME MATCHES Linux) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_FLAGS} -g2") + set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} ${COVERAGE_FLAGS} -O2 -g2") + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COVERAGE_FLAGS} -fno-inline-functions") + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COVERAGE_FLAGS} -O0 -g3") + endif(CMAKE_SYSTEM_NAME MATCHES Linux) +elseif(MSVC) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /D_CRT_SECURE_NO_WARNINGS /D_SCL_SECURE_NO_WARNINGS") +endif(CMAKE_COMPILER_IS_GNUCXX) diff --git a/gtsam/3rdparty/Eigen/cmake/EigenDetermineOSVersion.cmake b/gtsam/3rdparty/Eigen/cmake/EigenDetermineOSVersion.cmake new file mode 100644 index 000000000..3c48d4c37 --- /dev/null +++ b/gtsam/3rdparty/Eigen/cmake/EigenDetermineOSVersion.cmake @@ -0,0 +1,46 @@ +# The utility function DetermineOSVersion aims at providing an +# improved version of the CMake variable ${CMAKE_SYSTEM} on Windows +# machines. 
+# +# Usage: +# include(EigenDetermineOSVersion) +# DetermineOSVersion(OS_VERSION) +# message("OS: ${OS_VERSION}") + +# - A little helper variable which should not be directly called +function(DetermineShortWindowsName WIN_VERSION win_num_version) + if (${win_num_version} VERSION_EQUAL "6.1") + set(_version "win7") + elseif(${win_num_version} VERSION_EQUAL "6.0") + set(_version "winVista") + elseif(${win_num_version} VERSION_EQUAL "5.2") + set(_version "winXpProf") + elseif(${win_num_version} VERSION_EQUAL "5.1") + set(_version "winXp") + elseif(${win_num_version} VERSION_EQUAL "5.0") + set(_version "win2000Prof") + else() + set(_version "unknownWin") + endif() + set(${WIN_VERSION} ${_version} PARENT_SCOPE) +endfunction() + +function(DetermineOSVersion OS_VERSION) + if (WIN32) + file (TO_NATIVE_PATH "$ENV{COMSPEC}" SHELL) + exec_program( ${SHELL} ARGS "/c" "ver" OUTPUT_VARIABLE ver_output) + + string(REGEX MATCHALL "[0-9]+" + ver_list "${ver_output}") + list(GET ver_list 0 _major) + list(GET ver_list 1 _minor) + + set(win_num_version ${_major}.${_minor}) + DetermineShortWindowsName(win_version "${win_num_version}") + if(win_version) + set(${OS_VERSION} ${win_version} PARENT_SCOPE) + endif() + else() + set(${OS_VERSION} ${CMAKE_SYSTEM} PARENT_SCOPE) + endif() +endfunction() diff --git a/gtsam/3rdparty/Eigen/cmake/EigenTesting.cmake b/gtsam/3rdparty/Eigen/cmake/EigenTesting.cmake index 4c8039315..266043974 100644 --- a/gtsam/3rdparty/Eigen/cmake/EigenTesting.cmake +++ b/gtsam/3rdparty/Eigen/cmake/EigenTesting.cmake @@ -1,11 +1,11 @@ -option(EIGEN_NO_ASSERTION_CHECKING "Disable checking of assertions using exceptions" OFF) -option(EIGEN_DEBUG_ASSERTS "Enable advanced debuging of assertions" OFF) - -include(CheckCXXSourceCompiles) macro(ei_add_property prop value) - get_property(previous GLOBAL PROPERTY ${prop}) - set_property(GLOBAL PROPERTY ${prop} "${previous} ${value}") + get_property(previous GLOBAL PROPERTY ${prop}) + if ((NOT previous) OR (previous STREQUAL "")) + set_property(GLOBAL PROPERTY ${prop} "${value}") + else() + set_property(GLOBAL PROPERTY ${prop} "${previous} ${value}") + endif() endmacro(ei_add_property) #internal. See documentation of ei_add_test for details. 
@@ -27,6 +27,8 @@ macro(ei_add_test_internal testname testname_with_suffix) ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_DEBUG_ASSERTS=1") endif(EIGEN_DEBUG_ASSERTS) endif(EIGEN_NO_ASSERTION_CHECKING) + + ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_TEST_MAX_SIZE=${EIGEN_TEST_MAX_SIZE}") ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_TEST_FUNC=${testname}") @@ -38,6 +40,10 @@ macro(ei_add_test_internal testname testname_with_suffix) if(${ARGC} GREATER 2) ei_add_target_property(${targetname} COMPILE_FLAGS "${ARGV2}") endif(${ARGC} GREATER 2) + + if(EIGEN_TEST_CUSTOM_CXX_FLAGS) + ei_add_target_property(${targetname} COMPILE_FLAGS "${EIGEN_TEST_CUSTOM_CXX_FLAGS}") + endif() if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO) target_link_libraries(${targetname} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO}) @@ -45,6 +51,9 @@ macro(ei_add_test_internal testname testname_with_suffix) if(EXTERNAL_LIBS) target_link_libraries(${targetname} ${EXTERNAL_LIBS}) endif() + if(EIGEN_TEST_CUSTOM_LINKER_FLAGS) + target_link_libraries(${targetname} ${EIGEN_TEST_CUSTOM_LINKER_FLAGS}) + endif() if(${ARGC} GREATER 3) set(libs_to_link ${ARGV3}) @@ -59,15 +68,11 @@ macro(ei_add_test_internal testname testname_with_suffix) endif() endif() - if(WIN32) - if(CYGWIN) - add_test(${testname_with_suffix} "${Eigen_SOURCE_DIR}/test/runtest.sh" "${testname_with_suffix}") - else(CYGWIN) - add_test(${testname_with_suffix} "${targetname}") - endif(CYGWIN) - else(WIN32) + if(EIGEN_BIN_BASH_EXISTS) add_test(${testname_with_suffix} "${Eigen_SOURCE_DIR}/test/runtest.sh" "${testname_with_suffix}") - endif(WIN32) + else() + add_test(${testname_with_suffix} "${targetname}") + endif() endmacro(ei_add_test_internal) @@ -120,9 +125,9 @@ macro(ei_add_test testname) file(READ "${testname}.cpp" test_source) set(parts 0) - string(REGEX MATCHALL "CALL_SUBTEST_[0-9]+|EIGEN_TEST_PART_[0-9]+" + string(REGEX MATCHALL "CALL_SUBTEST_[0-9]+|EIGEN_TEST_PART_[0-9]+|EIGEN_SUFFIXES(;[0-9]+)+" occurences "${test_source}") - string(REGEX REPLACE "CALL_SUBTEST_|EIGEN_TEST_PART_" "" suffixes "${occurences}") + string(REGEX REPLACE "CALL_SUBTEST_|EIGEN_TEST_PART_|EIGEN_SUFFIXES" "" suffixes "${occurences}") list(REMOVE_DUPLICATES suffixes) if(EIGEN_SPLIT_LARGE_TESTS AND suffixes) add_custom_target(${testname}) @@ -181,12 +186,13 @@ endmacro(ei_add_failtest) # print a summary of the different options macro(ei_testing_print_summary) - message(STATUS "************************************************************") message(STATUS "*** Eigen's unit tests configuration summary ***") message(STATUS "************************************************************") message(STATUS "") message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") + message(STATUS "Build site: ${SITE}") + message(STATUS "Build string: ${BUILDNAME}") get_property(EIGEN_TESTING_SUMMARY GLOBAL PROPERTY EIGEN_TESTING_SUMMARY) get_property(EIGEN_TESTED_BACKENDS GLOBAL PROPERTY EIGEN_TESTED_BACKENDS) get_property(EIGEN_MISSING_BACKENDS GLOBAL PROPERTY EIGEN_MISSING_BACKENDS) @@ -204,6 +210,8 @@ macro(ei_testing_print_summary) elseif(EIGEN_TEST_NO_EXPLICIT_VECTORIZATION) message(STATUS "Explicit vectorization disabled (alignment kept enabled)") else() + + message(STATUS "Maximal matrix/vector size: ${EIGEN_TEST_MAX_SIZE}") if(EIGEN_TEST_SSE2) message(STATUS "SSE2: ON") @@ -252,7 +260,6 @@ macro(ei_testing_print_summary) message(STATUS "\n${EIGEN_TESTING_SUMMARY}") message(STATUS "************************************************************") - endmacro(ei_testing_print_summary) 
macro(ei_init_testing) @@ -271,25 +278,200 @@ macro(ei_init_testing) set_property(GLOBAL PROPERTY EIGEN_FAILTEST_FAILURE_COUNT "0") set_property(GLOBAL PROPERTY EIGEN_FAILTEST_COUNT "0") + + # uncomment anytime you change the ei_get_compilerver_from_cxx_version_string macro + # ei_test_get_compilerver_from_cxx_version_string() endmacro(ei_init_testing) -if(CMAKE_COMPILER_IS_GNUCXX) - option(EIGEN_COVERAGE_TESTING "Enable/disable gcov" OFF) - if(EIGEN_COVERAGE_TESTING) - set(COVERAGE_FLAGS "-fprofile-arcs -ftest-coverage") - set(CTEST_CUSTOM_COVERAGE_EXCLUDE "/test/") - else(EIGEN_COVERAGE_TESTING) - set(COVERAGE_FLAGS "") - endif(EIGEN_COVERAGE_TESTING) - if(EIGEN_TEST_C++0x) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=gnu++0x") - endif(EIGEN_TEST_C++0x) - if(CMAKE_SYSTEM_NAME MATCHES Linux) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_FLAGS} -g2") - set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} ${COVERAGE_FLAGS} -O2 -g2") - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COVERAGE_FLAGS} -fno-inline-functions") - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COVERAGE_FLAGS} -O0 -g3") - endif(CMAKE_SYSTEM_NAME MATCHES Linux) -elseif(MSVC) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /D_CRT_SECURE_NO_WARNINGS /D_SCL_SECURE_NO_WARNINGS") -endif(CMAKE_COMPILER_IS_GNUCXX) +macro(ei_set_sitename) + # if the sitename is not yet set, try to set it + if(NOT ${SITE} OR ${SITE} STREQUAL "") + set(eigen_computername $ENV{COMPUTERNAME}) + set(eigen_hostname $ENV{HOSTNAME}) + if(eigen_hostname) + set(SITE ${eigen_hostname}) + elseif(eigen_computername) + set(SITE ${eigen_computername}) + endif() + endif() + # in case it is already set, enforce lower case + if(SITE) + string(TOLOWER ${SITE} SITE) + endif() +endmacro(ei_set_sitename) + +macro(ei_get_compilerver VAR) + if(MSVC) + # on windows system, we use a modified CMake script + include(CMakeDetermineVSServicePack) + DetermineVSServicePack( my_service_pack ) + + if( my_service_pack ) + set(${VAR} ${my_service_pack}) + else() + set(${VAR} "na") + endif() + else() + # on all other system we rely on ${CMAKE_CXX_COMPILER} + # supporting a "--version" flag + execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version + COMMAND head -n 1 + OUTPUT_VARIABLE eigen_cxx_compiler_version_string OUTPUT_STRIP_TRAILING_WHITESPACE) + + ei_get_compilerver_from_cxx_version_string(${eigen_cxx_compiler_version_string} CNAME CVER) + + set(${VAR} "${CNAME}-${CVER}") + endif() +endmacro(ei_get_compilerver) + +# Extract compiler name and version from a raw version string +# WARNING: if you edit thid macro, then please test it by uncommenting +# the testing macro call in ei_init_testing() of the EigenTesting.cmake file. 
+# See also the ei_test_get_compilerver_from_cxx_version_string macro at the end of the file +macro(ei_get_compilerver_from_cxx_version_string VERSTRING CNAME CVER) + # extract possible compiler names + string(REGEX MATCH "g\\+\\+" ei_has_gpp ${VERSTRING}) + string(REGEX MATCH "llvm|LLVM" ei_has_llvm ${VERSTRING}) + string(REGEX MATCH "gcc|GCC" ei_has_gcc ${VERSTRING}) + string(REGEX MATCH "icpc|ICC" ei_has_icpc ${VERSTRING}) + string(REGEX MATCH "clang|CLANG" ei_has_clang ${VERSTRING}) + + # combine them + if((ei_has_llvm) AND (ei_has_gpp OR ei_has_gcc)) + set(${CNAME} "llvm-g++") + elseif((ei_has_llvm) AND (ei_has_clang)) + set(${CNAME} "llvm-clang++") + elseif(ei_has_icpc) + set(${CNAME} "icpc") + elseif(ei_has_gpp OR ei_has_gcc) + set(${CNAME} "g++") + else() + set(${CNAME} "_") + endif() + + # extract possible version numbers + # first try to extract 3 isolated numbers: + string(REGEX MATCH " [0-9]+\\.[0-9]+\\.[0-9]+" eicver ${VERSTRING}) + if(NOT eicver) + # try to extract 2 isolated ones: + string(REGEX MATCH " [0-9]+\\.[0-9]+" eicver ${VERSTRING}) + if(NOT eicver) + # try to extract 3: + string(REGEX MATCH "[^0-9][0-9]+\\.[0-9]+\\.[0-9]+" eicver ${VERSTRING}) + if(NOT eicver) + # try to extract 2: + string(REGEX MATCH "[^0-9][0-9]+\\.[0-9]+" eicver ${VERSTRING}) + else() + set(eicver " _") + endif() + endif() + endif() + + string(REGEX REPLACE ".(.*)" "\\1" ${CVER} ${eicver}) + +endmacro(ei_get_compilerver_from_cxx_version_string) + +macro(ei_get_cxxflags VAR) + set(${VAR} "") + ei_is_64bit_env(IS_64BIT_ENV) + if(EIGEN_TEST_NEON) + set(${VAR} NEON) + elseif(EIGEN_TEST_ALTIVEC) + set(${VAR} ALVEC) + elseif(EIGEN_TEST_SSE4_2) + set(${VAR} SSE42) + elseif(EIGEN_TEST_SSE4_1) + set(${VAR} SSE41) + elseif(EIGEN_TEST_SSSE3) + set(${VAR} SSSE3) + elseif(EIGEN_TEST_SSE3) + set(${VAR} SSE3) + elseif(EIGEN_TEST_SSE2 OR IS_64BIT_ENV) + set(${VAR} SSE2) + endif() + + if(EIGEN_TEST_OPENMP) + if (${VAR} STREQUAL "") + set(${VAR} OMP) + else() + set(${VAR} ${${VAR}}-OMP) + endif() + endif() + + if(EIGEN_DEFAULT_TO_ROW_MAJOR) + if (${VAR} STREQUAL "") + set(${VAR} ROW) + else() + set(${VAR} ${${VAR}}-ROWMAJ) + endif() + endif() +endmacro(ei_get_cxxflags) + +macro(ei_set_build_string) + ei_get_compilerver(LOCAL_COMPILER_VERSION) + ei_get_cxxflags(LOCAL_COMPILER_FLAGS) + + include(EigenDetermineOSVersion) + DetermineOSVersion(OS_VERSION) + + set(TMP_BUILD_STRING ${OS_VERSION}-${LOCAL_COMPILER_VERSION}) + + if (NOT ${LOCAL_COMPILER_FLAGS} STREQUAL "") + set(TMP_BUILD_STRING ${TMP_BUILD_STRING}-${LOCAL_COMPILER_FLAGS}) + endif() + + ei_is_64bit_env(IS_64BIT_ENV) + if(NOT IS_64BIT_ENV) + set(TMP_BUILD_STRING ${TMP_BUILD_STRING}-32bit) + else() + set(TMP_BUILD_STRING ${TMP_BUILD_STRING}-64bit) + endif() + + string(TOLOWER ${TMP_BUILD_STRING} BUILDNAME) +endmacro(ei_set_build_string) + +macro(ei_is_64bit_env VAR) + + file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/is64.cpp" + "int main() { return (sizeof(int*) == 8 ? 
1 : 0); } + ") + try_run(run_res compile_res + ${CMAKE_CURRENT_BINARY_DIR} "${CMAKE_CURRENT_BINARY_DIR}/is64.cpp" + RUN_OUTPUT_VARIABLE run_output) + + if(compile_res AND run_res) + set(${VAR} ${run_res}) + elseif(CMAKE_CL_64) + set(${VAR} 1) + elseif("$ENV{Platform}" STREQUAL "X64") # nmake 64 bit + set(${VAR} 1) + endif() +endmacro(ei_is_64bit_env) + + +# helper macro for testing ei_get_compilerver_from_cxx_version_string +# STR: raw version string +# REFNAME: expected compiler name +# REFVER: expected compiler version +macro(ei_test1_get_compilerver_from_cxx_version_string STR REFNAME REFVER) + ei_get_compilerver_from_cxx_version_string(${STR} CNAME CVER) + if((NOT ${REFNAME} STREQUAL ${CNAME}) OR (NOT ${REFVER} STREQUAL ${CVER})) + message("STATUS ei_get_compilerver_from_cxx_version_string error:") + message("Expected \"${REFNAME}-${REFVER}\", got \"${CNAME}-${CVER}\"") + endif() +endmacro(ei_test1_get_compilerver_from_cxx_version_string) + +# macro for testing ei_get_compilerver_from_cxx_version_string +# feel free to add more version strings +macro(ei_test_get_compilerver_from_cxx_version_string) + ei_test1_get_compilerver_from_cxx_version_string("g++ (SUSE Linux) 4.5.3 20110428 [gcc-4_5-branch revision 173117]" "g++" "4.5.3") + ei_test1_get_compilerver_from_cxx_version_string("c++ (GCC) 4.5.1 20100924 (Red Hat 4.5.1-4)" "g++" "4.5.1") + ei_test1_get_compilerver_from_cxx_version_string("icpc (ICC) 11.0 20081105" "icpc" "11.0") + ei_test1_get_compilerver_from_cxx_version_string("g++-3.4 (GCC) 3.4.6" "g++" "3.4.6") + ei_test1_get_compilerver_from_cxx_version_string("SUSE Linux clang version 3.0 (branches/release_30 145598) (based on LLVM 3.0)" "llvm-clang++" "3.0") + ei_test1_get_compilerver_from_cxx_version_string("icpc (ICC) 12.0.5 20110719" "icpc" "12.0.5") + ei_test1_get_compilerver_from_cxx_version_string("Apple clang version 2.1 (tags/Apple/clang-163.7.1) (based on LLVM 3.0svn)" "llvm-clang++" "2.1") + ei_test1_get_compilerver_from_cxx_version_string("i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 (Based on Apple Inc. 
build 5658) (LLVM build 2335.15.00)" "llvm-g++" "4.2.1") + ei_test1_get_compilerver_from_cxx_version_string("g++-mp-4.4 (GCC) 4.4.6" "g++" "4.4.6") + ei_test1_get_compilerver_from_cxx_version_string("g++-mp-4.4 (GCC) 2011" "g++" "4.4") +endmacro(ei_test_get_compilerver_from_cxx_version_string) diff --git a/gtsam/3rdparty/Eigen/cmake/FindCholmod.cmake b/gtsam/3rdparty/Eigen/cmake/FindCholmod.cmake index a5e132b64..9095bea31 100644 --- a/gtsam/3rdparty/Eigen/cmake/FindCholmod.cmake +++ b/gtsam/3rdparty/Eigen/cmake/FindCholmod.cmake @@ -13,6 +13,7 @@ find_path(CHOLMOD_INCLUDES ${INCLUDE_INSTALL_DIR} PATH_SUFFIXES suitesparse + ufsparse ) find_library(CHOLMOD_LIBRARIES cholmod PATHS $ENV{CHOLMODDIR} ${LIB_INSTALL_DIR}) diff --git a/gtsam/3rdparty/Eigen/cmake/FindEigen2.cmake b/gtsam/3rdparty/Eigen/cmake/FindEigen2.cmake index da95bb0f5..a834b8872 100644 --- a/gtsam/3rdparty/Eigen/cmake/FindEigen2.cmake +++ b/gtsam/3rdparty/Eigen/cmake/FindEigen2.cmake @@ -39,11 +39,11 @@ macro(_eigen2_check_version) set(EIGEN2_MINOR_VERSION "${CMAKE_MATCH_1}") set(EIGEN2_VERSION ${EIGEN2_WORLD_VERSION}.${EIGEN2_MAJOR_VERSION}.${EIGEN2_MINOR_VERSION}) - if(${EIGEN2_VERSION} VERSION_LESS ${Eigen2_FIND_VERSION}) + if((${EIGEN2_WORLD_VERSION} NOTEQUAL 2) OR (${EIGEN2_MAJOR_VERSION} GREATER 10) OR (${EIGEN2_VERSION} VERSION_LESS ${Eigen2_FIND_VERSION})) set(EIGEN2_VERSION_OK FALSE) - else(${EIGEN2_VERSION} VERSION_LESS ${Eigen2_FIND_VERSION}) + else() set(EIGEN2_VERSION_OK TRUE) - endif(${EIGEN2_VERSION} VERSION_LESS ${Eigen2_FIND_VERSION}) + endif() if(NOT EIGEN2_VERSION_OK) diff --git a/gtsam/3rdparty/Eigen/cmake/FindFFTW.cmake b/gtsam/3rdparty/Eigen/cmake/FindFFTW.cmake index 58b10ea11..a9e9925e7 100644 --- a/gtsam/3rdparty/Eigen/cmake/FindFFTW.cmake +++ b/gtsam/3rdparty/Eigen/cmake/FindFFTW.cmake @@ -1,31 +1,119 @@ +# - Find the FFTW library +# +# Usage: +# find_package(FFTW [REQUIRED] [QUIET] ) +# +# It sets the following variables: +# FFTW_FOUND ... true if fftw is found on the system +# FFTW_LIBRARIES ... full path to fftw library +# FFTW_INCLUDES ... fftw include directory +# +# The following variables will be checked by the function +# FFTW_USE_STATIC_LIBS ... if true, only static libraries are found +# FFTW_ROOT ... if set, the libraries are exclusively searched +# under this path +# FFTW_LIBRARY ... fftw library to use +# FFTW_INCLUDE_DIR ... 
fftw include directory +# -if (FFTW_INCLUDES AND FFTW_LIBRARIES) - set(FFTW_FIND_QUIETLY TRUE) -endif (FFTW_INCLUDES AND FFTW_LIBRARIES) - -find_path(FFTW_INCLUDES - NAMES - fftw3.h - PATHS - $ENV{FFTWDIR} - ${INCLUDE_INSTALL_DIR} -) - -find_library(FFTWF_LIB NAMES fftw3f PATHS $ENV{FFTWDIR} ${LIB_INSTALL_DIR}) -find_library(FFTW_LIB NAMES fftw3 PATHS $ENV{FFTWDIR} ${LIB_INSTALL_DIR}) -set(FFTW_LIBRARIES "${FFTWF_LIB} ${FFTW_LIB}" ) - -find_library(FFTWL_LIB NAMES fftw3l PATHS $ENV{FFTWDIR} ${LIB_INSTALL_DIR}) - -if(FFTWL_LIB) -set(FFTW_LIBRARIES "${FFTW_LIBRARIES} ${FFTWL_LIB}") +#If environment variable FFTWDIR is specified, it has same effect as FFTW_ROOT +if( NOT FFTW_ROOT AND ENV{FFTWDIR} ) + set( FFTW_ROOT $ENV{FFTWDIR} ) endif() +# Check if we can use PkgConfig +find_package(PkgConfig) -message(STATUS "FFTW ${FFTW_LIBRARIES}" ) +#Determine from PKG +if( PKG_CONFIG_FOUND AND NOT FFTW_ROOT ) + pkg_check_modules( PKG_FFTW QUIET "fftw3" ) +endif() + +#Check whether to search static or dynamic libs +set( CMAKE_FIND_LIBRARY_SUFFIXES_SAV ${CMAKE_FIND_LIBRARY_SUFFIXES} ) + +if( ${FFTW_USE_STATIC_LIBS} ) + set( CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_STATIC_LIBRARY_SUFFIX} ) +else() + set( CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_SHARED_LIBRARY_SUFFIX} ) +endif() + +if( FFTW_ROOT ) + + #find libs + find_library( + FFTW_LIB + NAMES "fftw3" + PATHS ${FFTW_ROOT} + PATH_SUFFIXES "lib" "lib64" + NO_DEFAULT_PATH + ) + + find_library( + FFTWF_LIB + NAMES "fftw3f" + PATHS ${FFTW_ROOT} + PATH_SUFFIXES "lib" "lib64" + NO_DEFAULT_PATH + ) + + find_library( + FFTWL_LIB + NAMES "fftw3l" + PATHS ${FFTW_ROOT} + PATH_SUFFIXES "lib" "lib64" + NO_DEFAULT_PATH + ) + + #find includes + find_path( + FFTW_INCLUDES + NAMES "fftw3.h" + PATHS ${FFTW_ROOT} + PATH_SUFFIXES "include" + NO_DEFAULT_PATH + ) + +else() + + find_library( + FFTW_LIB + NAMES "fftw3" + PATHS ${PKG_FFTW_LIBRARY_DIRS} ${LIB_INSTALL_DIR} + ) + + find_library( + FFTWF_LIB + NAMES "fftw3f" + PATHS ${PKG_FFTW_LIBRARY_DIRS} ${LIB_INSTALL_DIR} + ) + + + find_library( + FFTWL_LIB + NAMES "fftw3l" + PATHS ${PKG_FFTW_LIBRARY_DIRS} ${LIB_INSTALL_DIR} + ) + + find_path( + FFTW_INCLUDES + NAMES "fftw3.h" + PATHS ${PKG_FFTW_INCLUDE_DIRS} ${INCLUDE_INSTALL_DIR} + ) + +endif( FFTW_ROOT ) + +set(FFTW_LIBRARIES ${FFTW_LIB} ${FFTWF_LIB}) + +if(FFTWL_LIB) + set(FFTW_LIBRARIES ${FFTW_LIBRARIES} ${FFTWL_LIB}) +endif() + +set( CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES_SAV} ) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(FFTW DEFAULT_MSG FFTW_INCLUDES FFTW_LIBRARIES) mark_as_advanced(FFTW_INCLUDES FFTW_LIBRARIES) + diff --git a/gtsam/3rdparty/Eigen/cmake/FindMetis.cmake b/gtsam/3rdparty/Eigen/cmake/FindMetis.cmake new file mode 100644 index 000000000..e4d6ef258 --- /dev/null +++ b/gtsam/3rdparty/Eigen/cmake/FindMetis.cmake @@ -0,0 +1,24 @@ +# Pastix requires METIS or METIS (partitioning and reordering tools) + +if (METIS_INCLUDES AND METIS_LIBRARIES) + set(METIS_FIND_QUIETLY TRUE) +endif (METIS_INCLUDES AND METIS_LIBRARIES) + +find_path(METIS_INCLUDES + NAMES + metis.h + PATHS + $ENV{METISDIR} + ${INCLUDE_INSTALL_DIR} + PATH_SUFFIXES + metis +) + + +find_library(METIS_LIBRARIES metis PATHS $ENV{METISDIR} ${LIB_INSTALL_DIR}) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(METIS DEFAULT_MSG + METIS_INCLUDES METIS_LIBRARIES) + +mark_as_advanced(METIS_INCLUDES METIS_LIBRARIES) diff --git a/gtsam/3rdparty/Eigen/cmake/FindPastix.cmake b/gtsam/3rdparty/Eigen/cmake/FindPastix.cmake new file mode 100644 index 
000000000..e2e6c810d --- /dev/null +++ b/gtsam/3rdparty/Eigen/cmake/FindPastix.cmake @@ -0,0 +1,25 @@ +# Pastix lib requires linking to a blas library. +# It is up to the user of this module to find a BLAS and link to it. +# Pastix requires SCOTCH or METIS (partitioning and reordering tools) as well + +if (PASTIX_INCLUDES AND PASTIX_LIBRARIES) + set(PASTIX_FIND_QUIETLY TRUE) +endif (PASTIX_INCLUDES AND PASTIX_LIBRARIES) + +find_path(PASTIX_INCLUDES + NAMES + pastix_nompi.h + PATHS + $ENV{PASTIXDIR} + ${INCLUDE_INSTALL_DIR} +) + +find_library(PASTIX_LIBRARIES pastix PATHS $ENV{PASTIXDIR} ${LIB_INSTALL_DIR}) + + + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(PASTIX DEFAULT_MSG + PASTIX_INCLUDES PASTIX_LIBRARIES) + +mark_as_advanced(PASTIX_INCLUDES PASTIX_LIBRARIES) diff --git a/gtsam/3rdparty/Eigen/cmake/FindScotch.cmake b/gtsam/3rdparty/Eigen/cmake/FindScotch.cmake new file mode 100644 index 000000000..530340b16 --- /dev/null +++ b/gtsam/3rdparty/Eigen/cmake/FindScotch.cmake @@ -0,0 +1,24 @@ +# Pastix requires SCOTCH or METIS (partitioning and reordering tools) + +if (SCOTCH_INCLUDES AND SCOTCH_LIBRARIES) + set(SCOTCH_FIND_QUIETLY TRUE) +endif (SCOTCH_INCLUDES AND SCOTCH_LIBRARIES) + +find_path(SCOTCH_INCLUDES + NAMES + scotch.h + PATHS + $ENV{SCOTCHDIR} + ${INCLUDE_INSTALL_DIR} + PATH_SUFFIXES + scotch +) + + +find_library(SCOTCH_LIBRARIES scotch PATHS $ENV{SCOTCHDIR} ${LIB_INSTALL_DIR}) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(SCOTCH DEFAULT_MSG + SCOTCH_INCLUDES SCOTCH_LIBRARIES) + +mark_as_advanced(SCOTCH_INCLUDES SCOTCH_LIBRARIES) diff --git a/gtsam/3rdparty/Eigen/cmake/FindUmfpack.cmake b/gtsam/3rdparty/Eigen/cmake/FindUmfpack.cmake index 4b6f24f93..d42c3c4a2 100644 --- a/gtsam/3rdparty/Eigen/cmake/FindUmfpack.cmake +++ b/gtsam/3rdparty/Eigen/cmake/FindUmfpack.cmake @@ -13,6 +13,7 @@ find_path(UMFPACK_INCLUDES ${INCLUDE_INSTALL_DIR} PATH_SUFFIXES suitesparse + ufsparse ) find_library(UMFPACK_LIBRARIES umfpack PATHS $ENV{UMFPACKDIR} ${LIB_INSTALL_DIR}) diff --git a/gtsam/3rdparty/Eigen/cmake/language_support.cmake b/gtsam/3rdparty/Eigen/cmake/language_support.cmake new file mode 100644 index 000000000..3414e6ea6 --- /dev/null +++ b/gtsam/3rdparty/Eigen/cmake/language_support.cmake @@ -0,0 +1,64 @@ +# cmake/modules/language_support.cmake +# +# Temporary additional general language support is contained within this +# file. + +# This additional function definition is needed to provide a workaround for +# CMake bug 9220. + +# On debian testing (cmake 2.6.2), I get return code zero when calling +# cmake the first time, but cmake crashes when running a second time +# as follows: +# +# -- The Fortran compiler identification is unknown +# CMake Error at /usr/share/cmake-2.6/Modules/CMakeFortranInformation.cmake:7 (GET_FILENAME_COMPONENT): +# get_filename_component called with incorrect number of arguments +# Call Stack (most recent call first): +# CMakeLists.txt:3 (enable_language) +# +# My workaround is to invoke cmake twice. 
If both return codes are zero, +# it is safe to invoke ENABLE_LANGUAGE(Fortran OPTIONAL) + +function(workaround_9220 language language_works) + #message("DEBUG: language = ${language}") + set(text + "project(test NONE) + cmake_minimum_required(VERSION 2.6.0) + enable_language(${language} OPTIONAL) + ") + file(REMOVE_RECURSE ${CMAKE_BINARY_DIR}/language_tests/${language}) + file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/language_tests/${language}) + file(WRITE ${CMAKE_BINARY_DIR}/language_tests/${language}/CMakeLists.txt + ${text}) + execute_process( + COMMAND ${CMAKE_COMMAND} . + WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/language_tests/${language} + RESULT_VARIABLE return_code + OUTPUT_QUIET + ERROR_QUIET + ) + + if(return_code EQUAL 0) + # Second run + execute_process ( + COMMAND ${CMAKE_COMMAND} . + WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/language_tests/${language} + RESULT_VARIABLE return_code + OUTPUT_QUIET + ERROR_QUIET + ) + if(return_code EQUAL 0) + set(${language_works} ON PARENT_SCOPE) + else(return_code EQUAL 0) + set(${language_works} OFF PARENT_SCOPE) + endif(return_code EQUAL 0) + else(return_code EQUAL 0) + set(${language_works} OFF PARENT_SCOPE) + endif(return_code EQUAL 0) +endfunction(workaround_9220) + +# Temporary tests of the above function. +#workaround_9220(CXX CXX_language_works) +#message("CXX_language_works = ${CXX_language_works}") +#workaround_9220(CXXp CXXp_language_works) +#message("CXXp_language_works = ${CXXp_language_works}") diff --git a/gtsam/3rdparty/Eigen/debug/gdb/printers.py b/gtsam/3rdparty/Eigen/debug/gdb/printers.py index 02823b8a2..8dab8fffb 100644 --- a/gtsam/3rdparty/Eigen/debug/gdb/printers.py +++ b/gtsam/3rdparty/Eigen/debug/gdb/printers.py @@ -31,9 +31,15 @@ # To use it: # -# * create a directory and put the file as well as an empty __init__.py in that directory +# * Create a directory and put the file as well as an empty __init__.py in +# that directory. 
# * Create a ~/.gdbinit file, that contains the following: - +# python +# import sys +# sys.path.insert(0, '/path/to/eigen/printer/directory') +# from printers import register_eigen_printers +# register_eigen_printers (None) +# end import gdb import re @@ -41,10 +47,14 @@ import itertools class EigenMatrixPrinter: - "Print Eigen Matrix of some kind" + "Print Eigen Matrix or Array of some kind" - def __init__(self, val): + def __init__(self, variety, val): "Extract all the necessary information" + + # Save the variety (presumably "Matrix" or "Array") for later usage + self.variety = variety + # The gdb extension does not support value template arguments - need to extract them by hand type = val.type if type.code == gdb.TYPE_CODE_REF: @@ -55,7 +65,7 @@ class EigenMatrixPrinter: m = regex.findall(tag)[0][1:-1] template_params = m.split(',') template_params = map(lambda x:x.replace(" ", ""), template_params) - + if template_params[1] == '-0x00000000000000001' or template_params[1] == '-0x000000001': self.rows = val['m_storage']['m_rows'] else: @@ -71,9 +81,9 @@ class EigenMatrixPrinter: self.options = template_params[3]; self.rowMajor = (int(self.options) & 0x1) - + self.innerType = self.type.template_argument(0) - + self.val = val # Fixed size matrices have a struct as their storage, so we need to walk through this @@ -90,12 +100,12 @@ class EigenMatrixPrinter: self.currentRow = 0 self.currentCol = 0 self.rowMajor = rowMajor - + def __iter__ (self): return self - + def next(self): - + row = self.currentRow col = self.currentCol if self.rowMajor == 0: @@ -115,7 +125,7 @@ class EigenMatrixPrinter: self.currentCol = 0 self.currentRow = self.currentRow + 1 - + item = self.dataPtr.dereference() self.dataPtr = self.dataPtr + 1 if (self.cols == 1): #if it's a column vector @@ -123,17 +133,17 @@ class EigenMatrixPrinter: elif (self.rows == 1): #if it's a row vector return ('[%d]' % (col,), item) return ('[%d,%d]' % (row, col), item) - + def children(self): return self._iterator(self.rows, self.cols, self.data, self.rowMajor) - + def to_string(self): - return "Eigen::Matrix<%s,%d,%d,%s> (data ptr: %s)" % (self.innerType, self.rows, self.cols, "RowMajor" if self.rowMajor else "ColMajor", self.data) + return "Eigen::%s<%s,%d,%d,%s> (data ptr: %s)" % (self.variety, self.innerType, self.rows, self.cols, "RowMajor" if self.rowMajor else "ColMajor", self.data) class EigenQuaternionPrinter: "Print an Eigen Quaternion" - + def __init__(self, val): "Extract all the necessary information" # The gdb extension does not support value template arguments - need to extract them by hand @@ -153,18 +163,18 @@ class EigenQuaternionPrinter: self.dataPtr = dataPtr self.currentElement = 0 self.elementNames = ['x', 'y', 'z', 'w'] - + def __iter__ (self): return self - + def next(self): element = self.currentElement - + if self.currentElement >= 4: #there are 4 elements in a quanternion raise StopIteration self.currentElement = self.currentElement + 1 - + item = self.dataPtr.dereference() self.dataPtr = self.dataPtr + 1 return ('[%s]' % (self.elementNames[element],), item) @@ -172,13 +182,14 @@ class EigenQuaternionPrinter: def children(self): return self._iterator(self.data) - + def to_string(self): return "Eigen::Quaternion<%s> (data ptr: %s)" % (self.innerType, self.data) def build_eigen_dictionary (): pretty_printers_dict[re.compile('^Eigen::Quaternion<.*>$')] = lambda val: EigenQuaternionPrinter(val) - pretty_printers_dict[re.compile('^Eigen::Matrix<.*>$')] = lambda val: EigenMatrixPrinter(val) + 
pretty_printers_dict[re.compile('^Eigen::Matrix<.*>$')] = lambda val: EigenMatrixPrinter("Matrix", val) + pretty_printers_dict[re.compile('^Eigen::Array<.*>$')] = lambda val: EigenMatrixPrinter("Array", val) def register_eigen_printers(obj): "Register eigen pretty-printers with objfile Obj" @@ -189,22 +200,22 @@ def register_eigen_printers(obj): def lookup_function(val): "Look-up and return a pretty-printer that can print va." - + type = val.type - + if type.code == gdb.TYPE_CODE_REF: type = type.target() type = type.unqualified().strip_typedefs() - + typename = type.tag if typename == None: return None - + for function in pretty_printers_dict: if function.search(typename): return pretty_printers_dict[function](val) - + return None pretty_printers_dict = {} diff --git a/gtsam/3rdparty/Eigen/debug/msvc/eigen_autoexp_part.dat b/gtsam/3rdparty/Eigen/debug/msvc/eigen_autoexp_part.dat index ba7eefc8e..07aa43739 100644 --- a/gtsam/3rdparty/Eigen/debug/msvc/eigen_autoexp_part.dat +++ b/gtsam/3rdparty/Eigen/debug/msvc/eigen_autoexp_part.dat @@ -1,295 +1,295 @@ -; *************************************************************** -; * Eigen Visualizer -; * -; * Author: Hauke Heibel -; * -; * Support the enhanced debugging of the following Eigen -; * types (*: any, +:fixed dimension) : -; * -; * - Eigen::Matrix<*,4,1,*,*,*> and Eigen::Matrix<*,1,4,*,*,*> -; * - Eigen::Matrix<*,3,1,*,*,*> and Eigen::Matrix<*,1,3,*,*,*> -; * - Eigen::Matrix<*,2,1,*,*,*> and Eigen::Matrix<*,1,2,*,*,*> -; * - Eigen::Matrix<*,-1,-1,*,*,*> -; * - Eigen::Matrix<*,+,-1,*,*,*> -; * - Eigen::Matrix<*,-1,+,*,*,*> -; * - Eigen::Matrix<*,+,+,*,*,*> -; * -; * Matrices are displayed properly independantly of the memory -; * alignment (RowMajor vs. ColMajor). -; * -; * This file is distributed WITHOUT ANY WARRANTY. Please ensure -; * that your original autoexp.dat file is copied to a safe -; * place before proceeding with its modification. 
-; *************************************************************** - -[Visualizer] - -; Fixed size 4-vectors -Eigen::Matrix<*,4,1,*,*,*>|Eigen::Matrix<*,1,4,*,*,*>{ - children - ( - #( - [internals]: [$c,!], - x : ($c.m_storage.m_data.array)[0], - y : ($c.m_storage.m_data.array)[1], - z : ($c.m_storage.m_data.array)[2], - w : ($c.m_storage.m_data.array)[3] - ) - ) - - preview - ( - #( - "[", - 4, - "](", - #array(expr: $e.m_storage.m_data.array[$i], size: 4), - ")" - ) - ) -} - -; Fixed size 3-vectors -Eigen::Matrix<*,3,1,*,*,*>|Eigen::Matrix<*,1,3,*,*,*>{ - children - ( - #( - [internals]: [$c,!], - x : ($c.m_storage.m_data.array)[0], - y : ($c.m_storage.m_data.array)[1], - z : ($c.m_storage.m_data.array)[2] - ) - ) - - preview - ( - #( - "[", - 3, - "](", - #array(expr: $e.m_storage.m_data.array[$i], size: 3), - ")" - ) - ) -} - -; Fixed size 2-vectors -Eigen::Matrix<*,2,1,*,*,*>|Eigen::Matrix<*,1,2,*,*,*>{ - children - ( - #( - [internals]: [$c,!], - x : ($c.m_storage.m_data.array)[0], - y : ($c.m_storage.m_data.array)[1] - ) - ) - - preview - ( - #( - "[", - 2, - "](", - #array(expr: $e.m_storage.m_data.array[$i], size: 2), - ")" - ) - ) -} - -; Fixed size 1-vectors -Eigen::Matrix<*,1,1,*,*,*>|Eigen::Matrix<*,1,1,*,*,*>{ - children - ( - #( - [internals]: [$c,!], - x : ($c.m_storage.m_data.array)[0] - ) - ) - - preview - ( - #( - "[", - 1, - "](", - #array(expr: $e.m_storage.m_data.array[$i], size: 1), - ")" - ) - ) -} - -; Dynamic matrices (ColMajor and RowMajor support) -Eigen::Matrix<*,-1,-1,*,*,*>{ - children - ( - #( - [internals]: [$c,!], - rows: $c.m_storage.m_rows, - cols: $c.m_storage.m_cols, - ; Check for RowMajorBit - #if ($c.Flags & 0x1) ( - #array( - rank: 2, - base: 0, - expr: ($c.m_storage.m_data)[($i % $c.m_storage.m_rows)*$c.m_storage.m_cols + (($i- $i % $c.m_storage.m_rows)/$c.m_storage.m_rows)], - size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.m_storage.m_cols - ) - ) #else ( - #array( - rank: 2, - base: 0, - expr: ($c.m_storage.m_data)[$i], - size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.m_storage.m_cols - ) - ) - ) - ) - - preview - ( - #( - "[", - $c.m_storage.m_rows, - ",", - $c.m_storage.m_cols, - "](", - #array( - expr : [($c.m_storage.m_data)[$i],g], - size : $c.m_storage.m_rows*$c.m_storage.m_cols - ), - ")" - ) - ) -} - -; Fixed rows, dynamic columns matrix (ColMajor and RowMajor support) -Eigen::Matrix<*,*,-1,*,*,*>{ - children - ( - #( - [internals]: [$c,!], - rows: $c.RowsAtCompileTime, - cols: $c.m_storage.m_cols, - ; Check for RowMajorBit - #if ($c.Flags & 0x1) ( - #array( - rank: 2, - base: 0, - expr: ($c.m_storage.m_data)[($i % $c.RowsAtCompileTime)*$c.m_storage.m_cols + (($i- $i % $c.RowsAtCompileTime)/$c.RowsAtCompileTime)], - size: ($r==1)*$c.RowsAtCompileTime+($r==0)*$c.m_storage.m_cols - ) - ) #else ( - #array( - rank: 2, - base: 0, - expr: ($c.m_storage.m_data)[$i], - size: ($r==1)*$c.RowsAtCompileTime+($r==0)*$c.m_storage.m_cols - ) - ) - ) - ) - - preview - ( - #( - "[", - $c.RowsAtCompileTime, - ",", - $c.m_storage.m_cols, - "](", - #array( - expr : [($c.m_storage.m_data)[$i],g], - size : $c.RowsAtCompileTime*$c.m_storage.m_cols - ), - ")" - ) - ) -} - -; Dynamic rows, fixed columns matrix (ColMajor and RowMajor support) -Eigen::Matrix<*,-1,*,*,*,*>{ - children - ( - #( - [internals]: [$c,!], - rows: $c.m_storage.m_rows, - cols: $c.ColsAtCompileTime, - ; Check for RowMajorBit - #if ($c.Flags & 0x1) ( - #array( - rank: 2, - base: 0, - expr: ($c.m_storage.m_data)[($i % $c.m_storage.m_rows)*$c.ColsAtCompileTime + (($i- $i % 
$c.m_storage.m_rows)/$c.m_storage.m_rows)], - size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.ColsAtCompileTime - ) - ) #else ( - #array( - rank: 2, - base: 0, - expr: ($c.m_storage.m_data)[$i], - size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.ColsAtCompileTime - ) - ) - ) - ) - - preview - ( - #( - "[", - $c.m_storage.m_rows, - ",", - $c.ColsAtCompileTime, - "](", - #array( - expr : [($c.m_storage.m_data)[$i],g], - size : $c.m_storage.m_rows*$c.ColsAtCompileTime - ), - ")" - ) - ) -} - -; Fixed size matrix (ColMajor and RowMajor support) -Eigen::Matrix<*,*,*,*,*,*>{ - children - ( - #( - [internals]: [$c,!], - rows: $c.RowsAtCompileTime, - cols: $c.ColsAtCompileTime, - ; Check for RowMajorBit - #if ($c.Flags & 0x1) ( - #array( - rank: 2, - base: 0, - expr: ($c.m_storage.m_data.array)[($i % $c.RowsAtCompileTime)*$c.ColsAtCompileTime + (($i- $i % $c.RowsAtCompileTime)/$c.RowsAtCompileTime)], - size: ($r==1)*$c.RowsAtCompileTime+($r==0)*$c.ColsAtCompileTime - ) - ) #else ( - #array( - rank: 2, - base: 0, - expr: ($c.m_storage.m_data.array)[$i], - size: ($r==1)*$c.RowsAtCompileTime+($r==0)*$c.ColsAtCompileTime - ) - ) - ) - ) - - preview - ( - #( - "[", - $c.RowsAtCompileTime, - ",", - $c.ColsAtCompileTime, - "](", - #array( - expr : [($c.m_storage.m_data.array)[$i],g], - size : $c.RowsAtCompileTime*$c.ColsAtCompileTime - ), - ")" - ) - ) -} +; *************************************************************** +; * Eigen Visualizer +; * +; * Author: Hauke Heibel +; * +; * Support the enhanced debugging of the following Eigen +; * types (*: any, +:fixed dimension) : +; * +; * - Eigen::Matrix<*,4,1,*,*,*> and Eigen::Matrix<*,1,4,*,*,*> +; * - Eigen::Matrix<*,3,1,*,*,*> and Eigen::Matrix<*,1,3,*,*,*> +; * - Eigen::Matrix<*,2,1,*,*,*> and Eigen::Matrix<*,1,2,*,*,*> +; * - Eigen::Matrix<*,-1,-1,*,*,*> +; * - Eigen::Matrix<*,+,-1,*,*,*> +; * - Eigen::Matrix<*,-1,+,*,*,*> +; * - Eigen::Matrix<*,+,+,*,*,*> +; * +; * Matrices are displayed properly independantly of the memory +; * alignment (RowMajor vs. ColMajor). +; * +; * This file is distributed WITHOUT ANY WARRANTY. Please ensure +; * that your original autoexp.dat file is copied to a safe +; * place before proceeding with its modification. 
+; *************************************************************** + +[Visualizer] + +; Fixed size 4-vectors +Eigen::Matrix<*,4,1,*,*,*>|Eigen::Matrix<*,1,4,*,*,*>{ + children + ( + #( + [internals]: [$c,!], + x : ($c.m_storage.m_data.array)[0], + y : ($c.m_storage.m_data.array)[1], + z : ($c.m_storage.m_data.array)[2], + w : ($c.m_storage.m_data.array)[3] + ) + ) + + preview + ( + #( + "[", + 4, + "](", + #array(expr: $e.m_storage.m_data.array[$i], size: 4), + ")" + ) + ) +} + +; Fixed size 3-vectors +Eigen::Matrix<*,3,1,*,*,*>|Eigen::Matrix<*,1,3,*,*,*>{ + children + ( + #( + [internals]: [$c,!], + x : ($c.m_storage.m_data.array)[0], + y : ($c.m_storage.m_data.array)[1], + z : ($c.m_storage.m_data.array)[2] + ) + ) + + preview + ( + #( + "[", + 3, + "](", + #array(expr: $e.m_storage.m_data.array[$i], size: 3), + ")" + ) + ) +} + +; Fixed size 2-vectors +Eigen::Matrix<*,2,1,*,*,*>|Eigen::Matrix<*,1,2,*,*,*>{ + children + ( + #( + [internals]: [$c,!], + x : ($c.m_storage.m_data.array)[0], + y : ($c.m_storage.m_data.array)[1] + ) + ) + + preview + ( + #( + "[", + 2, + "](", + #array(expr: $e.m_storage.m_data.array[$i], size: 2), + ")" + ) + ) +} + +; Fixed size 1-vectors +Eigen::Matrix<*,1,1,*,*,*>|Eigen::Matrix<*,1,1,*,*,*>{ + children + ( + #( + [internals]: [$c,!], + x : ($c.m_storage.m_data.array)[0] + ) + ) + + preview + ( + #( + "[", + 1, + "](", + #array(expr: $e.m_storage.m_data.array[$i], size: 1), + ")" + ) + ) +} + +; Dynamic matrices (ColMajor and RowMajor support) +Eigen::Matrix<*,-1,-1,*,*,*>{ + children + ( + #( + [internals]: [$c,!], + rows: $c.m_storage.m_rows, + cols: $c.m_storage.m_cols, + ; Check for RowMajorBit + #if ($c.Flags & 0x1) ( + #array( + rank: 2, + base: 0, + expr: ($c.m_storage.m_data)[($i % $c.m_storage.m_rows)*$c.m_storage.m_cols + (($i- $i % $c.m_storage.m_rows)/$c.m_storage.m_rows)], + size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.m_storage.m_cols + ) + ) #else ( + #array( + rank: 2, + base: 0, + expr: ($c.m_storage.m_data)[$i], + size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.m_storage.m_cols + ) + ) + ) + ) + + preview + ( + #( + "[", + $c.m_storage.m_rows, + ",", + $c.m_storage.m_cols, + "](", + #array( + expr : [($c.m_storage.m_data)[$i],g], + size : $c.m_storage.m_rows*$c.m_storage.m_cols + ), + ")" + ) + ) +} + +; Fixed rows, dynamic columns matrix (ColMajor and RowMajor support) +Eigen::Matrix<*,*,-1,*,*,*>{ + children + ( + #( + [internals]: [$c,!], + rows: $c.RowsAtCompileTime, + cols: $c.m_storage.m_cols, + ; Check for RowMajorBit + #if ($c.Flags & 0x1) ( + #array( + rank: 2, + base: 0, + expr: ($c.m_storage.m_data)[($i % $c.RowsAtCompileTime)*$c.m_storage.m_cols + (($i- $i % $c.RowsAtCompileTime)/$c.RowsAtCompileTime)], + size: ($r==1)*$c.RowsAtCompileTime+($r==0)*$c.m_storage.m_cols + ) + ) #else ( + #array( + rank: 2, + base: 0, + expr: ($c.m_storage.m_data)[$i], + size: ($r==1)*$c.RowsAtCompileTime+($r==0)*$c.m_storage.m_cols + ) + ) + ) + ) + + preview + ( + #( + "[", + $c.RowsAtCompileTime, + ",", + $c.m_storage.m_cols, + "](", + #array( + expr : [($c.m_storage.m_data)[$i],g], + size : $c.RowsAtCompileTime*$c.m_storage.m_cols + ), + ")" + ) + ) +} + +; Dynamic rows, fixed columns matrix (ColMajor and RowMajor support) +Eigen::Matrix<*,-1,*,*,*,*>{ + children + ( + #( + [internals]: [$c,!], + rows: $c.m_storage.m_rows, + cols: $c.ColsAtCompileTime, + ; Check for RowMajorBit + #if ($c.Flags & 0x1) ( + #array( + rank: 2, + base: 0, + expr: ($c.m_storage.m_data)[($i % $c.m_storage.m_rows)*$c.ColsAtCompileTime + (($i- $i % 
$c.m_storage.m_rows)/$c.m_storage.m_rows)], + size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.ColsAtCompileTime + ) + ) #else ( + #array( + rank: 2, + base: 0, + expr: ($c.m_storage.m_data)[$i], + size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.ColsAtCompileTime + ) + ) + ) + ) + + preview + ( + #( + "[", + $c.m_storage.m_rows, + ",", + $c.ColsAtCompileTime, + "](", + #array( + expr : [($c.m_storage.m_data)[$i],g], + size : $c.m_storage.m_rows*$c.ColsAtCompileTime + ), + ")" + ) + ) +} + +; Fixed size matrix (ColMajor and RowMajor support) +Eigen::Matrix<*,*,*,*,*,*>{ + children + ( + #( + [internals]: [$c,!], + rows: $c.RowsAtCompileTime, + cols: $c.ColsAtCompileTime, + ; Check for RowMajorBit + #if ($c.Flags & 0x1) ( + #array( + rank: 2, + base: 0, + expr: ($c.m_storage.m_data.array)[($i % $c.RowsAtCompileTime)*$c.ColsAtCompileTime + (($i- $i % $c.RowsAtCompileTime)/$c.RowsAtCompileTime)], + size: ($r==1)*$c.RowsAtCompileTime+($r==0)*$c.ColsAtCompileTime + ) + ) #else ( + #array( + rank: 2, + base: 0, + expr: ($c.m_storage.m_data.array)[$i], + size: ($r==1)*$c.RowsAtCompileTime+($r==0)*$c.ColsAtCompileTime + ) + ) + ) + ) + + preview + ( + #( + "[", + $c.RowsAtCompileTime, + ",", + $c.ColsAtCompileTime, + "](", + #array( + expr : [($c.m_storage.m_data.array)[$i],g], + size : $c.RowsAtCompileTime*$c.ColsAtCompileTime + ), + ")" + ) + ) +} diff --git a/gtsam/3rdparty/Eigen/doc/C00_QuickStartGuide.dox b/gtsam/3rdparty/Eigen/doc/C00_QuickStartGuide.dox index ad772b2e1..8534cb0c3 100644 --- a/gtsam/3rdparty/Eigen/doc/C00_QuickStartGuide.dox +++ b/gtsam/3rdparty/Eigen/doc/C00_QuickStartGuide.dox @@ -25,6 +25,10 @@ There is no library to link to. The only thing that you need to keep in mind whe \code g++ -I /path/to/eigen/ my_program.cpp -o my_program \endcode +On Linux or Mac OS X, another option is to symlink or copy the Eigen folder into /usr/local/include/. This way, you can compile the program with: + +\code g++ my_program.cpp -o my_program \endcode + When you run the program, it produces the following output: \include QuickStart_example.out diff --git a/gtsam/3rdparty/Eigen/doc/C06_TutorialLinearAlgebra.dox b/gtsam/3rdparty/Eigen/doc/C06_TutorialLinearAlgebra.dox index 77f13f4a0..e8b3b7953 100644 --- a/gtsam/3rdparty/Eigen/doc/C06_TutorialLinearAlgebra.dox +++ b/gtsam/3rdparty/Eigen/doc/C06_TutorialLinearAlgebra.dox @@ -144,6 +144,9 @@ You need an eigendecomposition here, see available such decompositions on \ref T Make sure to check if your matrix is self-adjoint, as is often the case in these problems. Here's an example using SelfAdjointEigenSolver, it could easily be adapted to general matrices using EigenSolver or ComplexEigenSolver. +The computation of eigenvalues and eigenvectors does not necessarily converge, but such failure to converge is +very rare. The call to info() is to check for this possibility. + diff --git a/gtsam/3rdparty/Eigen/doc/C07_TutorialReductionsVisitorsBroadcasting.dox b/gtsam/3rdparty/Eigen/doc/C07_TutorialReductionsVisitorsBroadcasting.dox index e58ff6e2c..f3879b8b9 100644 --- a/gtsam/3rdparty/Eigen/doc/C07_TutorialReductionsVisitorsBroadcasting.dox +++ b/gtsam/3rdparty/Eigen/doc/C07_TutorialReductionsVisitorsBroadcasting.dox @@ -191,12 +191,27 @@ This can be accomplished with: \verbinclude Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple.out
Example:Output:
+We can interpret the instruction mat.colwise() += v in two equivalent ways. It adds the vector \c v +to every column of the matrix. Alternatively, it can be interpreted as repeating the vector \c v four times to +form a four-by-two matrix which is then added to \c mat: +\f[ +\begin{bmatrix} 1 & 2 & 6 & 9 \\ 3 & 1 & 7 & 2 \end{bmatrix} ++ \begin{bmatrix} 0 & 0 & 0 & 0 \\ 1 & 1 & 1 & 1 \end{bmatrix} += \begin{bmatrix} 1 & 2 & 6 & 9 \\ 4 & 2 & 8 & 3 \end{bmatrix}. +\f] +The operators -=, + and - can also be used column-wise and row-wise. On arrays, we +can also use the operators *=, /=, * and / to perform coefficient-wise +multiplication and division column-wise or row-wise. These operators are not available on matrices because it +is not clear what they would do. If you want multiply column 0 of a matrix \c mat with \c v(0), column 1 with +\c v(1), and so on, then use mat = mat * v.asDiagonal(). + It is important to point out that the vector to be added column-wise or row-wise must be of type Vector, and cannot be a Matrix. If this is not met then you will get compile-time error. This also means that broadcasting operations can only be applied with an object of type Vector, when operating with Matrix. -The same applies for the Array class, where the equivalent for VectorXf is ArrayXf. +The same applies for the Array class, where the equivalent for VectorXf is ArrayXf. As always, you should +not mix arrays and matrices in the same expression. -Therefore, to perform the same operation row-wise we can do: +To perform the same operation row-wise we can do: diff --git a/gtsam/3rdparty/Eigen/doc/C08_TutorialGeometry.dox b/gtsam/3rdparty/Eigen/doc/C08_TutorialGeometry.dox index 452abda10..b9e9eba12 100644 --- a/gtsam/3rdparty/Eigen/doc/C08_TutorialGeometry.dox +++ b/gtsam/3rdparty/Eigen/doc/C08_TutorialGeometry.dox @@ -45,16 +45,17 @@ But note that unfortunately, because of how C++ works, you can \b not do this: Rotation2D rot2(angle_in_radian);\endcode +AngleAxis aa(angle_in_radian, Vector3f(ax,ay,az));\endcode +The axis vector must be normalized. +Scaling(sx, sy) +Scaling(sx, sy, sz) +Scaling(s) +Scaling(vecN)\endcode +Transform t = Translation3f(p) * AngleAxisf(a,axis) * Scaling(s);\endcode +Matrix t = Rotation2Df(a) * Scaling(s); +Matrix t = AngleAxisf(a,axis) * Scaling(s);\endcode
Example:Output:
3D rotation as an \ref AngleAxis "angle + axis"\code -AngleAxis aa(angle_in_radian, Vector3f(ax,ay,az));\endcode
3D rotation as a \ref Quaternion "quaternion"\code Quaternion q; q = AngleAxis(angle_in_radian, axis);\endcode
N-D Scaling\code -Scaling(sx, sy) -Scaling(sx, sy, sz) -Scaling(s) -Scaling(vecN)\endcode
N-D Translation\code Translation(tx, ty) @@ -64,13 +65,13 @@ Translation(vecN)\endcode
N-D \ref TutorialGeoTransform "Affine transformation"\code Transform t = concatenation_of_any_transformations; -Transform t = Translation3f(p) * AngleAxisf(a,axis) * Scaling3f(s);\endcode
N-D Linear transformations \n (pure rotations, \n scaling, etc.)\code Matrix t = concatenation_of_rotations_and_scalings; -Matrix t = Rotation2Df(a) * Scaling2f(s); -Matrix t = AngleAxisf(a,axis) * Scaling3f(s);\endcode
Notes on rotations\n To transform more than a single vector the preferred @@ -92,8 +93,8 @@ Rotation2Df r; r = Matrix2f(..); // assumes a pure rotation matrix AngleAxisf aa; aa = Quaternionf(..); AngleAxisf aa; aa = Matrix3f(..); // assumes a pure rotation matrix Matrix2f m; m = Rotation2Df(..); -Matrix3f m; m = Quaternionf(..); Matrix3f m; m = Scaling3f(..); -Affine3f m; m = AngleAxis3f(..); Affine3f m; m = Scaling3f(..); +Matrix3f m; m = Quaternionf(..); Matrix3f m; m = Scaling(..); +Affine3f m; m = AngleAxis3f(..); Affine3f m; m = Scaling(..); Affine3f m; m = Translation3f(..); Affine3f m; m = Matrix3f(..); \endcode
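For comparison with the updated table entries above, here is a small illustrative sketch of how the uniform Scaling() helper composes with translations and rotations; it assumes the Eigen 3.1 Geometry module and is not code taken from the patch itself:
\code
#include <Eigen/Geometry>
using namespace Eigen;

int main()
{
  Vector3f axis = Vector3f::UnitZ();
  float angle = 0.5f, s = 2.0f;
  Vector3f p(1, 2, 3);

  // Affine transform: translate, then rotate, then scale uniformly
  Affine3f t = Translation3f(p) * AngleAxisf(angle, axis) * Scaling(s);

  // Pure linear part: rotation combined with a uniform scaling
  Matrix3f m;
  m = AngleAxisf(angle, axis) * Scaling(s);

  Vector3f q = t * Vector3f(0, 0, 1);   // apply the transform to a point
  return 0;
}
\endcode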
@@ -207,10 +208,10 @@ t.scale(s); t.prescale(Vector_(sx,sy,..)); t.prescale(s); \endcode\code -t *= Scaling_(sx,sy,..); -t *= Scaling_(s); -t = Scaling_(sx,sy,..) * t; -t = Scaling_(s) * t; +t *= Scaling(sx,sy,..); +t *= Scaling(s); +t = Scaling(sx,sy,..) * t; +t = Scaling(s) * t; \endcode Shear transformation \n ( \b 2D \b only ! )\code t.shear(sx,sy); @@ -224,7 +225,7 @@ Note that in both API, any many transformations can be concatenated in a single t.pretranslate(..).rotate(..).translate(..).scale(..); \endcode \code -t = Translation_(..) * t * RotationType(..) * Translation_(..) * Scaling_(..); +t = Translation_(..) * t * RotationType(..) * Translation_(..) * Scaling(..); \endcode diff --git a/gtsam/3rdparty/Eigen/doc/C09_TutorialSparse.dox b/gtsam/3rdparty/Eigen/doc/C09_TutorialSparse.dox index 737f4cc09..34154bd0d 100644 --- a/gtsam/3rdparty/Eigen/doc/C09_TutorialSparse.dox +++ b/gtsam/3rdparty/Eigen/doc/C09_TutorialSparse.dox @@ -8,82 +8,144 @@ namespace Eigen { \b Table \b of \b contents \n - \ref TutorialSparseIntro + - \ref TutorialSparseExample "Example" + - \ref TutorialSparseSparseMatrix - \ref TutorialSparseFilling - - \ref TutorialSparseFeatureSet - \ref TutorialSparseDirectSolvers + - \ref TutorialSparseFeatureSet + - \ref TutorialSparse_BasicOps + - \ref TutorialSparse_Products + - \ref TutorialSparse_TriangularSelfadjoint + - \ref TutorialSparse_Submat + +
-\section TutorialSparseIntro Sparse matrix representations +Manipulating and solving sparse problems involves various modules which are summarized below: -In many applications (e.g., finite element methods) it is common to deal with very large matrices where only a few coefficients are different than zero. Both in term of memory consumption and performance, it is fundamental to use an adequate representation storing only nonzero coefficients. Such a matrix is called a sparse matrix. - -\b Declaring \b sparse \b matrices \b and \b vectors \n -The SparseMatrix class is the main sparse matrix representation of the Eigen's sparse module which offers high performance, low memory usage, and compatibility with most of sparse linear algebra packages. Because of its limited flexibility, we also provide a DynamicSparseMatrix variante taillored for low-level sparse matrix assembly. Both of them can be either row major or column major: - -\code -#include -SparseMatrix > m1(1000,2000); // declare a 1000x2000 col-major compressed sparse matrix of complex -SparseMatrix m2(1000,2000); // declare a 1000x2000 row-major compressed sparse matrix of double -DynamicSparseMatrix > m1(1000,2000); // declare a 1000x2000 col-major dynamic sparse matrix of complex -DynamicSparseMatrix m2(1000,2000); // declare a 1000x2000 row-major dynamic sparse matrix of double -\endcode - -Although a sparse matrix could also be used to represent a sparse vector, for that purpose it is better to use the specialized SparseVector class: -\code -SparseVector > v1(1000); // declare a column sparse vector of complex of size 1000 -SparseVector v2(1000); // declare a row sparse vector of double of size 1000 -\endcode -Note that here the size of a vector denotes its dimension and not the number of nonzero coefficients which is initially zero (like sparse matrices). - - -\b Overview \b of \b the \b internal \b sparse \b storage \n -In order to get the best of the Eigen's sparse objects, it is important to have a rough idea of the way they are internally stored. The SparseMatrix class implements the common and generic Compressed Column/Row Storage scheme. It consists of three compact arrays storing the values with their respective inner coordinates, and pointer indices to the begining of each outer vector. For instance, let \c m be a column-major sparse matrix. Then its nonzero coefficients are sequentially stored in memory in a column-major order (\em values). A second array of integer stores the respective row index of each coefficient (\em inner \em indices). Finally, a third array of integer, having the same length than the number of columns, stores the index in the previous arrays of the first element of each column (\em outer \em indices). - -Here is an example, with the matrix: - - - - - + + + + +
03000
2200017
75010
00000
001408
ModuleHeader fileContents
\link Sparse_Module SparseCore \endlink\code#include \endcodeSparseMatrix and SparseVector classes, matrix assembly, basic sparse linear algebra (including sparse triangular solvers)
\link SparseCholesky_Module SparseCholesky \endlink\code#include \endcodeDirect sparse LLT and LDLT Cholesky factorization to solve sparse self-adjoint positive definite problems
\link IterativeLinearSolvers_Module IterativeLinearSolvers \endlink\code#include \endcodeIterative solvers to solve large general linear square problems (including self-adjoint positive definite problems)
\code#include \endcodeIncludes all the above modules
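In code, pulling in the modules listed in this table looks as follows (header names as shipped in this Eigen 3.1 tree):
\code
#include <Eigen/SparseCore>             // SparseMatrix, SparseVector, assembly, basic sparse linear algebra
#include <Eigen/SparseCholesky>         // SimplicialLLT and SimplicialLDLT factorizations
#include <Eigen/IterativeLinearSolvers> // ConjugateGradient, BiCGSTAB and preconditioners
#include <Eigen/Sparse>                 // convenience header including all of the above
\endcode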
-and its internal representation using the Compressed Column Storage format: +\section TutorialSparseIntro Sparse matrix representation + +In many applications (e.g., finite element methods) it is common to deal with very large matrices where only a few coefficients are different from zero. In such cases, memory consumption can be reduced and performance increased by using a specialized representation storing only the nonzero coefficients. Such a matrix is called a sparse matrix. + +\b The \b %SparseMatrix \b class + +The class SparseMatrix is the main sparse matrix representation of Eigen's sparse module; it offers high performance and low memory usage. +It implements a more versatile variant of the widely-used Compressed Column (or Row) Storage scheme. +It consists of four compact arrays: + - \c Values: stores the coefficient values of the non-zeros. + - \c InnerIndices: stores the row (resp. column) indices of the non-zeros. + - \c OuterStarts: stores for each column (resp. row) the index of the first non-zero in the previous two arrays. + - \c InnerNNZs: stores the number of non-zeros of each column (resp. row). +The word \c inner refers to an \em inner \em vector that is a column for a column-major matrix, or a row for a row-major matrix. +The word \c outer refers to the other direction. + +This storage scheme is better explained on an example. The following matrix + + + + + + +
03 00 0
220 0017
75 01 0
00 00 0
00140 8
+ +and one of its possible sparse, \b column \b major representation: + + + +
Values: 227_3514__1_178
InnerIndices: 12_02 4__2_ 14
+ + + +
OuterStarts:035810\em 12
InnerNNZs: 2211 2
+ +Currently the elements of a given inner vector are guaranteed to always be sorted by increasing inner indices. +The \c "_" indicates available free space to quickly insert new elements. +Assuming no reallocation is needed, the insertion of a random element is therefore in O(nnz_j) where nnz_j is the number of nonzeros of the respective inner vector. +On the other hand, inserting elements with increasing inner indices in a given inner vector is much more efficient since this only requires increasing the respective \c InnerNNZs entry, which is an O(1) operation. + +The case where no empty space is available is a special case, referred to as the \em compressed mode. +It corresponds to the widely used Compressed Column (or Row) Storage schemes (CCS or CRS). +Any SparseMatrix can be turned into this form by calling the SparseMatrix::makeCompressed() function. +In this case, one can remark that the \c InnerNNZs array is redundant with \c OuterStarts because we have the equality: \c InnerNNZs[j] = \c OuterStarts[j+1]-\c OuterStarts[j]. +Therefore, in practice a call to SparseMatrix::makeCompressed() frees this buffer. + +It is worth noting that most of our wrappers to external libraries require compressed matrices as inputs. + +The results of %Eigen's operations always produce \b compressed sparse matrices. +On the other hand, the insertion of a new element into a SparseMatrix converts the latter to the \b uncompressed mode. + +Here is the previous matrix represented in compressed mode: -
Values: 22735141178
Inner indices: 1202 42 14
InnerIndices: 1202 42 14
+ +
OuterStarts:02456\em 8
-Outer indices:
02456\em 7
-As you might guess, here the storage order is even more important than with dense matrices. We will therefore often make a clear difference between the \em inner and \em outer dimensions. For instance, it is efficient to loop over the coefficients of an \em inner \em vector (e.g., a column of a column-major matrix), but completely inefficient to do the same for an \em outer \em vector (e.g., a row of a column-major matrix). +A SparseVector is a special case of a SparseMatrix where only the \c Values and \c InnerIndices arrays are stored. +There is no notion of compressed/uncompressed mode for a SparseVector. -The SparseVector class implements the same compressed storage scheme but, of course, without any outer index buffer. -Since all nonzero coefficients of such a matrix are sequentially stored in memory, inserting a new nonzero near the "beginning" of the matrix can be extremely costly. As described below (\ref TutorialSparseFilling), one strategy is to fill nonzero coefficients in order. In cases where this is not possible, Eigen's sparse module also provides a DynamicSparseMatrix class which allows efficient random insertion. DynamicSparseMatrix is essentially implemented as an array of SparseVector, where the values and inner-indices arrays have been split into multiple small and resizable arrays. Assuming the number of nonzeros per inner vector is relatively small, this modification allows for very fast random insertion at the cost of a slight memory overhead (due to extra memory preallocated by each inner vector to avoid an expensive memory reallocation at every insertion) and a loss of compatibility with other sparse libraries used by some of our high-level solvers. Once complete, a DynamicSparseMatrix can be converted to a SparseMatrix to permit usage of these sparse libraries. +\section TutorialSparseExample First example -To summarize, it is recommended to use SparseMatrix whenever possible, and reserve the use of DynamicSparseMatrix to assemble a sparse matrix in cases when a SparseMatrix is not flexible enough. The respective pros/cons of both representations are summarized in the following table: +Before describing each individual class, let's start with the following typical example: solving the Lapace equation \f$ \nabla u = 0 \f$ on a regular 2D grid using a finite difference scheme and Dirichlet boundary conditions. +Such problem can be mathematically expressed as a linear problem of the form \f$ Ax=b \f$ where \f$ x \f$ is the vector of \c m unknowns (in our case, the values of the pixels), \f$ b \f$ is the right hand side vector resulting from the boundary conditions, and \f$ A \f$ is an \f$ m \times m \f$ matrix containing only a few non-zero elements resulting from the discretization of the Laplacian operator. - - - - - - - - - - - - - -
SparseMatrixDynamicSparseMatrix
memory efficiency*****
sorted insertion******
random insertion \n in sorted inner vector****
sorted insertion \n in random inner vector-***
random insertion-**
coeff wise unary operators******
coeff wise binary operators******
matrix products*****(*)
transpose*****
redux*****
*= scalar*****
Compatibility with highlevel solvers \n (TAUCS, Cholmod, SuperLU, UmfPack)***-
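The assembly-and-solve pattern described below can be sketched compactly as follows; the small tridiagonal matrix here is only a stand-in for the 2D Laplacian assembled by the real Tutorial_sparse_example.cpp:
\code
#include <Eigen/Sparse>
#include <vector>

typedef Eigen::SparseMatrix<double> SpMat;
typedef Eigen::Triplet<double> T;        // one (row, column, value) entry

int main()
{
  const int m = 4;
  std::vector<T> coefficients;           // entries may be listed in any order; duplicates are summed
  for (int i = 0; i < m; ++i) {
    coefficients.push_back(T(i, i, 2.0));
    if (i > 0)     coefficients.push_back(T(i, i - 1, -1.0));
    if (i < m - 1) coefficients.push_back(T(i, i + 1, -1.0));
  }

  SpMat A(m, m);
  A.setFromTriplets(coefficients.begin(), coefficients.end());

  Eigen::VectorXd b = Eigen::VectorXd::Ones(m);
  Eigen::SimplicialLDLT<SpMat> solver;   // direct LDLt factorization; A is SPD by construction
  solver.compute(A);
  Eigen::VectorXd x = solver.solve(b);   // solution of A x = b
  return 0;
}
\endcode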
+ +\include Tutorial_sparse_example.cpp + + +\image html Tutorial_sparse_example.jpeg + + +In this example, we start by defining a column-major sparse matrix type of double \c SparseMatrix, and a triplet list of the same scalar type \c Triplet. A triplet is a simple object representing a non-zero entry as the triplet: \c row index, \c column index, \c value. + +In the main function, we declare a list \c coefficients of triplets (as a std vector) and the right hand side vector \f$ b \f$ which are filled by the \a buildProblem function. +The raw and flat list of non-zero entries is then converted to a true SparseMatrix object \c A. +Note that the elements of the list do not have to be sorted, and possible duplicate entries will be summed up. + +The last step consists of effectively solving the assembled problem. +Since the resulting matrix \c A is symmetric by construction, we can perform a direct Cholesky factorization via the SimplicialLDLT class which behaves like its LDLT counterpart for dense objects. + +The resulting vector \c x contains the pixel values as a 1D array which is saved to a jpeg file shown on the right of the code above. + +Describing the \a buildProblem and \a save functions is out of the scope of this tutorial. They are given \ref TutorialSparse_example_details "here" for the curious and reproducibility purpose. -\b Matrix \b and \b vector \b properties \n -Here mat and vec represent any sparse-matrix and sparse-vector type, respectively. +\section TutorialSparseSparseMatrix The SparseMatrix class + +\b %Matrix \b and \b vector \b properties \n + +The SparseMatrix and SparseVector classes take three template arguments: + * the scalar type (e.g., double) + * the storage order (ColMajor or RowMajor, the default is RowMajor) + * the inner index type (default is \c int). + +As for dense Matrix objects, constructors takes the size of the object. +Here are some examples: + +\code +SparseMatrix > mat(1000,2000); // declares a 1000x2000 column-major compressed sparse matrix of complex +SparseMatrix mat(1000,2000); // declares a 1000x2000 row-major compressed sparse matrix of double +SparseVector > vec(1000); // declares a column sparse vector of complex of size 1000 +SparseVector vec(1000); // declares a row sparse vector of double of size 1000 +\endcode + +In the rest of the tutorial, \c mat and \c vec represent any sparse-matrix and sparse-vector objects, respectively. + +The dimensions of a matrix can be queried using the following functions: \b Iterating \b over \b the \b nonzero \b coefficients \n -Iterating over the coefficients of a sparse matrix can be done only in the same order as the storage order. Here is an example: +Random access to the elements of a sparse object can be done through the \c coeffRef(i,j) function. +However, this function involves a quite expensive binary search. +In most cases, one only wants to iterate over the non-zeros elements. This is achieved by a standard loop over the outer dimension, and then by iterating over the non-zeros of the current inner vector via an InnerIterator. Thus, the non-zero entries have to be visited in the same order than the storage order. +Here is an example:
Standard \n dimensions\code mat.rows() @@ -105,13 +167,16 @@ vec.nonZeros() \endcode
\code -SparseMatrixType mat(rows,cols); -for (int k=0; k mat(rows,cols); +for (int k=0; k::InnerIterator it(mat,k); it; ++it) { it.value(); it.row(); // row index @@ -130,129 +195,258 @@ for (SparseVector::InnerIterator it(vec); it; ++it) \endcode
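Wrapped into a complete function, the iteration pattern shown above reads as follows (an illustrative sketch):
\code
#include <Eigen/SparseCore>

// Sum all stored coefficients of a column-major sparse matrix.
double sumNonZeros(const Eigen::SparseMatrix<double>& mat)
{
  double sum = 0;
  for (int k = 0; k < mat.outerSize(); ++k)                                // loop over columns
    for (Eigen::SparseMatrix<double>::InnerIterator it(mat, k); it; ++it)  // loop over non-zeros of column k
      sum += it.value();                   // it.row(), it.col() and it.index() are also available
  return sum;
}
\endcode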
+For a writable expression, the referenced value can be modified using the valueRef() function. +If the type of the sparse matrix or vector depends on a template parameter, then the \c typename keyword is +required to indicate that \c InnerIterator denotes a type; see \ref TopicTemplateKeyword for details. \section TutorialSparseFilling Filling a sparse matrix -Owing to the special storage scheme of a SparseMatrix, it is obvious that for performance reasons a sparse matrix cannot be filled as easily as a dense matrix. For instance the cost of a purely random insertion into a SparseMatrix is in O(nnz) where nnz is the current number of non zeros. In order to cover all uses cases with best efficiency, Eigen provides various mechanisms, from the easiest but slowest, to the fastest but restrictive one. +Because of the special storage scheme of a SparseMatrix, special care has to be taken when adding new nonzero entries. +For instance, the cost of a single purely random insertion into a SparseMatrix is \c O(nnz), where \c nnz is the current number of non-zero coefficients. -If you don't have any prior knowledge about the order your matrix will be filled, then the best choice is to use a DynamicSparseMatrix. With a DynamicSparseMatrix, you can add or modify any coefficients at any time using the coeffRef(row,col) method. Here is an example: +The simplest way to create a sparse matrix while guaranteeing good performance is thus to first build a list of so-called \em triplets, and then convert it to a SparseMatrix. + +Here is a typical usage example: \code -DynamicSparseMatrix aux(1000,1000); -aux.reserve(estimated_number_of_non_zero); // optional -for (...) - for each j // the j can be random - for each i interacting with j // the i can be random - aux.coeffRef(i,j) += foo(i,j); -\endcode -Then the DynamicSparseMatrix object can be converted to a compact SparseMatrix to be used, e.g., by one of our supported solver: -\code -SparseMatrix mat(aux); -\endcode - -In order to optimize this process, instead of the generic coeffRef(i,j) method one can also use: - - \code m.insert(i,j) = value; \endcode which assumes the coefficient of coordinate (row,col) does not already exist (otherwise this is a programming error and your program will stop). - - \code m.insertBack(i,j) = value; \endcode which, in addition to the requirements of insert(), also assumes that the coefficient of coordinate (row,col) will be inserted at the end of the target inner-vector. More precisely, if the matrix m is column major, then the row index of the last non zero coefficient of the j-th column must be smaller than i. - - -Actually, the SparseMatrix class also supports random insertion via the insert() method. However, its uses should be reserved in cases where the inserted non zero is nearly the last one of the compact storage array. In practice, this means it should be used only to perform random (or sorted) insertion into the current inner-vector while filling the inner-vectors in an increasing order. Moreover, with a SparseMatrix an insertion session must be closed by a call to finalize() before any use of the matrix. 
Here is an example for a column major matrix: - -\code -SparseMatrix mat(1000,1000); -mat.reserve(estimated_number_of_non_zero); // optional -for each j // should be in increasing order for performance reasons - for each i interacting with j // the i can be random - mat.insert(i,j) = foo(i,j); // optional for a DynamicSparseMatrix -mat.finalize(); -\endcode - -Finally, the fastest way to fill a SparseMatrix object is to insert the elements in a purely coherence order (increasing inner index per increasing outer index). To this end, Eigen provides a very low but optimal API and illustrated below: - -\code -SparseMatrix mat(1000,1000); -mat.reserve(estimated_number_of_non_zero); // optional -for(int j=0; j<1000; ++j) +typedef Eigen::Triplet T; +std::vector tripletList; +triplets.reserve(estimation_of_entries); +for(...) { - mat.startVec(j); // optional for a DynamicSparseMatrix - for each i interacting with j // with increasing i - mat.insertBack(i,j) = foo(i,j); + // ... + tripletList.push_back(T(i,j,v_ij)); } -mat.finalize(); // optional for a DynamicSparseMatrix +SparseMatrixType mat(rows,cols); +mat.setFromTriplets(tripletList.begin(), tripletList.end()); +// mat is ready to go! \endcode -Note that there also exist the insertBackByOuterInner(Index outer, Index, inner) function which allows to write code agnostic to the storage order. +The \c std::vector of triplets might contain the elements in arbitrary order, and might even contain duplicated elements that will be summed up by setFromTriplets(). +See the SparseMatrix::setFromTriplets() function and class Triplet for more details. -\section TutorialSparseFeatureSet Supported operators and functions -In the following \em sm denote a sparse matrix, \em sv a sparse vector, \em dm a dense matrix, and \em dv a dense vector. -In Eigen's sparse module we chose to expose only the subset of the dense matrix API which can be efficiently implemented. Moreover, all combinations are not always possible. For instance, it is not possible to add two sparse matrices having two different storage order. On the other hand it is perfectly fine to evaluate a sparse matrix/expression to a matrix having a different storage order: +In some cases, however, slightly higher performance, and lower memory consumption can be reached by directly inserting the non-zeros into the destination matrix. +A typical scenario of this approach is illustrated bellow: \code -SparseMatrixType sm1, sm2, sm3; -sm3 = sm1.transpose() + sm2; // invalid -sm3 = SparseMatrixType(sm1.transpose()) + sm2; // correct +1: SparseMatrix mat(rows,cols); // default is column major +2: mat.reserve(VectorXi::Constant(cols,6)); +3: for each i,j such that v_ij != 0 +4: mat.insert(i,j) = v_ij; // alternative: mat.coeffRef(i,j) += v_ij; +5: mat.makeCompressed(); // optional \endcode -Here are some examples of the supported operations: +- The key ingredient here is the line 2 where we reserve room for 6 non-zeros per column. In many cases, the number of non-zeros per column or row can easily be known in advance. If it varies significantly for each inner vector, then it is possible to specify a reserve size for each inner vector by providing a vector object with an operator[](int j) returning the reserve size of the \c j-th inner vector (e.g., via a VectorXi or std::vector). If only a rought estimate of the number of nonzeros per inner-vector can be obtained, it is highly recommended to overestimate it rather than the opposite. 
If this line is omitted, then the first insertion of a new element will reserve room for 2 elements per inner vector. +- Line 4 performs a sorted insertion. In this example, the ideal case is when the \c j-th column is not full and contains non-zeros whose inner indices are smaller than \c i. In this case, this operation boils down to a trivial O(1) operation. +- When calling insert(i,j), the element (i,j) must not already exist; otherwise use the coeffRef(i,j) method, which allows one to, e.g., accumulate values. This method first performs a binary search and finally calls insert(i,j) if the element does not already exist. It is more flexible than insert() but also more costly. +- Line 5 suppresses the remaining empty space and transforms the matrix into compressed column storage. + + +\section TutorialSparseDirectSolvers Solving linear problems + +%Eigen currently provides a limited set of built-in solvers, as well as wrappers to external solver libraries. +They are summarized in the following table: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ClassModuleSolver kindMatrix kindFeatures related to performanceDependencies,License

Notes

SimplicialLLT \link SparseCholesky_Module SparseCholesky \endlinkDirect LLt factorizationSPDFill-in reducingbuilt-in, LGPLSimplicialLDLT is often preferable
SimplicialLDLT \link SparseCholesky_Module SparseCholesky \endlinkDirect LDLt factorizationSPDFill-in reducingbuilt-in, LGPLRecommended for very sparse and not too large problems (e.g., 2D Poisson eq.)
ConjugateGradient\link IterativeLinearSolvers_Module IterativeLinearSolvers \endlinkClassic iterative CGSPDPreconditionningbuilt-in, LGPLRecommended for large symmetric problems (e.g., 3D Poisson eq.)
BiCGSTAB\link IterativeLinearSolvers_Module IterativeLinearSolvers \endlinkIterative stabilized bi-conjugate gradientSquarePreconditionningbuilt-in, LGPLMight not always converge
PastixLLT \n PastixLDLT \n PastixLU\link PaStiXSupport_Module PaStiXSupport \endlinkDirect LLt, LDLt, LU factorizationsSPD \n SPD \n SquareFill-in reducing, Leverage fast dense algebra, MultithreadingRequires the PaStiX package, \b CeCILL-C optimized for tough problems and symmetric patterns
CholmodSupernodalLLT\link CholmodSupport_Module CholmodSupport \endlinkDirect LLt factorizationSPDFill-in reducing, Leverage fast dense algebraRequires the SuiteSparse package, \b GPL
UmfPackLU\link UmfPackSupport_Module UmfPackSupport \endlinkDirect LU factorizationSquareFill-in reducing, Leverage fast dense algebraRequires the SuiteSparse package, \b GPL
SuperLU\link SuperLUSupport_Module SuperLUSupport \endlinkDirect LU factorizationSquareFill-in reducing, Leverage fast dense algebraRequires the SuperLU library, (BSD-like)
+ +Here \c SPD means symmetric positive definite. + +All these solvers follow the same general concept. +Here is a typical and general example: \code -s_1 *= 0.5; -sm4 = sm1 + sm2 + sm3; // only if s_1, s_2 and s_3 have the same storage order -sm3 = sm1 * sm2; -dv3 = sm1 * dv2; -dm3 = sm1 * dm2; -dm3 = dm2 * sm1; -sm3 = sm1.cwiseProduct(sm2); // only if s_1 and s_2 have the same storage order -dv2 = sm1.triangularView().solve(dv2); -\endcode - -The product of a sparse matrix A by a dense matrix/vector dv with A symmetric can be optimized by telling that to Eigen: -\code -res = A.selfadjointView<>() * dv; // if all coefficients of A are stored -res = A.selfadjointView() * dv; // if only the upper part of A is stored -res = A.selfadjointView() * dv; // if only the lower part of A is stored -\endcode - - -\section TutorialSparseDirectSolvers Using the direct solvers - -To solve a sparse problem you currently have to use one or multiple of the following "unsupported" module: -- \ref SparseExtra_Module - - \b solvers: SparseLLT, SparseLDLT (\#include ) - - \b notes: built-in basic LLT and LDLT solvers -- \ref CholmodSupport_Module - - \b solver: SparseLLT (\#include ) - - \b notes: LLT solving using Cholmod, requires a SparseMatrix object. (recommended for symmetric/selfadjoint problems) -- \ref UmfPackSupport_Module - - \b solver: SparseLU (\#include ) - - \b notes: LU solving using UmfPack, requires a SparseMatrix object (recommended for squared matrices) -- \ref SuperLUSupport_Module - - \b solver: SparseLU (\#include ) - - \b notes: (LU solving using SuperLU, requires a SparseMatrix object, recommended for squared matrices) -- \ref TaucsSupport_Module - - \b solver: SparseLLT (\#include ) - - \b notes: LLT solving using Taucs, requires a SparseMatrix object (not recommended) - -\warning Those modules are currently considered to be "unsupported" because 1) they are not documented, and 2) their API is likely to change in the future. - -Here is a typical example: -\code -#include +#include // ... SparseMatrix A; // fill A VectorXd b, x; // fill b -// solve Ax = b using UmfPack: -SparseLU,UmfPack> lu_of_A(A); -if(!lu_of_A.succeeded()) { - // decomposiiton failed +// solve Ax = b +SolverClassName > solver; +solver.compute(A); +if(solver.info()!=Succeeded) { + // decomposition failed return; } -if(!lu_of_A.solve(b,&x)) { +x = solver.solve(b); +if(solver.info()!=Succeeded) { // solving failed return; } +// solve for another right hand side: +x1 = solver.solve(b1); \endcode -See also the class SparseLLT, class SparseLU, and class SparseLDLT. +For \c SPD solvers, a second optional template argument allows to specify which triangular part have to be used, e.g.: + +\code +#include + +ConjugateGradient, Eigen::Upper> solver; +x = solver.compute(A).solve(b); +\endcode +In the above example, only the upper triangular part of the input matrix A is considered for solving. The opposite triangle might either be empty or contain arbitrary values. + +In the case where multiple problems with the same sparcity pattern have to be solved, then the "compute" step can be decomposed as follow: +\code +SolverClassName > solver; +solver.analyzePattern(A); // for this step the numerical values of A are not used +solver.factorize(A); +x1 = solver.solve(b1); +x2 = solver.solve(b2); +... +A = ...; // modify the values of the nonzeros of A, the nonzeros pattern must stay unchanged +solver.factorize(A); +x1 = solver.solve(b1); +x2 = solver.solve(b2); +... 
+\endcode +The compute() method is equivalent to calling both analyzePattern() and factorize(). + +Finally, each solver provides some specific features, such as determinant, access to the factors, controls of the iterations, and so on. +More details are availble in the documentations of the respective classes. + + +\section TutorialSparseFeatureSet Supported operators and functions + +Because of their special storage format, sparse matrices cannot offer the same level of flexbility than dense matrices. +In Eigen's sparse module we chose to expose only the subset of the dense matrix API which can be efficiently implemented. +In the following \em sm denotes a sparse matrix, \em sv a sparse vector, \em dm a dense matrix, and \em dv a dense vector. + +\subsection TutorialSparse_BasicOps Basic operations + +%Sparse expressions support most of the unary and binary coefficient wise operations: +\code +sm1.real() sm1.imag() -sm1 0.5*sm1 +sm1+sm2 sm1-sm2 sm1.cwiseProduct(sm2) +\endcode +However, a strong restriction is that the storage orders must match. For instance, in the following example: +\code +sm4 = sm1 + sm2 + sm3; +\endcode +sm1, sm2, and sm3 must all be row-major or all column major. +On the other hand, there is no restriction on the target matrix sm4. +For instance, this means that for computing \f$ A^T + A \f$, the matrix \f$ A^T \f$ must be evaluated into a temporary matrix of compatible storage order: +\code +SparseMatrix A, B; +B = SparseMatrix(A.transpose()) + A; +\endcode + +Binary coefficient wise operators can also mix sparse and dense expressions: +\code +sm2 = sm1.cwiseProduct(dm1); +dm2 = sm1 + dm1; +\endcode + + +%Sparse expressions also support transposition: +\code +sm1 = sm2.transpose(); +sm1 = sm2.adjoint(); +\endcode +However, there is no transposeInPlace() method. + + +\subsection TutorialSparse_Products Matrix products + +%Eigen supports various kind of sparse matrix products which are summarize below: + - \b sparse-dense: + \code +dv2 = sm1 * dv1; +dm2 = dm1 * sm1.adjoint(); +dm2 = 2. * sm1 * dm1; + \endcode + - \b symmetric \b sparse-dense. The product of a sparse symmetric matrix with a dense matrix (or vector) can also be optimized by specifying the symmetry with selfadjointView(): + \code +dm2 = sm1.selfadjointView<>() * dm1; // if all coefficients of A are stored +dm2 = A.selfadjointView() * dm1; // if only the upper part of A is stored +dm2 = A.selfadjointView() * dm1; // if only the lower part of A is stored + \endcode + - \b sparse-sparse. For sparse-sparse products, two different algorithms are available. The default one is conservative and preserve the explicit zeros that might appear: + \code +sm3 = sm1 * sm2; +sm3 = 4 * sm1.adjoint() * sm2; + \endcode + The second algorithm prunes on the fly the explicit zeros, or the values smaller than a given threshold. It is enabled and controlled through the prune() functions: + \code +sm3 = (sm1 * sm2).prune(); // removes numerical zeros +sm3 = (sm1 * sm2).prune(ref); // removes elements much smaller than ref +sm3 = (sm1 * sm2).prune(ref,epsilon); // removes elements smaller than ref*epsilon + \endcode + + - \b permutations. 
Finally, permutations can be applied to sparse matrices too: + \code +PermutationMatrix P = ...; +sm2 = P * sm1; +sm2 = sm1 * P.inverse(); +sm2 = sm1.transpose() * P; + \endcode + + +\subsection TutorialSparse_TriangularSelfadjoint Triangular and selfadjoint views + +Just as with dense matrices, the triangularView() function can be used to address a triangular part of the matrix, and perform triangular solves with a dense right hand side: +\code +dm2 = sm1.triangularView(dm1); +dv2 = sm1.transpose().triangularView(dv1); +\endcode + +The selfadjointView() function permits various operations: + - optimized sparse-dense matrix products: + \code +dm2 = sm1.selfadjointView<>() * dm1; // if all coefficients of A are stored +dm2 = A.selfadjointView() * dm1; // if only the upper part of A is stored +dm2 = A.selfadjointView() * dm1; // if only the lower part of A is stored + \endcode + - copy of triangular parts: + \code +sm2 = sm1.selfadjointView(); // makes a full selfadjoint matrix from the upper triangular part +sm2.selfadjointView() = sm1.selfadjointView(); // copies the upper triangular part to the lower triangular part + \endcode + - application of symmetric permutations: + \code +PermutationMatrix P = ...; +sm2 = A.selfadjointView().twistedBy(P); // compute P S P' from the upper triangular part of A, and make it a full matrix +sm2.selfadjointView() = A.selfadjointView().twistedBy(P); // compute P S P' from the lower triangular part of A, and then only compute the lower part + \endcode + +\subsection TutorialSparse_Submat Sub-matrices + +%Sparse matrices does not support yet the addressing of arbitrary sub matrices. Currently, one can only reference a set of contiguous \em inner vectors, i.e., a set of contiguous rows for a row-major matrix, or a set of contiguous columns for a column major matrix: +\code + sm1.innerVector(j); // returns an expression of the j-th column (resp. row) of the matrix if sm1 is col-major (resp. row-major) + sm1.innerVectors(j, nb); // returns an expression of the nb columns (resp. row) starting from the j-th column (resp. row) + // of the matrix if sm1 is col-major (resp. row-major) + sm1.middleRows(j, nb); // for row major matrices only, get a range of nb rows + sm1.middleCols(j, nb); // for column major matrices only, get a range of nb columns +\endcode \li \b Next: \ref TutorialMapClass diff --git a/gtsam/3rdparty/Eigen/doc/CMakeLists.txt b/gtsam/3rdparty/Eigen/doc/CMakeLists.txt index 50ce7ee0c..96bff41bf 100644 --- a/gtsam/3rdparty/Eigen/doc/CMakeLists.txt +++ b/gtsam/3rdparty/Eigen/doc/CMakeLists.txt @@ -36,6 +36,7 @@ set(snippets_targets "") add_definitions("-DEIGEN_MAKING_DOCS") add_subdirectory(examples) +add_subdirectory(special_examples) add_subdirectory(snippets) add_custom_target( diff --git a/gtsam/3rdparty/Eigen/doc/D09_StructHavingEigenMembers.dox b/gtsam/3rdparty/Eigen/doc/D09_StructHavingEigenMembers.dox index d6a24d951..51789ca9c 100644 --- a/gtsam/3rdparty/Eigen/doc/D09_StructHavingEigenMembers.dox +++ b/gtsam/3rdparty/Eigen/doc/D09_StructHavingEigenMembers.dox @@ -10,6 +10,7 @@ namespace Eigen { - \ref movetotop - \ref bugineigen - \ref conditional + - \ref othersolutions \section summary Executive Summary @@ -55,6 +56,8 @@ Foo *foo = new Foo; This macro makes "new Foo" always return an aligned pointer. +If this approach is too intrusive, see also the \ref othersolutions. + \section why Why is this needed? 
OK let's say that your code looks like this: @@ -132,6 +135,64 @@ Foo<4> *foo4 = new Foo<4>; // foo4 is guaranteed to be 128bit-aligned Foo<3> *foo3 = new Foo<3>; // foo3 has only the system default alignment guarantee \endcode + +\section othersolutions Other solutions + +In case putting the EIGEN_MAKE_ALIGNED_OPERATOR_NEW macro everywhere is too intrusive, there exists at least two other solutions. + +\subsection othersolutions1 Disabling alignment + +The first is to disable alignment requirement for the fixed size members: +\code +class Foo +{ + ... + Eigen::Matrix v; + ... +}; +\endcode +This has for effect to disable vectorization when using \c v. +If a function of Foo uses it several times, then it still possible to re-enable vectorization by copying it into an aligned temporary vector: +\code +void Foo::bar() +{ + Eigen::Vector2d av(v); + // use av instead of v + ... + // if av changed, then do: + v = av; +} +\endcode + +\subsection othersolutions2 Private structure + +The second consist in storing the fixed-size objects into a private struct which will be dynamically allocated at the construction time of the main object: + +\code +struct Foo_d +{ + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + Vector2d v; + ... +}; + + +struct Foo { + Foo() { init_d(); } + ~Foo() { delete d; } + void bar() + { + // use d->v instead of v + ... + } +private: + void init_d() { d = new Foo_d; } + Foo_d* d; +}; +\endcode + +The clear advantage here is that the class Foo remains unchanged regarding alignment issues. The drawback is that a heap allocation will be required whatsoever. + */ } diff --git a/gtsam/3rdparty/Eigen/doc/Doxyfile.in b/gtsam/3rdparty/Eigen/doc/Doxyfile.in index 6a659d9c2..e9e89d486 100644 --- a/gtsam/3rdparty/Eigen/doc/Doxyfile.in +++ b/gtsam/3rdparty/Eigen/doc/Doxyfile.in @@ -108,7 +108,7 @@ ALWAYS_DETAILED_SEC = NO # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. -INLINE_INHERITED_MEMB = NO +INLINE_INHERITED_MEMB = YES # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set @@ -206,7 +206,6 @@ ALIASES = "only_for_vectors=This is only for vectors (either row- "qr_module=This is defined in the %QR module. \code #include \endcode" \ "svd_module=This is defined in the %SVD module. \code #include \endcode" \ "label=\bug" \ - "redstar=*" \ "matrixworld=*" \ "arrayworld=*" \ "note_about_arbitrary_choice_of_solution=If there exists more than one solution, this method will arbitrarily choose one." \ @@ -303,7 +302,7 @@ TYPEDEF_HIDES_STRUCT = NO # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES -EXTRACT_ALL = NO +EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. @@ -561,8 +560,7 @@ WARN_LOGFILE = # with spaces. INPUT = "${Eigen_SOURCE_DIR}/Eigen" \ - "${Eigen_SOURCE_DIR}/doc" \ - "${Eigen_BINARY_DIR}/doc" + "${Eigen_SOURCE_DIR}/doc" # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is @@ -585,13 +583,17 @@ FILE_PATTERNS = * # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. 
-RECURSIVE = NO +RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. -EXCLUDE = +EXCLUDE = "${Eigen_SOURCE_DIR}/Eigen/Eigen2Support" \ + "${Eigen_SOURCE_DIR}/Eigen/src/Eigen2Support" \ + "${Eigen_SOURCE_DIR}/doc/examples" \ + "${Eigen_SOURCE_DIR}/doc/special_examples" \ + "${Eigen_SOURCE_DIR}/doc/snippets" # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded @@ -627,10 +629,8 @@ EXCLUDE_PATTERNS = CMake* \ # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test -# This is used to clean up the "class hierarchy" page - -EXCLUDE_SYMBOLS = EigenBase<* SparseMatrixBase<* DenseBase<* MatrixBase<* Matrix<* \ - ProductReturnType<* RotationBase<* Stride<* BandMatrix<* Block<* +# This could used to clean up the "class hierarchy" page +EXCLUDE_SYMBOLS = internal::* Flagged* *InnerIterator* DenseStorage<* # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see @@ -639,7 +639,9 @@ EXCLUDE_SYMBOLS = EigenBase<* SparseMatrixBase<* DenseBase<* MatrixBase<* EXAMPLE_PATH = "${Eigen_SOURCE_DIR}/doc/snippets" \ "${Eigen_BINARY_DIR}/doc/snippets" \ "${Eigen_SOURCE_DIR}/doc/examples" \ - "${Eigen_BINARY_DIR}/doc/examples" + "${Eigen_BINARY_DIR}/doc/examples" \ + "${Eigen_SOURCE_DIR}/doc/special_examples" \ + "${Eigen_BINARY_DIR}/doc/special_examples" # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp @@ -1229,7 +1231,8 @@ EXPAND_AS_DEFINED = EIGEN_MAKE_TYPEDEFS \ EIGEN_CWISE_BINOP_RETURN_TYPE \ EIGEN_CWISE_PRODUCT_RETURN_TYPE \ EIGEN_CURRENT_STORAGE_BASE_CLASS \ - _EIGEN_GENERIC_PUBLIC_INTERFACE + _EIGEN_GENERIC_PUBLIC_INTERFACE \ + EIGEN2_SUPPORT # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone diff --git a/gtsam/3rdparty/Eigen/doc/I00_CustomizingEigen.dox b/gtsam/3rdparty/Eigen/doc/I00_CustomizingEigen.dox index 766ff6f95..623ef31e1 100644 --- a/gtsam/3rdparty/Eigen/doc/I00_CustomizingEigen.dox +++ b/gtsam/3rdparty/Eigen/doc/I00_CustomizingEigen.dox @@ -120,19 +120,22 @@ Eigen::MatrixBase, 10000, 1, 2, 10000, 1> \anchor user_defined_scalars \section CustomScalarType Using custom scalar types -By default, Eigen currently supports the following scalar types: \c int, \c float, \c double, \c std::complex, \c std::complex, \c long \c double, \c long \c long \c int (64 bits integers), and \c bool. The \c long \c double is especially useful on x86-64 systems or when the SSE2 instruction set is enabled because it enforces the use of x87 registers with extended accuracy. +By default, Eigen currently supports standard floating-point types (\c float, \c double, \c std::complex, \c std::complex, \c long \c double), as well as all integrale types (e.g., \c int, \c unsigned \c int, \c short, etc.), and \c bool. +On x86-64 systems, \c long \c double permits to locally enforces the use of x87 registers with extended accuracy (in comparison to SSE). In order to add support for a custom type \c T you need: - 1 - make sure the common operator (+,-,*,/,etc.) 
are supported by the type \c T - 2 - add a specialization of struct Eigen::NumTraits (see \ref NumTraits) - 3 - define a couple of math functions for your type such as: internal::sqrt, internal::abs, etc... +-# make sure the common operator (+,-,*,/,etc.) are supported by the type \c T +-# add a specialization of struct Eigen::NumTraits (see \ref NumTraits) +-# define the math functions that makes sense for your type. This includes standard ones like sqrt, pow, sin, tan, conj, real, imag, etc, as well as abs2 which is Eigen specific. (see the file Eigen/src/Core/MathFunctions.h) +The math function should be defined in the same namespace than \c T, or in the \c std namespace though that second appraoch is not recommended. + Here is a concrete example adding support for the Adolc's \c adouble type. Adolc is an automatic differentiation library. The type \c adouble is basically a real value tracking the values of any number of partial derivatives. \code -#ifndef ADLOCSUPPORT_H -#define ADLOCSUPPORT_H +#ifndef ADOLCSUPPORT_H +#define ADOLCSUPPORT_H #define ADOLC_TAPELESS #include @@ -141,6 +144,7 @@ Here is a concrete example adding support for the Adolc's \c adouble type. struct NumTraits + : NumTraits // permits to get the epsilon, dummy_precision, lowest, highest functions { typedef adtl::adouble Real; typedef adtl::adouble NonInteger; @@ -149,35 +153,27 @@ template<> struct NumTraits enum { IsComplex = 0, IsInteger = 0, - IsSigned, + IsSigned = 1, + RequireInitialization = 1, ReadCost = 1, - AddCost = 1, - MulCost = 1 + AddCost = 3, + MulCost = 3 }; }; } -// the Adolc's type adouble is defined in the adtl namespace -// therefore, the following internal::* functions *must* be defined -// in the same namespace namespace adtl { - inline const adouble& internal::conj(const adouble& x) { return x; } - inline const adouble& internal::real(const adouble& x) { return x; } - inline adouble internal::imag(const adouble&) { return 0.; } - inline adouble internal::abs(const adouble& x) { return fabs(x); } - inline adouble internal::abs2(const adouble& x) { return x*x; } - inline adouble internal::sqrt(const adouble& x) { return sqrt(x); } - inline adouble internal::exp(const adouble& x) { return exp(x); } - inline adouble internal::log(const adouble& x) { return log(x); } - inline adouble internal::sin(const adouble& x) { return sin(x); } - inline adouble internal::cos(const adouble& x) { return cos(x); } - inline adouble internal::pow(const adouble& x, adouble y) { return pow(x, y); } +inline const adouble& conj(const adouble& x) { return x; } +inline const adouble& real(const adouble& x) { return x; } +inline adouble imag(const adouble&) { return 0.; } +inline adouble abs(const adouble& x) { return fabs(x); } +inline adouble abs2(const adouble& x) { return x*x; } } -#endif // ADLOCSUPPORT_H +#endif // ADOLCSUPPORT_H \endcode diff --git a/gtsam/3rdparty/Eigen/doc/I09_Vectorization.dox b/gtsam/3rdparty/Eigen/doc/I09_Vectorization.dox index 63831e59f..274d0451b 100644 --- a/gtsam/3rdparty/Eigen/doc/I09_Vectorization.dox +++ b/gtsam/3rdparty/Eigen/doc/I09_Vectorization.dox @@ -1,6 +1,6 @@ namespace Eigen { -/** \page TopicVectorization Vectorizaion +/** \page TopicVectorization Vectorization TODO: write this dox page! 
diff --git a/gtsam/3rdparty/Eigen/doc/I14_PreprocessorDirectives.dox b/gtsam/3rdparty/Eigen/doc/I14_PreprocessorDirectives.dox index b34fc84cf..2826a7f50 100644 --- a/gtsam/3rdparty/Eigen/doc/I14_PreprocessorDirectives.dox +++ b/gtsam/3rdparty/Eigen/doc/I14_PreprocessorDirectives.dox @@ -2,66 +2,75 @@ namespace Eigen { /** \page TopicPreprocessorDirectives Preprocessor directives -You can control some aspects of Eigen by defining the preprocessor tokens using \c \#define. These macros -should be defined before any Eigen headers are included. Often they are best set in the project options. +You can control some aspects of %Eigen by defining the preprocessor tokens using \c \#define. These macros +should be defined before any %Eigen headers are included. Often they are best set in the project options. -This page lists the preprocesor tokens recognised by Eigen. +This page lists the preprocesor tokens recognised by %Eigen. Table of contents - \ref TopicPreprocessorDirectivesMajor - \ref TopicPreprocessorDirectivesAssertions - \ref TopicPreprocessorDirectivesPerformance - \ref TopicPreprocessorDirectivesPlugins + - \ref TopicPreprocessorDirectivesDevelopers \section TopicPreprocessorDirectivesMajor Macros with major effects +These macros have a major effect and typically break the API (Application Programming Interface) and/or the +ABI (Application Binary Interface). This can be rather dangerous: if parts of your program are compiled with +one option, and other parts (or libraries that you use) are compiled with another option, your program may +fail to link or exhibit subtle bugs. Nevertheless, these options can be useful for people who know what they +are doing. + - \b EIGEN2_SUPPORT - if defined, enables the Eigen2 compatibility mode. This is meant to ease the transition of Eigen2 to Eigen3 (see \ref Eigen2ToEigen3). Not defined by default. - \b EIGEN2_SUPPORT_STAGEnn_xxx (for various values of nn and xxx) - staged migration path from Eigen2 to Eigen3; see \ref Eigen2SupportModes. - - \b EIGEN_DEFAULT_TO_ROW_MAJOR - when defined, the default storage order for matrices becomes row-major - instead of column-major. Not defined by default. - \b EIGEN_DEFAULT_DENSE_INDEX_TYPE - the type for column and row indices in matrices, vectors and array (DenseBase::Index). Set to \c std::ptrdiff_t by default. - \b EIGEN_DEFAULT_IO_FORMAT - the IOFormat to use when printing a matrix if no #IOFormat is specified. Defaults to the #IOFormat constructed by the default constructor IOFormat(). - \b EIGEN_INITIALIZE_MATRICES_BY_ZERO - if defined, all entries of newly constructed matrices and arrays are initializes to zero, as are new entries in matrices and arrays after resizing. Not defined by default. + - \b EIGEN_NO_AUTOMATIC_RESIZING - if defined, the matrices (or arrays) on both sides of an assignment + a = b have to be of the same size; otherwise, %Eigen automatically resizes \c a so that it is of + the correct size. Not defined by default. \section TopicPreprocessorDirectivesAssertions Assertions -The Eigen library contains many assertions to guard against programming errors, both at compile time and at +The %Eigen library contains many assertions to guard against programming errors, both at compile time and at run time. However, these assertions do cost time and can thus be turned off. - - \b EIGEN_NO_DEBUG - disables Eigen's assertions if defined. Not defined by default, unless the + - \b EIGEN_NO_DEBUG - disables %Eigen's assertions if defined. 
Not defined by default, unless the \c NDEBUG macro is defined (this is a standard C++ macro which disables all asserts). - \b EIGEN_NO_STATIC_ASSERT - if defined, compile-time static assertions are replaced by runtime assertions; this saves compilation time. Not defined by default. - - \b EIGEN_INTERNAL_DEBUGGING - if defined, enables assertions in Eigen's internal routines. This is useful - for debugging Eigen itself. Not defined by default. + - \b eigen_assert - macro with one argument that is used inside %Eigen for assertions. By default, it is + basically defined to be \c assert, which aborts the program if the assertion is violated. Redefine this + macro if you want to do something else, like throwing an exception. \section TopicPreprocessorDirectivesPerformance Alignment, vectorization and performance tweaking - - \b EIGEN_DONT_ALIGN - disables alignment completely. Eigen will not try to align its objects and does not + - \b EIGEN_DONT_ALIGN - disables alignment completely. %Eigen will not try to align its objects and does not expect that any objects passed to it are aligned. This will turn off vectorization. Not defined by default. - \b EIGEN_DONT_ALIGN_STATICALLY - disables alignment of arrays on the stack. Not defined by default, unless \c EIGEN_DONT_ALIGN is defined. - \b EIGEN_DONT_VECTORIZE - disables explicit vectorization when defined. Not defined by default, unless - alignment is disabled by Eigen's platform test or the user defining \c EIGEN_DONT_ALIGN. + alignment is disabled by %Eigen's platform test or the user defining \c EIGEN_DONT_ALIGN. - \b EIGEN_FAST_MATH - enables some optimizations which might affect the accuracy of the result. The only optimization this currently includes is single precision sin() and cos() in the present of SSE vectorization. Defined by default. - \b EIGEN_UNROLLING_LIMIT - defines the size of a loop to enable meta unrolling. Set it to zero to disable - unrolling. The size of a loop here is expressed in Eigen's own notion of "number of FLOPS", it does not + unrolling. The size of a loop here is expressed in %Eigen's own notion of "number of FLOPS", it does not correspond to the number of iterations or the number of instructions. The default is value 100. \section TopicPreprocessorDirectivesPlugins Plugins -It is possible to add new methods to many fundamental classes in Eigen by writing a plugin. As explained in +It is possible to add new methods to many fundamental classes in %Eigen by writing a plugin. As explained in the section \ref ExtendingMatrixBase, the plugin is specified by defining a \c EIGEN_xxx_PLUGIN macro. The following macros are supported; none of them are defined by default. @@ -81,6 +90,21 @@ following macros are supported; none of them are defined by default. - \b EIGEN_FUNCTORS_PLUGIN - filename of plugin for adding new functors and specializations of functor_traits. +\section TopicPreprocessorDirectivesDevelopers Macros for Eigen developers + +These macros are mainly meant for people developing %Eigen and for testing purposes. Even though, they might be useful for power users and the curious for debugging and testing purpose, they \b should \b not \b be \b used by real-word code. + + - \b EIGEN_DEFAULT_TO_ROW_MAJOR - when defined, the default storage order for matrices becomes row-major + instead of column-major. Not defined by default. + - \b EIGEN_INTERNAL_DEBUGGING - if defined, enables assertions in %Eigen's internal routines. This is useful + for debugging %Eigen itself. Not defined by default. 
+ - \b EIGEN_NO_MALLOC - if defined, any request from inside the %Eigen to allocate memory from the heap + results in an assertion failure. This is useful to check that some routine does not allocate memory + dynamically. Not defined by default. + - \b EIGEN_RUNTIME_NO_MALLOC - if defined, a new switch is introduced which can be turned on and off by + calling set_is_malloc_allowed(bool). If malloc is not allowed and %Eigen tries to allocate memory + dynamically anyway, an assertion failure results. Not defined by default. + */ } diff --git a/gtsam/3rdparty/Eigen/doc/I15_StorageOrders.dox b/gtsam/3rdparty/Eigen/doc/I15_StorageOrders.dox index 6b56ca8f8..7418912a6 100644 --- a/gtsam/3rdparty/Eigen/doc/I15_StorageOrders.dox +++ b/gtsam/3rdparty/Eigen/doc/I15_StorageOrders.dox @@ -60,10 +60,8 @@ parameter is set to \c RowMajor, then the matrix or array is stored in row-major \c ColMajor, then it is stored in column-major order. This mechanism is used in the above Eigen program to specify the storage order. -If the storage order is not specified, then Eigen normally defaults to storing the entry in column-major -order. This is also the case if one of the convenience typedefs (\c Matrix3f, \c ArrayXXd, etc.) is -used. However, it is possible to change the default to row-major order by defining the -\c EIGEN_DEFAULT_TO_ROW_MAJOR \ref TopicPreprocessorDirectives "preprocessor directive". +If the storage order is not specified, then Eigen defaults to storing the entry in column-major. This is also +the case if one of the convenience typedefs (\c Matrix3f, \c ArrayXXd, etc.) is used. Matrices and arrays using one storage order can be assigned to matrices and arrays using the other storage order, as happens in the above program when \c Arowmajor is initialized using \c Acolmajor. Eigen will reorder diff --git a/gtsam/3rdparty/Eigen/doc/I16_TemplateKeyword.dox b/gtsam/3rdparty/Eigen/doc/I16_TemplateKeyword.dox new file mode 100644 index 000000000..324532310 --- /dev/null +++ b/gtsam/3rdparty/Eigen/doc/I16_TemplateKeyword.dox @@ -0,0 +1,136 @@ +namespace Eigen { + +/** \page TopicTemplateKeyword The template and typename keywords in C++ + +There are two uses for the \c template and \c typename keywords in C++. One of them is fairly well known +amongst programmers: to define templates. The other use is more obscure: to specify that an expression refers +to a template function or a type. This regularly trips up programmers that use the %Eigen library, often +leading to error messages from the compiler that are difficult to understand. + +Table of contents + - \ref TopicTemplateKeywordToDefineTemplates + - \ref TopicTemplateKeywordExample + - \ref TopicTemplateKeywordExplanation + - \ref TopicTemplateKeywordResources + + +\section TopicTemplateKeywordToDefineTemplates Using the template and typename keywords to define templates + +The \c template and \c typename keywords are routinely used to define templates. This is not the topic of this +page as we assume that the reader is aware of this (otherwise consult a C++ book). The following example +should illustrate this use of the \c template keyword. + +\code +template +bool isPositive(T x) +{ + return x > 0; +} +\endcode + +We could just as well have written template <class T>; the keywords \c typename and \c class have the +same meaning in this context. + + +\section TopicTemplateKeywordExample An example showing the second use of the template keyword + +Let us illustrate the second use of the \c template keyword with an example. 
Suppose we want to write a +function which copies all entries in the upper triangular part of a matrix into another matrix, while keeping +the lower triangular part unchanged. A straightforward implementation would be as follows: + + + + +
Example:Output:
+\include TemplateKeyword_simple.cpp + +\verbinclude TemplateKeyword_simple.out +
+ +That works fine, but it is not very flexible. First, it only works with dynamic-size matrices of +single-precision floats; the function \c copyUpperTriangularPart() does not accept static-size matrices or +matrices with double-precision numbers. Second, if you use an expression such as +mat.topLeftCorner(3,3) as the parameter \c src, then this is copied into a temporary variable of type +MatrixXf; this copy can be avoided. + +As explained in \ref TopicFunctionTakingEigenTypes, both issues can be resolved by making +\c copyUpperTriangularPart() accept any object of type MatrixBase. This leads to the following code: + + + + +
Example:Output:
+\include TemplateKeyword_flexible.cpp + +\verbinclude TemplateKeyword_flexible.out +
+ +The one line in the body of the function \c copyUpperTriangularPart() shows the second, more obscure use of +the \c template keyword in C++. Even though it may look strange, the \c template keywords are necessary +according to the standard. Without it, the compiler may reject the code with an error message like "no match +for operator<". + + +\section TopicTemplateKeywordExplanation Explanation + +The reason that the \c template keyword is necessary in the last example has to do with the rules for how +templates are supposed to be compiled in C++. The compiler has to check the code for correct syntax at the +point where the template is defined, without knowing the actual value of the template arguments (\c Derived1 +and \c Derived2 in the example). That means that the compiler cannot know that dst.triangularPart is +a member template and that the following < symbol is part of the delimiter for the template +parameter. Another possibility would be that dst.triangularPart is a member variable with the < +symbol refering to the operator<() function. In fact, the compiler should choose the second +possibility, according to the standard. If dst.triangularPart is a member template (as in our case), +the programmer should specify this explicitly with the \c template keyword and write dst.template +triangularPart. + +The precise rules are rather complicated, but ignoring some subtleties we can summarize them as follows: +- A dependent name is name that depends (directly or indirectly) on a template parameter. In the + example, \c dst is a dependent name because it is of type MatrixBase<Derived1> which depends + on the template parameter \c Derived1. +- If the code contains either one of the contructions xxx.yyy or xxx->yyy and \c xxx is a + dependent name and \c yyy refers to a member template, then the \c template keyword must be used before + \c yyy, leading to xxx.template yyy or xxx->template yyy. +- If the code contains the contruction xxx::yyy and \c xxx is a dependent name and \c yyy refers to a + member typedef, then the \c typename keyword must be used before the whole construction, leading to + typename xxx::yyy. + +As an example where the \c typename keyword is required, consider the following code in \ref TutorialSparse +for iterating over the non-zero entries of a sparse matrix type: + +\code +SparseMatrixType mat(rows,cols); +for (int k=0; k +void iterateOverSparseMatrix(const SparseMatrix& mat; +{ + for (int k=0; k::InnerIterator it(mat,k); it; ++it) + { + /* ... */ + } +} +\endcode + + +\section TopicTemplateKeywordResources Resources for further reading + +For more information and a fuller explanation of this topic, the reader may consult the following sources: +- The book "C++ Template Metaprogramming" by David Abrahams and Aleksey Gurtovoy contains a very good + explanation in Appendix B ("The typename and template Keywords") which formed the basis for this page. 
+- http://pages.cs.wisc.edu/~driscoll/typename.html +- http://www.parashift.com/c++-faq-lite/templates.html#faq-35.18 +- http://www.comeaucomputing.com/techtalk/templates/#templateprefix +- http://www.comeaucomputing.com/techtalk/templates/#typename + +*/ +} diff --git a/gtsam/3rdparty/Eigen/doc/Overview.dox b/gtsam/3rdparty/Eigen/doc/Overview.dox index 68e07ad63..2657c85bc 100644 --- a/gtsam/3rdparty/Eigen/doc/Overview.dox +++ b/gtsam/3rdparty/Eigen/doc/Overview.dox @@ -34,12 +34,15 @@ For a first contact with Eigen, the best place is to have a look at the \ref Get - \ref TopicLazyEvaluation - \ref TopicLinearAlgebraDecompositions - \ref TopicCustomizingEigen + - \ref TopicMultiThreading - \ref TopicPreprocessorDirectives - \ref TopicStorageOrders - \ref TopicInsideEigenExample - \ref TopicWritingEfficientProductExpression - \ref TopicClassHierarchy - \ref TopicFunctionTakingEigenTypes + - \ref TopicTemplateKeyword + - \ref TopicUsingIntelMKL - Topics related to alignment issues - \ref TopicUnalignedArrayAssert - \ref TopicFixedSizeVectorizable diff --git a/gtsam/3rdparty/Eigen/doc/QuickReference.dox b/gtsam/3rdparty/Eigen/doc/QuickReference.dox index e23ff7ce5..3310d390a 100644 --- a/gtsam/3rdparty/Eigen/doc/QuickReference.dox +++ b/gtsam/3rdparty/Eigen/doc/QuickReference.dox @@ -412,8 +412,8 @@ array1 <= array2 array1 >= array2 array1 <= scalar array1 >= scalar array1 == array2 array1 != array2 array1 == scalar array1 != scalar \endcode Trigo, power, and \n misc functions \n and the STL variants\code -array1.min(array2) std::min(array1,array2) -array1.max(array2) std::max(array1,array2) +array1.min(array2) +array1.max(array2) array1.abs2() array1.abs() std::abs(array1) array1.sqrt() std::sqrt(array1) @@ -440,10 +440,10 @@ Eigen provides several reduction methods such as: \link DenseBase::sum() sum() \endlink, \link DenseBase::prod() prod() \endlink, \link MatrixBase::trace() trace() \endlink \matrixworld, \link MatrixBase::norm() norm() \endlink \matrixworld, \link MatrixBase::squaredNorm() squaredNorm() \endlink \matrixworld, -\link DenseBase::all() all() \endlink \redstar,and \link DenseBase::any() any() \endlink \redstar. +\link DenseBase::all() all() \endlink, and \link DenseBase::any() any() \endlink. All reduction operations can be done matrix-wise, -\link DenseBase::colwise() column-wise \endlink \redstar or -\link DenseBase::rowwise() row-wise \endlink \redstar. Usage example: +\link DenseBase::colwise() column-wise \endlink or +\link DenseBase::rowwise() row-wise \endlink. Usage example:
\code 5 3 1 @@ -586,6 +586,9 @@ mat3 = mat1 * diag1.inverse() TriangularView gives a view on a triangular part of a dense matrix and allows to perform optimized operations on it. The opposite triangular part is never referenced and can be used to store other information. +\note The .triangularView() template member function requires the \c template keyword if it is used on an +object of a type that depends on a template parameter; see \ref TopicTemplateKeyword for details. +
OperationCode
@@ -630,6 +633,9 @@ Just as for triangular matrix, you can reference any triangular part of a square matrix and perform special and optimized operations. Again the opposite triangular part is never referenced and can be used to store other information. +\note The .selfadjointView() template member function requires the \c template keyword if it is used on an +object of a type that depends on a template parameter; see \ref TopicTemplateKeyword for details. + - + @@ -94,7 +94,7 @@ namespace Eigen { - + @@ -132,7 +132,7 @@ namespace Eigen { - + @@ -245,6 +245,10 @@ namespace Eigen {
Blocking
Means the algorithm can work per block, which guarantees a good scaling of the performance for large matrices.
+
Implicit Multi Threading (MT)
+
Means the algorithm can take advantage of multicore processors via OpenMP. "Implicit" means the algorithm itself is not parallelized, but that it relies on parallelized matrix-matrix product routines.
+
Explicit Multi Threading (MT)
+
Means the algorithm is explicitly parallelized to take advantage of multicore processors via OpenMP.
Meta-unroller
Means the algorithm is automatically and explicitly unrolled for very small fixed size matrices.
diff --git a/gtsam/3rdparty/Eigen/doc/TopicMultithreading.dox b/gtsam/3rdparty/Eigen/doc/TopicMultithreading.dox new file mode 100644 index 000000000..f7d082668 --- /dev/null +++ b/gtsam/3rdparty/Eigen/doc/TopicMultithreading.dox @@ -0,0 +1,46 @@ +namespace Eigen { + +/** \page TopicMultiThreading Eigen and multi-threading + +\section TopicMultiThreading_MakingEigenMT Make Eigen run in parallel + +Some of Eigen's algorithms can exploit the multiple cores present in your hardware. To this end, it is enough to enable OpenMP on your compiler, for instance: + * GCC: \c -fopenmp + * ICC: \c -openmp + * MSVC: check the respective option in the build properties. +You can control the number of threads that will be used using either the OpenMP API or Eigen's API, with the following priority: +\code + OMP_NUM_THREADS=n ./my_program + omp_set_num_threads(n); + Eigen::setNbThreads(n); +\endcode +Unless setNbThreads has been called, Eigen uses the number of threads specified by OpenMP. You can restore this behavior by calling \code setNbThreads(0); \endcode +You can query the number of threads that will be used with: +\code +n = Eigen::nbThreads(); +\endcode +You can disable Eigen's multi-threading at compile time by defining the EIGEN_DONT_PARALLELIZE preprocessor token. + +Currently, the following algorithms can make use of multi-threading: + * general matrix - matrix products + * PartialPivLU + +\section TopicMultiThreading_UsingEigenWithMT Using Eigen in a multi-threaded application + +If your own application is multithreaded, and multiple threads make calls to Eigen, then you have to initialize Eigen by calling the following routine \b before creating the threads: +\code +#include + +int main(int argc, char** argv) +{ + Eigen::initParallel(); + + ... +} +\endcode + +If your application is parallelized with OpenMP, you might want to disable Eigen's own parallelization as detailed in the previous section. + +*/ + +} diff --git a/gtsam/3rdparty/Eigen/doc/TutorialSparse_example_details.dox b/gtsam/3rdparty/Eigen/doc/TutorialSparse_example_details.dox new file mode 100644 index 000000000..0438da8bb --- /dev/null +++ b/gtsam/3rdparty/Eigen/doc/TutorialSparse_example_details.dox @@ -0,0 +1,4 @@ +/** +\page TutorialSparse_example_details +\include Tutorial_sparse_example_details.cpp +*/ diff --git a/gtsam/3rdparty/Eigen/doc/UsingIntelMKL.dox b/gtsam/3rdparty/Eigen/doc/UsingIntelMKL.dox new file mode 100644 index 000000000..379ee3ffd --- /dev/null +++ b/gtsam/3rdparty/Eigen/doc/UsingIntelMKL.dox @@ -0,0 +1,168 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + Copyright (C) 2011 Gael Guennebaud + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission.
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Documentation on the use of Intel MKL through Eigen + ******************************************************************************** +*/ + +namespace Eigen { + +/** \page TopicUsingIntelMKL Using Intel® Math Kernel Library from Eigen + +\section TopicUsingIntelMKL_Intro Eigen and Intel® Math Kernel Library (Intel® MKL) + +Since Eigen version 3.1 and later, users can benefit from built-in Intel MKL optimizations with an installed copy of Intel MKL 10.3 (or later). + Intel MKL provides highly optimized multi-threaded mathematical routines for x86-compatible architectures. +Intel MKL is available on Linux, Mac and Windows for both Intel64 and IA32 architectures. + +\warning Be aware that Intel® MKL is a proprietary software. It is the responsibility of the users to buy MKL licenses for their products. Moreover, the license of the user product has to allow linking to proprietary software that excludes any unmodified versions of the GPL. As a consequence, this also means that Eigen has to be used through the LGPL3+ license. + +Using Intel MKL through Eigen is easy: +-# define the \c EIGEN_USE_MKL_ALL macro before including any Eigen's header +-# link your program to MKL libraries (see the MKL linking advisor) +-# on a 64bits system, you must use the LP64 interface (not the ILP64 one) + +When doing so, a number of Eigen's algorithms are silently substituted with calls to Intel MKL routines. +These substitutions apply only for \b Dynamic \b or \b large enough objects with one of the following four standard scalar types: \c float, \c double, \c complex, and \c complex. +Operations on other scalar types or mixing reals and complexes will continue to use the built-in algorithms. + +In addition you can coarsely select choose which parts will be substituted by defining one or multiple of the following macros: + +
OperationCode
diff --git a/gtsam/3rdparty/Eigen/doc/SparseQuickReference.dox b/gtsam/3rdparty/Eigen/doc/SparseQuickReference.dox new file mode 100644 index 000000000..7d6eb0fa9 --- /dev/null +++ b/gtsam/3rdparty/Eigen/doc/SparseQuickReference.dox @@ -0,0 +1,198 @@ +namespace Eigen { +/** \page SparseQuickRefPage Quick reference guide for sparse matrices + +\b Table \b of \b contents + - \ref Constructors + - \ref SparseMatrixInsertion + - \ref SparseBasicInfos + - \ref SparseBasicOps + - \ref SparseInterops + - \ref sparsepermutation + - \ref sparsesubmatrices + - \ref sparseselfadjointview +\n + +
+ +In this page, we give a quick summary of the main operations available for sparse matrices in the class SparseMatrix. First, it is recommended to read first the introductory tutorial at \ref TutorialSparse. The important point to have in mind when working on sparse matrices is how they are stored : +i.e either row major or column major. The default is column major. Most arithmetic operations on sparse matrices will assert that they have the same storage order. Moreover, when interacting with external libraries that are not yet supported by Eigen, it is important to know how to send the required matrix pointers. + +\section Constructors Constructors and assignments +SparseMatrix is the core class to build and manipulate sparse matrices in Eigen. It takes as template parameters the Scalar type and the storage order, either RowMajor or ColumnMajor. The default is ColumnMajor. + +\code + SparseMatrix sm1(1000,1000); // 1000x1000 compressed sparse matrix of double. + SparseMatrix,RowMajor> sm2; // Compressed row major matrix of complex double. +\endcode +The copy constructor and assignment can be used to convert matrices from a storage order to another +\code + SparseMatrix sm1; + // Eventually fill the matrix sm1 ... + SparseMatrix sm2(sm1), sm3; // Initialize sm2 with sm1. + sm3 = sm1; // Assignment and evaluations modify the storage order. + \endcode + +\section SparseMatrixInsertion Allocating and inserting values +resize() and reserve() are used to set the size and allocate space for nonzero elements + \code + sm1.resize(m,n); //Change sm to a mxn matrix. + sm1.reserve(nnz); // Allocate room for nnz nonzeros elements. + \endcode +Note that when calling reserve(), it is not required that nnz is the exact number of nonzero elements in the final matrix. However, an exact estimation will avoid multiple reallocations during the insertion phase. + +Insertions of values in the sparse matrix can be done directly by looping over nonzero elements and use the insert() function +\code +// Direct insertion of the value v_ij; + sm1.insert(i, j) = v_ij; // It is assumed that v_ij does not already exist in the matrix. +\endcode + +After insertion, a value at (i,j) can be modified using coeffRef() +\code + // Update the value v_ij + sm1.coeffRef(i,j) = v_ij; + sm1.coeffRef(i,j) += v_ij; + sm1.coeffRef(i,j) -= v_ij; + ... +\endcode + +The recommended way to insert values is to build a list of triplets (row, col, val) and then call setFromTriplets(). +\code + sm1.setFromTriplets(TripletList.begin(), TripletList.end()); +\endcode +A complete example is available at \ref TutorialSparseFilling. + +The following functions can be used to set constant or random values in the matrix. +\code + sm1.setZero(); // Reset the matrix with zero elements + ... +\endcode + +\section SparseBasicInfos Matrix properties +Beyond the functions rows() and cols() that are used to get the number of rows and columns, there are some useful functions that are available to easily get some informations from the matrix. + + + + +
\code + sm1.rows(); // Number of rows + sm1.cols(); // Number of columns + sm1.nonZeros(); // Number of non zero values + sm1.outerSize(); // Number of columns (resp. rows) for a column major (resp. row major) matrix + sm1.innerSize(); // Number of rows (resp. columns) for a column major (resp. row major) matrix + sm1.norm(); // Frobenius norm of the matrix + sm1.squaredNorm(); // Squared Frobenius norm + sm1.isVector(); // Check if sm1 is a sparse vector or a sparse matrix + ... + \endcode
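For completeness, here is a minimal, self-contained sketch tying together the constructors, the triplet-based filling and the property queries listed above. It is only an illustration: the matrix size and the inserted values are made up.
\code
#include <Eigen/Sparse>
#include <vector>
#include <iostream>

int main()
{
  typedef Eigen::Triplet<double> T;
  std::vector<T> triplets;                      // (row, col, value) entries
  triplets.push_back(T(0, 0,  2.0));
  triplets.push_back(T(1, 2, -1.0));
  triplets.push_back(T(3, 1,  4.0));

  Eigen::SparseMatrix<double> sm1(4, 4);        // 4x4, column-major by default
  sm1.setFromTriplets(triplets.begin(), triplets.end());

  sm1.coeffRef(0, 0) += 1.0;                    // update an already inserted value

  std::cout << sm1.rows() << "x" << sm1.cols()  // dimensions
            << ", nnz = "  << sm1.nonZeros()    // number of stored nonzeros
            << ", norm = " << sm1.norm()        // Frobenius norm
            << std::endl;
  return 0;
}
\endcode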
+ +\section SparseBasicOps Arithmetic operations +It is easy to perform arithmetic operations on sparse matrices provided that the dimensions are adequate and that the matrices have the same storage order. Note that the evaluation can always be done in a matrix with a different storage order. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Operations Code Notes
add subtract \code + sm3 = sm1 + sm2; + sm3 = sm1 - sm2; + sm2 += sm1; + sm2 -= sm1; \endcode + + sm1 and sm2 should have the same storage order +
+ scalar product\code + sm3 = sm1 * s1; sm3 *= s1; + sm3 = s1 * sm1 + s2 * sm2; sm3 /= s1;\endcode + + Many combinations are possible if the dimensions and the storage order agree. +
Product \code + sm3 = sm1 * sm2; + dm2 = sm1 * dm1; + dv2 = sm1 * dv1; + \endcode +
transposition, adjoint \code + sm2 = sm1.transpose(); + sm2 = sm1.adjoint(); + \endcode + Note that the transposition changes the storage order. There is no support for transposeInPlace(). +
+ Component-wise ops + \code + sm1.cwiseProduct(sm2); + sm1.cwiseQuotient(sm2); + sm1.cwiseMin(sm2); + sm1.cwiseMax(sm2); + sm1.cwiseAbs(); + sm1.cwiseSqrt(); + \endcode + sm1 and sm2 should have the same storage order +
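As an illustration, the following sketch combines the operations listed in the table above. It assumes sm1 and sm2 are square sparse matrices of matching size that are already filled and share the default column-major storage order, and that dv1 has a compatible size; these assumptions are not taken from the table itself.
\code
#include <Eigen/Sparse>
#include <Eigen/Dense>

void arithmeticSketch(const Eigen::SparseMatrix<double>& sm1,
                      const Eigen::SparseMatrix<double>& sm2,
                      const Eigen::VectorXd& dv1)
{
  Eigen::SparseMatrix<double> sm3 = sm1 + sm2;   // sum (same storage order required)
  sm3 = 0.5 * sm1 - sm2;                         // scalar product and subtraction
  sm3 = sm1 * sm2;                               // sparse-sparse product
  Eigen::VectorXd dv2 = sm1 * dv1;               // sparse-dense product
  sm3 = sm1.transpose();                         // evaluated transpose (no transposeInPlace())
  sm3 = sm1.cwiseProduct(sm2);                   // component-wise product
}
\endcode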
+ + +\section SparseInterops Low-level storage +There are a set of low-levels functions to get the standard compressed storage pointers. The matrix should be in compressed mode which can be checked by calling isCompressed(); makeCompressed() should do the job otherwise. +\code + // Scalar pointer to the values of the matrix, size nnz + sm1.valuePtr(); + // Index pointer to get the row indices (resp. column indices) for column major (resp. row major) matrix, size nnz + sm1.innerIndexPtr(); + // Index pointer to the beginning of each row (resp. column) in valuePtr() and innerIndexPtr() for column major (row major). The size is outersize()+1; + sm1.outerIndexPtr(); +\endcode +These pointers can therefore be easily used to send the matrix to some external libraries/solvers that are not yet supported by Eigen. + +\section sparsepermutation Permutations, submatrices and Selfadjoint Views +In many cases, it is necessary to reorder the rows and/or the columns of the sparse matrix for several purposes : fill-in reducing during matrix decomposition, better data locality for sparse matrix-vector products... The class PermutationMatrix is available to this end. + \code + PermutationMatrix perm; + // Reserve and fill the values of perm; + perm.inverse(n); // Compute eventually the inverse permutation + sm1.twistedBy(perm) //Apply the permutation on rows and columns + sm2 = sm1 * perm; // ??? Apply the permutation on columns ???; + sm2 = perm * sm1; // ??? Apply the permutation on rows ???; + \endcode + +\section sparsesubmatrices Sub-matrices +The following functions are useful to extract a block of rows (resp. columns) from a row-major (resp. column major) sparse matrix. Note that because of the particular storage, it is not ?? efficient ?? to extract a submatrix comprising a certain number of subrows and subcolumns. + \code + sm1.innerVector(outer); // Returns the outer -th column (resp. row) of the matrix if sm is col-major (resp. row-major) + sm1.innerVectors(outer); // Returns the outer -th column (resp. row) of the matrix if mat is col-major (resp. row-major) + sm1.middleRows(start, numRows); // For row major matrices, get a range of numRows rows + sm1.middleCols(start, numCols); // For column major matrices, get a range of numCols cols + \endcode + Examples : + +\section sparseselfadjointview Sparse triangular and selfadjoint Views + \code + sm2 = sm1.triangularview(); // Get the lower triangular part of the matrix. + dv2 = sm1.triangularView().solve(dv1); // Solve the linear system with the uppper triangular part. + sm2 = sm1.selfadjointview(); // Build a selfadjoint matrix from the lower part of sm1. + \endcode + + +*/ +} diff --git a/gtsam/3rdparty/Eigen/doc/TopicLinearAlgebraDecompositions.dox b/gtsam/3rdparty/Eigen/doc/TopicLinearAlgebraDecompositions.dox index 5684a495f..faa564b93 100644 --- a/gtsam/3rdparty/Eigen/doc/TopicLinearAlgebraDecompositions.dox +++ b/gtsam/3rdparty/Eigen/doc/TopicLinearAlgebraDecompositions.dox @@ -34,7 +34,7 @@ namespace Eigen {
- Yes ExcellentBlockingBlocking, Implicit MT
- Yes ExcellentBlocking \n Soon: meta unrollerBlocking
Eigenvalues/vectors - GoodSoon: specializations for 2x2 and 3x3Closed forms for 2x2 and 3x3
+ + + + + +
\c EIGEN_USE_BLAS Enables the use of external BLAS level 2 and 3 routines (currently works with Intel MKL only)
\c EIGEN_USE_LAPACKE Enables the use of external Lapack routines via the Intel Lapacke C interface to Lapack (currently works with Intel MKL only)
\c EIGEN_USE_LAPACKE_STRICT Same as \c EIGEN_USE_LAPACKE but algorithm of lower robustness are disabled. This currently concerns only JacobiSVD which otherwise would be replaced by \c gesvd that is less robust than Jacobi rotations.
\c EIGEN_USE_MKL_VML Enables the use of Intel VML (vector operations)
\c EIGEN_USE_MKL_ALL Defines \c EIGEN_USE_BLAS, \c EIGEN_USE_LAPACKE, and \c EIGEN_USE_MKL_VML
+ +Finally, the PARDISO sparse solver shipped with Intel MKL can be used through the \ref PardisoLU, \ref PardisoLLT and \ref PardisoLDLT classes of the \ref PARDISOSupport_Module. + + +\section TopicUsingIntelMKL_SupportedFeatures List of supported features + +The breadth of Eigen functionality covered by Intel MKL is listed in the table below. + + + + + + + + + + + +
Functional domainCode exampleMKL routines
Matrix-matrix operations \n \c EIGEN_USE_BLAS \code +m1*m2.transpose(); +m1.selfadjointView()*m2; +m1*m2.triangularView(); +m1.selfadjointView().rankUpdate(m2,1.0); +\endcode\code +?gemm +?symm/?hemm +?trmm +dsyrk/ssyrk +\endcode
Matrix-vector operations \n \c EIGEN_USE_BLAS \code +m1.adjoint()*b; +m1.selfadjointView()*b; +m1.triangularView()*b; +\endcode\code +?gemv +?symv/?hemv +?trmv +\endcode
LU decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT \code +v1 = m1.lu().solve(v2); +\endcode\code +?getrf +\endcode
Cholesky decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT \code +v1 = m2.selfadjointView().llt().solve(v2); +\endcode\code +?potrf +\endcode
QR decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT \code +m1.householderQr(); +m1.colPivHouseholderQr(); +\endcode\code +?geqrf +?geqp3 +\endcode
Singular value decomposition \n \c EIGEN_USE_LAPACKE \code +JacobiSVD svd; +svd.compute(m1, ComputeThinV); +\endcode\code +?gesvd +\endcode
Eigen-value decompositions \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT \code +EigenSolver es(m1); +ComplexEigenSolver ces(m1); +SelfAdjointEigenSolver saes(m1+m1.transpose()); +GeneralizedSelfAdjointEigenSolver + gsaes(m1+m1.transpose(),m2+m2.transpose()); +\endcode\code +?gees +?gees +?syev/?heev +?syev/?heev, +?potrf +\endcode
Schur decomposition \n \c EIGEN_USE_LAPACKE \n \c EIGEN_USE_LAPACKE_STRICT \code +RealSchur schurR(m1); +ComplexSchur schurC(m1); +\endcode\code +?gees +\endcode
Vector Math \n \c EIGEN_USE_MKL_VML \code +v2=v1.array().sin(); +v2=v1.array().asin(); +v2=v1.array().cos(); +v2=v1.array().acos(); +v2=v1.array().tan(); +v2=v1.array().exp(); +v2=v1.array().log(); +v2=v1.array().sqrt(); +v2=v1.array().square(); +v2=v1.array().pow(1.5); +\endcode\code +v?Sin +v?Asin +v?Cos +v?Acos +v?Tan +v?Exp +v?Ln +v?Sqrt +v?Sqr +v?Powx +\endcode
+In the examples, m1 and m2 are dense matrices and v1 and v2 are dense vectors. + + +\section TopicUsingIntelMKL_Links Links +- Intel MKL can be purchased and downloaded here. +- Intel MKL is also bundled with Intel Composer XE. + + +*/ + +} \ No newline at end of file diff --git a/gtsam/3rdparty/Eigen/doc/eigendoxy.css b/gtsam/3rdparty/Eigen/doc/eigendoxy.css index e62958831..c6c16286d 100644 --- a/gtsam/3rdparty/Eigen/doc/eigendoxy.css +++ b/gtsam/3rdparty/Eigen/doc/eigendoxy.css @@ -904,3 +904,8 @@ div.eimainmenu { h3.version { text-align: center; } + + +td.width20em p.endtd { + width: 20em; +} diff --git a/gtsam/3rdparty/Eigen/doc/examples/TemplateKeyword_flexible.cpp b/gtsam/3rdparty/Eigen/doc/examples/TemplateKeyword_flexible.cpp new file mode 100644 index 000000000..9d85292dd --- /dev/null +++ b/gtsam/3rdparty/Eigen/doc/examples/TemplateKeyword_flexible.cpp @@ -0,0 +1,22 @@ +#include +#include + +using namespace Eigen; + +template +void copyUpperTriangularPart(MatrixBase& dst, const MatrixBase& src) +{ + /* Note the 'template' keywords in the following line! */ + dst.template triangularView() = src.template triangularView(); +} + +int main() +{ + MatrixXi m1 = MatrixXi::Ones(5,5); + MatrixXi m2 = MatrixXi::Random(4,4); + std::cout << "m2 before copy:" << std::endl; + std::cout << m2 << std::endl << std::endl; + copyUpperTriangularPart(m2, m1.topLeftCorner(4,4)); + std::cout << "m2 after copy:" << std::endl; + std::cout << m2 << std::endl << std::endl; +} diff --git a/gtsam/3rdparty/Eigen/doc/examples/TemplateKeyword_simple.cpp b/gtsam/3rdparty/Eigen/doc/examples/TemplateKeyword_simple.cpp new file mode 100644 index 000000000..6998c1769 --- /dev/null +++ b/gtsam/3rdparty/Eigen/doc/examples/TemplateKeyword_simple.cpp @@ -0,0 +1,20 @@ +#include +#include + +using namespace Eigen; + +void copyUpperTriangularPart(MatrixXf& dst, const MatrixXf& src) +{ + dst.triangularView() = src.triangularView(); +} + +int main() +{ + MatrixXf m1 = MatrixXf::Ones(4,4); + MatrixXf m2 = MatrixXf::Random(4,4); + std::cout << "m2 before copy:" << std::endl; + std::cout << m2 << std::endl << std::endl; + copyUpperTriangularPart(m2, m1); + std::cout << "m2 after copy:" << std::endl; + std::cout << m2 << std::endl << std::endl; +} diff --git a/gtsam/3rdparty/Eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple_rowwise.cpp b/gtsam/3rdparty/Eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple_rowwise.cpp index 9959c7909..d87c96ab1 100644 --- a/gtsam/3rdparty/Eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple_rowwise.cpp +++ b/gtsam/3rdparty/Eigen/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_simple_rowwise.cpp @@ -13,7 +13,7 @@ int main() v << 0,1,2,3; //add v to each row of m - mat.rowwise() += v; + mat.rowwise() += v.transpose(); std::cout << "Broadcasting result: " << std::endl; std::cout << mat << std::endl; diff --git a/gtsam/3rdparty/Eigen/doc/snippets/Cwise_boolean_and.cpp b/gtsam/3rdparty/Eigen/doc/snippets/Cwise_boolean_and.cpp new file mode 100644 index 000000000..df6b60d92 --- /dev/null +++ b/gtsam/3rdparty/Eigen/doc/snippets/Cwise_boolean_and.cpp @@ -0,0 +1,2 @@ +Array3d v(-1,2,1), w(-3,2,3); +cout << ((v lltOfA(A); // compute the Cholesky decomposition of A +MatrixXd L = lltOfA.matrixL(); // retrieve factor L in the decomposition +// The previous two lines can also be written as "L = A.llt().matrixL()" + +cout << "The Cholesky factor L is" << endl << L << endl; +cout << "To check this, let us compute L * 
L.transpose()" << endl; +cout << L * L.transpose() << endl; +cout << "This should equal the matrix A" << endl; diff --git a/gtsam/3rdparty/Eigen/doc/snippets/compile_snippet.cpp.in b/gtsam/3rdparty/Eigen/doc/snippets/compile_snippet.cpp.in index 474542380..894cd526c 100644 --- a/gtsam/3rdparty/Eigen/doc/snippets/compile_snippet.cpp.in +++ b/gtsam/3rdparty/Eigen/doc/snippets/compile_snippet.cpp.in @@ -1,10 +1,4 @@ -#include -#include -#include -#include -#include -#include -#include +#include #include using namespace Eigen; diff --git a/gtsam/3rdparty/Eigen/doc/special_examples/CMakeLists.txt b/gtsam/3rdparty/Eigen/doc/special_examples/CMakeLists.txt new file mode 100644 index 000000000..eeeae1d2a --- /dev/null +++ b/gtsam/3rdparty/Eigen/doc/special_examples/CMakeLists.txt @@ -0,0 +1,20 @@ + +if(NOT EIGEN_TEST_NOQT) + find_package(Qt4) + if(QT4_FOUND) + include(${QT_USE_FILE}) + endif() +endif(NOT EIGEN_TEST_NOQT) + + +if(QT4_FOUND) + add_executable(Tutorial_sparse_example Tutorial_sparse_example.cpp Tutorial_sparse_example_details.cpp) + target_link_libraries(Tutorial_sparse_example ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO} ${QT_QTCORE_LIBRARY} ${QT_QTGUI_LIBRARY}) + + add_custom_command( + TARGET Tutorial_sparse_example + POST_BUILD + COMMAND Tutorial_sparse_example + ARGS ${CMAKE_CURRENT_BINARY_DIR}/../html/Tutorial_sparse_example.jpeg + ) +endif(QT4_FOUND) diff --git a/gtsam/3rdparty/Eigen/doc/special_examples/Tutorial_sparse_example.cpp b/gtsam/3rdparty/Eigen/doc/special_examples/Tutorial_sparse_example.cpp new file mode 100644 index 000000000..002f19f01 --- /dev/null +++ b/gtsam/3rdparty/Eigen/doc/special_examples/Tutorial_sparse_example.cpp @@ -0,0 +1,32 @@ +#include +#include + +typedef Eigen::SparseMatrix SpMat; // declares a column-major sparse matrix type of double +typedef Eigen::Triplet T; + +void buildProblem(std::vector& coefficients, Eigen::VectorXd& b, int n); +void saveAsBitmap(const Eigen::VectorXd& x, int n, const char* filename); + +int main(int argc, char** argv) +{ + int n = 300; // size of the image + int m = n*n; // number of unknows (=number of pixels) + + // Assembly: + std::vector coefficients; // list of non-zeros coefficients + Eigen::VectorXd b(m); // the right hand side-vector resulting from the constraints + buildProblem(coefficients, b, n); + + SpMat A(m,m); + A.setFromTriplets(coefficients.begin(), coefficients.end()); + + // Solving: + Eigen::SimplicialCholesky chol(A); // performs a Cholesky factorization of A + Eigen::VectorXd x = chol.solve(b); // use the factorization to solve for the given right hand side + + // Export the result to a file: + saveAsBitmap(x, n, argv[1]); + + return 0; +} + diff --git a/gtsam/3rdparty/Eigen/doc/special_examples/Tutorial_sparse_example_details.cpp b/gtsam/3rdparty/Eigen/doc/special_examples/Tutorial_sparse_example_details.cpp new file mode 100644 index 000000000..8c3020b63 --- /dev/null +++ b/gtsam/3rdparty/Eigen/doc/special_examples/Tutorial_sparse_example_details.cpp @@ -0,0 +1,44 @@ +#include +#include +#include + +typedef Eigen::SparseMatrix SpMat; // declares a column-major sparse matrix type of double +typedef Eigen::Triplet T; + +void insertCoefficient(int id, int i, int j, double w, std::vector& coeffs, + Eigen::VectorXd& b, const Eigen::VectorXd& boundary) +{ + int n = boundary.size(); + int id1 = i+j*n; + + if(i==-1 || i==n) b(id) -= w * boundary(j); // constrained coeffcieint + else if(j==-1 || j==n) b(id) -= w * boundary(i); // constrained coeffcieint + else coeffs.push_back(T(id,id1,w)); // unknown 
coefficient +} + +void buildProblem(std::vector& coefficients, Eigen::VectorXd& b, int n) +{ + b.setZero(); + Eigen::ArrayXd boundary = Eigen::ArrayXd::LinSpaced(n, 0,M_PI).sin().pow(2); + for(int j=0; j bits = (x*255).cast(); + QImage img(bits.data(), n,n,QImage::Format_Indexed8); + img.setColorCount(256); + for(int i=0;i<256;i++) img.setColor(i,qRgb(i,i,i)); + img.save(filename); +} diff --git a/gtsam/3rdparty/Eigen/lapack/CMakeLists.txt b/gtsam/3rdparty/Eigen/lapack/CMakeLists.txt index 96d6a416d..062845a3f 100644 --- a/gtsam/3rdparty/Eigen/lapack/CMakeLists.txt +++ b/gtsam/3rdparty/Eigen/lapack/CMakeLists.txt @@ -1,27 +1,23 @@ project(EigenLapack CXX) -if( NOT DEFINED EIGEN_Fortran_COMPILER_WORKS OR EIGEN_Fortran_COMPILER_WORKS) +include("../cmake/language_support.cmake") +workaround_9220(Fortran EIGEN_Fortran_COMPILER_WORKS) + +if(EIGEN_Fortran_COMPILER_WORKS) enable_language(Fortran OPTIONAL) - - if(CMAKE_Fortran_COMPILER_WORKS) - set(EIGEN_Fortran_COMPILER_WORKS TRUE CACHE INTERNAL "workaround cmake's enable_language issue") - else() - set(EIGEN_Fortran_COMPILER_WORKS FALSE CACHE INTERNAL "workaround cmake's enable_language issue") - endif() - endif() -if(CMAKE_Fortran_COMPILER_WORKS) - add_custom_target(lapack) include_directories(../blas) set(EigenLapack_SRCS -single.cpp double.cpp complex_single.cpp complex_double.cpp +single.cpp double.cpp complex_single.cpp complex_double.cpp ../blas/xerbla.cpp ) +if(EIGEN_Fortran_COMPILER_WORKS) + get_filename_component(eigen_full_path_to_reference_to_reference_lapack "./reference/" ABSOLUTE) if(EXISTS ${eigen_full_path_to_reference_to_reference_lapack}) set(EigenLapack_SRCS ${EigenLapack_SRCS} @@ -357,6 +353,8 @@ reference/ctbcon.f reference/dormhr.f reference/sla_ ) endif() +endif(EIGEN_Fortran_COMPILER_WORKS) + add_library(eigen_lapack_static ${EigenLapack_SRCS}) add_library(eigen_lapack SHARED ${EigenLapack_SRCS}) @@ -365,14 +363,12 @@ if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO) target_link_libraries(eigen_lapack ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO}) endif() -# add_dependencies(lapack eigen_lapack eigen_lapack_static) -add_dependencies(lapack eigen_lapack_static) - -# install(TARGETS eigen_lapack -# RUNTIME DESTINATION bin -# LIBRARY DESTINATION lib -# ARCHIVE DESTINATION lib) +add_dependencies(lapack eigen_lapack eigen_lapack_static) +install(TARGETS eigen_lapack eigen_lapack_static + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib) # add_subdirectory(testing) -endif(CMAKE_Fortran_COMPILER_WORKS) + diff --git a/gtsam/3rdparty/Eigen/lapack/cholesky.cpp b/gtsam/3rdparty/Eigen/lapack/cholesky.cpp index c3a72c3c5..c51a8a29b 100644 --- a/gtsam/3rdparty/Eigen/lapack/cholesky.cpp +++ b/gtsam/3rdparty/Eigen/lapack/cholesky.cpp @@ -41,8 +41,8 @@ EIGEN_LAPACK_FUNC(potrf,(char* uplo, int *n, RealScalar *pa, int *lda, int *info Scalar* a = reinterpret_cast(pa); MatrixType A(a,*n,*n,*lda); int ret; - if(UPLO(*uplo)==UP) ret = internal::llt_inplace::blocked(A); - else ret = internal::llt_inplace::blocked(A); + if(UPLO(*uplo)==UP) ret = internal::llt_inplace::blocked(A); + else ret = internal::llt_inplace::blocked(A); if(ret>=0) *info = ret+1; diff --git a/gtsam/3rdparty/Eigen/scripts/eigen_gen_docs b/gtsam/3rdparty/Eigen/scripts/eigen_gen_docs index e97e9ab8f..921d600ed 100644 --- a/gtsam/3rdparty/Eigen/scripts/eigen_gen_docs +++ b/gtsam/3rdparty/Eigen/scripts/eigen_gen_docs @@ -8,14 +8,15 @@ USER=${USER:-'orzel'} #ulimit -v 1024000 # step 1 : build -# todo if 'build is not there, create one: mkdir build -p (cd build && cmake 
.. && make doc) || { echo "make failed"; exit 1; } -#todo: n+1 where n = number of cpus #step 2 : upload -# (the '/' at the end of path are very important, see rsync documentation) -rsync -az --no-p build/doc/html/ $USER@ssh.tuxfamily.org:eigen/eigen.tuxfamily.org-web/htdocs/dox-3.0/ || { echo "upload failed"; exit 1; } +# (the '/' at the end of path is very important, see rsync documentation) +rsync -az --no-p --delete build/doc/html/ $USER@ssh.tuxfamily.org:eigen/eigen.tuxfamily.org-web/htdocs/dox-devel/ || { echo "upload failed"; exit 1; } + +#step 3 : fix the perm +ssh $USER@ssh.tuxfamily.org 'chmod -R g+w /home/eigen/eigen.tuxfamily.org-web/htdocs/dox-devel' || { echo "perm failed"; exit 1; } echo "Uploaded successfully" diff --git a/gtsam/3rdparty/Eigen/test/CMakeLists.txt b/gtsam/3rdparty/Eigen/test/CMakeLists.txt index fab7de0d4..6f8fc4ae3 100644 --- a/gtsam/3rdparty/Eigen/test/CMakeLists.txt +++ b/gtsam/3rdparty/Eigen/test/CMakeLists.txt @@ -1,17 +1,90 @@ -find_package(GSL) -if(GSL_FOUND AND GSL_VERSION_MINOR LESS 9) - set(GSL_FOUND "") -endif(GSL_FOUND AND GSL_VERSION_MINOR LESS 9) -if(GSL_FOUND) - add_definitions("-DHAS_GSL" ${GSL_DEFINITIONS}) - include_directories(${GSL_INCLUDE_DIR}) - ei_add_property(EIGEN_TESTED_BACKENDS "GSL, ") -else(GSL_FOUND) - ei_add_property(EIGEN_MISSING_BACKENDS "GSL, ") - set(GSL_LIBRARIES "") -endif(GSL_FOUND) +# generate split test header file +message(STATUS ${CMAKE_CURRENT_BINARY_DIR}) +file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h "") +foreach(i RANGE 1 999) + file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h + "#ifdef EIGEN_TEST_PART_${i}\n" + "#define CALL_SUBTEST_${i}(FUNC) CALL_SUBTEST(FUNC)\n" + "#else\n" + "#define CALL_SUBTEST_${i}(FUNC)\n" + "#endif\n\n" + ) +endforeach() +# configure blas/lapack (use Eigen's ones) +set(BLAS_FOUND TRUE) +set(LAPACK_FOUND TRUE) +set(BLAS_LIBRARIES eigen_blas) +set(LAPACK_LIBRARIES eigen_lapack) + +set(EIGEN_TEST_MATRIX_DIR "" CACHE STRING "Enable testing of realword sparse matrices contained in the specified path") +if(EIGEN_TEST_MATRIX_DIR) + if(NOT WIN32) + message(STATUS "Test realworld sparse matrices: ${EIGEN_TEST_MATRIX_DIR}") + add_definitions( -DTEST_REAL_CASES="${EIGEN_TEST_MATRIX_DIR}" ) + else(NOT WIN32) + message(STATUS "REAL CASES CAN NOT BE CURRENTLY TESTED ON WIN32") + endif(NOT WIN32) +endif(EIGEN_TEST_MATRIX_DIR) + +set(SPARSE_LIBS " ") + +find_package(Cholmod) +if(CHOLMOD_FOUND AND BLAS_FOUND AND LAPACK_FOUND) + add_definitions("-DEIGEN_CHOLMOD_SUPPORT") + include_directories(${CHOLMOD_INCLUDES}) + set(SPARSE_LIBS ${SPARSE_LIBS} ${CHOLMOD_LIBRARIES} ${BLAS_LIBRARIES} ${LAPACK_LIBRARIES}) + set(CHOLMOD_ALL_LIBS ${CHOLMOD_LIBRARIES} ${BLAS_LIBRARIES} ${LAPACK_LIBRARIES}) + ei_add_property(EIGEN_TESTED_BACKENDS "Cholmod, ") +else() + ei_add_property(EIGEN_MISSING_BACKENDS "Cholmod, ") +endif() + +find_package(Umfpack) +if(UMFPACK_FOUND AND BLAS_FOUND) + add_definitions("-DEIGEN_UMFPACK_SUPPORT") + include_directories(${UMFPACK_INCLUDES}) + set(SPARSE_LIBS ${SPARSE_LIBS} ${UMFPACK_LIBRARIES} ${BLAS_LIBRARIES}) + set(UMFPACK_ALL_LIBS ${UMFPACK_LIBRARIES} ${BLAS_LIBRARIES}) + ei_add_property(EIGEN_TESTED_BACKENDS "UmfPack, ") +else() + ei_add_property(EIGEN_MISSING_BACKENDS "UmfPack, ") +endif() + +find_package(SuperLU) +if(SUPERLU_FOUND AND BLAS_FOUND) + add_definitions("-DEIGEN_SUPERLU_SUPPORT") + include_directories(${SUPERLU_INCLUDES}) + set(SPARSE_LIBS ${SPARSE_LIBS} ${SUPERLU_LIBRARIES} ${BLAS_LIBRARIES}) + set(SUPERLU_ALL_LIBS ${SUPERLU_LIBRARIES} 
${BLAS_LIBRARIES}) + ei_add_property(EIGEN_TESTED_BACKENDS "SuperLU, ") +else() + ei_add_property(EIGEN_MISSING_BACKENDS "SuperLU, ") +endif() + + +find_package(Pastix) +find_package(Scotch) +find_package(Metis) +if(PASTIX_FOUND AND BLAS_FOUND) + add_definitions("-DEIGEN_PASTIX_SUPPORT") + include_directories(${PASTIX_INCLUDES}) + if(SCOTCH_FOUND) + include_directories(${SCOTCH_INCLUDES}) + set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${SCOTCH_LIBRARIES}) + elseif(METIS_FOUND) + include_directories(${METIS_INCLUDES}) + set(PASTIX_LIBRARIES ${PASTIX_LIBRARIES} ${METIS_LIBRARIES}) + else(SCOTCH_FOUND) + ei_add_property(EIGEN_MISSING_BACKENDS "PaStiX, ") + endif(SCOTCH_FOUND) + set(SPARSE_LIBS ${SPARSE_LIBS} ${PASTIX_LIBRARIES} ${ORDERING_LIBRARIES} ${BLAS_LIBRARIES}) + set(PASTIX_ALL_LIBS ${PASTIX_LIBRARIES} ${BLAS_LIBRARIES}) + ei_add_property(EIGEN_TESTED_BACKENDS "PaStiX, ") +else() + ei_add_property(EIGEN_MISSING_BACKENDS "PaStiX, ") +endif() option(EIGEN_TEST_NOQT "Disable Qt support in unit tests" OFF) if(NOT EIGEN_TEST_NOQT) @@ -75,7 +148,7 @@ ei_add_test(product_mmtr) ei_add_test(product_notemporary) ei_add_test(stable_norm) ei_add_test(bandmatrix) -ei_add_test(cholesky "" "${GSL_LIBRARIES}") +ei_add_test(cholesky) ei_add_test(lu) ei_add_test(determinant) ei_add_test(inverse) @@ -86,8 +159,8 @@ ei_add_test(upperbidiagonalization) ei_add_test(hessenberg) ei_add_test(schur_real) ei_add_test(schur_complex) -ei_add_test(eigensolver_selfadjoint "" "${GSL_LIBRARIES}") -ei_add_test(eigensolver_generic "" "${GSL_LIBRARIES}") +ei_add_test(eigensolver_selfadjoint) +ei_add_test(eigensolver_generic) ei_add_test(eigensolver_complex) ei_add_test(jacobi) ei_add_test(jacobisvd) @@ -110,12 +183,13 @@ endif(QT4_FOUND) ei_add_test(sparse_vector) ei_add_test(sparse_basic) ei_add_test(sparse_product) -ei_add_test(sparse_solvers "" "${SPARSE_LIBS}") +ei_add_test(sparse_solvers) ei_add_test(umeyama) ei_add_test(householder) ei_add_test(swap) ei_add_test(conservative_resize) ei_add_test(permutationmatrices) +ei_add_test(sparse_permutations) ei_add_test(eigen2support) ei_add_test(nullary) ei_add_test(nesting_ops "${CMAKE_CXX_FLAGS_DEBUG}") @@ -123,6 +197,32 @@ ei_add_test(zerosized) ei_add_test(dontalign) ei_add_test(sizeoverflow) ei_add_test(prec_inverse_4x4) +ei_add_test(vectorwiseop) + +ei_add_test(simplicial_cholesky) +ei_add_test(conjugate_gradient) +ei_add_test(bicgstab) + + +if(UMFPACK_FOUND) + ei_add_test(umfpack_support "" "${UMFPACK_ALL_LIBS}") +endif() + +if(SUPERLU_FOUND) + ei_add_test(superlu_support "" "${SUPERLU_ALL_LIBS}") +endif() + +if(CHOLMOD_FOUND) + ei_add_test(cholmod_support "" "${CHOLMOD_ALL_LIBS}") +endif() + +if(PARDISO_FOUND) + ei_add_test(pardiso_support "" "${PARDISO_ALL_LIBS}") +endif() + +if(PASTIX_FOUND AND (SCOTCH_FOUND OR METIS_FOUND)) + ei_add_test(pastix_support "" "${PASTIX_ALL_LIBS}") +endif() string(TOLOWER "${CMAKE_CXX_COMPILER}" cmake_cxx_compiler_tolower) if(cmake_cxx_compiler_tolower MATCHES "qcc") diff --git a/gtsam/3rdparty/Eigen/test/adjoint.cpp b/gtsam/3rdparty/Eigen/test/adjoint.cpp index d8d2aaa1b..140283477 100644 --- a/gtsam/3rdparty/Eigen/test/adjoint.cpp +++ b/gtsam/3rdparty/Eigen/test/adjoint.cpp @@ -43,8 +43,6 @@ template void adjoint(const MatrixType& m) MatrixType m1 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols), m3(rows, cols), - mzero = MatrixType::Zero(rows, cols), - identity = SquareMatrixType::Identity(rows, rows), square = SquareMatrixType::Random(rows, rows); VectorType v1 = VectorType::Random(rows), v2 = 
VectorType::Random(rows), @@ -65,15 +63,23 @@ template void adjoint(const MatrixType& m) // check basic properties of dot, norm, norm2 typedef typename NumTraits::Real RealScalar; - RealScalar ref = NumTraits::IsInteger ? 0 : (std::max)((s1 * v1 + s2 * v2).norm(),v3.norm()); + RealScalar ref = NumTraits::IsInteger ? RealScalar(0) : (std::max)((s1 * v1 + s2 * v2).norm(),v3.norm()); VERIFY(test_isApproxWithRef((s1 * v1 + s2 * v2).dot(v3), internal::conj(s1) * v1.dot(v3) + internal::conj(s2) * v2.dot(v3), ref)); VERIFY(test_isApproxWithRef(v3.dot(s1 * v1 + s2 * v2), s1*v3.dot(v1)+s2*v3.dot(v2), ref)); VERIFY_IS_APPROX(internal::conj(v1.dot(v2)), v2.dot(v1)); VERIFY_IS_APPROX(internal::real(v1.dot(v1)), v1.squaredNorm()); - if(!NumTraits::IsInteger) + if(!NumTraits::IsInteger) { VERIFY_IS_APPROX(v1.squaredNorm(), v1.norm() * v1.norm()); + // check normalized() and normalize() + VERIFY_IS_APPROX(v1, v1.norm() * v1.normalized()); + v3 = v1; + v3.normalize(); + VERIFY_IS_APPROX(v1, v1.norm() * v3); + VERIFY_IS_APPROX(v3, v1.normalized()); + VERIFY_IS_APPROX(v3.norm(), RealScalar(1)); + } VERIFY_IS_MUCH_SMALLER_THAN(internal::abs(vzero.dot(v1)), static_cast(1)); - + // check compatibility of dot and adjoint ref = NumTraits::IsInteger ? 0 : (std::max)((std::max)(v1.norm(),v2.norm()),(std::max)((square * v2).norm(),(square.adjoint() * v1).norm())); @@ -119,11 +125,11 @@ void test_adjoint() CALL_SUBTEST_1( adjoint(Matrix()) ); CALL_SUBTEST_2( adjoint(Matrix3d()) ); CALL_SUBTEST_3( adjoint(Matrix4f()) ); - CALL_SUBTEST_4( adjoint(MatrixXcf(internal::random(1,50), internal::random(1,50))) ); - CALL_SUBTEST_5( adjoint(MatrixXi(internal::random(1,50), internal::random(1,50))) ); - CALL_SUBTEST_6( adjoint(MatrixXf(internal::random(1,50), internal::random(1,50))) ); + CALL_SUBTEST_4( adjoint(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE/2), internal::random(1,EIGEN_TEST_MAX_SIZE/2))) ); + CALL_SUBTEST_5( adjoint(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( adjoint(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } - // test a large matrix only once + // test a large static matrix only once CALL_SUBTEST_7( adjoint(Matrix()) ); #ifdef EIGEN_TEST_PART_4 diff --git a/gtsam/3rdparty/Eigen/test/array.cpp b/gtsam/3rdparty/Eigen/test/array.cpp index 6964075ea..912c28c88 100644 --- a/gtsam/3rdparty/Eigen/test/array.cpp +++ b/gtsam/3rdparty/Eigen/test/array.cpp @@ -43,7 +43,7 @@ template void array(const ArrayType& m) RowVectorType rv1 = RowVectorType::Random(cols); Scalar s1 = internal::random(), - s2 = internal::random(); + s2 = internal::random(); // scalar addition VERIFY_IS_APPROX(m1 + s1, s1 + m1); @@ -149,6 +149,12 @@ template void comparisons(const ArrayType& m) // count VERIFY(((m1.abs()+1)>RealScalar(0.1)).count() == rows*cols); + // and/or + VERIFY( (m1RealScalar(0)).count() == 0); + VERIFY( (m1=RealScalar(0)).count() == rows*cols); + RealScalar a = m1.abs().mean(); + VERIFY( (m1<-a || m1>a).count() == (m1.abs()>a).count()); + typedef Array ArrayOfIndices; // TODO allows colwise/rowwise for array @@ -169,7 +175,9 @@ template void array_real(const ArrayType& m) m2 = ArrayType::Random(rows, cols), m3(rows, cols); - // these these are mostly to check possible compilation issues. + Scalar s1 = internal::random(); + + // these tests are mostly to check possible compilation issues. 
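As an aside (not part of the patch), the enlarged array tests above exercise Eigen's coefficient-wise Array API; a minimal standalone sketch of the same kind of operations, with purely illustrative sizes and values, is:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::ArrayXXf a = Eigen::ArrayXXf::Random(3, 3);   // random coefficients in [-1, 1]
  Eigen::ArrayXXf b = a.abs();                         // coefficient-wise absolute value
  std::cout << b.sqrt() << "\n\n";                     // same result as b.pow(0.5f)
  std::cout << (b.min)(0.5f) << "\n\n";                // coefficient-wise min against a scalar
  std::cout << (a.sin() + a.cos()) << std::endl;       // coefficient-wise trigonometry
  return 0;
}
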
VERIFY_IS_APPROX(m1.sin(), std::sin(m1)); VERIFY_IS_APPROX(m1.sin(), internal::sin(m1)); VERIFY_IS_APPROX(m1.cos(), std::cos(m1)); @@ -180,7 +188,7 @@ template void array_real(const ArrayType& m) VERIFY_IS_APPROX(m1.acos(), internal::acos(m1)); VERIFY_IS_APPROX(m1.tan(), std::tan(m1)); VERIFY_IS_APPROX(m1.tan(), internal::tan(m1)); - + VERIFY_IS_APPROX(internal::cos(m1+RealScalar(3)*m2), internal::cos((m1+RealScalar(3)*m2).eval())); VERIFY_IS_APPROX(std::cos(m1+RealScalar(3)*m2), std::cos((m1+RealScalar(3)*m2).eval())); @@ -203,9 +211,67 @@ template void array_real(const ArrayType& m) VERIFY_IS_APPROX(m1.pow(2), m1.square()); VERIFY_IS_APPROX(std::pow(m1,2), m1.square()); + + ArrayType exponents = ArrayType::Constant(rows, cols, RealScalar(2)); + VERIFY_IS_APPROX(std::pow(m1,exponents), m1.square()); + m3 = m1.abs(); VERIFY_IS_APPROX(m3.pow(RealScalar(0.5)), m3.sqrt()); VERIFY_IS_APPROX(std::pow(m3,RealScalar(0.5)), m3.sqrt()); + + // scalar by array division + const RealScalar tiny = std::sqrt(std::numeric_limits::epsilon()); + s1 += Scalar(tiny); + m1 += ArrayType::Constant(rows,cols,Scalar(tiny)); + VERIFY_IS_APPROX(s1/m1, s1 * m1.inverse()); +} + +template void array_complex(const ArrayType& m) +{ + typedef typename ArrayType::Index Index; + + Index rows = m.rows(); + Index cols = m.cols(); + + ArrayType m1 = ArrayType::Random(rows, cols), + m2(rows, cols); + + for (Index i = 0; i < m.rows(); ++i) + for (Index j = 0; j < m.cols(); ++j) + m2(i,j) = std::sqrt(m1(i,j)); + + VERIFY_IS_APPROX(m1.sqrt(), m2); + VERIFY_IS_APPROX(m1.sqrt(), std::sqrt(m1)); + VERIFY_IS_APPROX(m1.sqrt(), internal::sqrt(m1)); +} + +template void min_max(const ArrayType& m) +{ + typedef typename ArrayType::Index Index; + typedef typename ArrayType::Scalar Scalar; + + Index rows = m.rows(); + Index cols = m.cols(); + + ArrayType m1 = ArrayType::Random(rows, cols); + + // min/max with array + Scalar maxM1 = m1.maxCoeff(); + Scalar minM1 = m1.minCoeff(); + + VERIFY_IS_APPROX(ArrayType::Constant(rows,cols, minM1), (m1.min)(ArrayType::Constant(rows,cols, minM1))); + VERIFY_IS_APPROX(m1, (m1.min)(ArrayType::Constant(rows,cols, maxM1))); + + VERIFY_IS_APPROX(ArrayType::Constant(rows,cols, maxM1), (m1.max)(ArrayType::Constant(rows,cols, maxM1))); + VERIFY_IS_APPROX(m1, (m1.max)(ArrayType::Constant(rows,cols, minM1))); + + // min/max with scalar input + VERIFY_IS_APPROX(ArrayType::Constant(rows,cols, minM1), (m1.min)( minM1)); + VERIFY_IS_APPROX(m1, (m1.min)( maxM1)); + + VERIFY_IS_APPROX(ArrayType::Constant(rows,cols, maxM1), (m1.max)( maxM1)); + VERIFY_IS_APPROX(m1, (m1.max)( minM1)); + } void test_array() @@ -214,22 +280,32 @@ void test_array() CALL_SUBTEST_1( array(Array()) ); CALL_SUBTEST_2( array(Array22f()) ); CALL_SUBTEST_3( array(Array44d()) ); - CALL_SUBTEST_4( array(ArrayXXcf(3, 3)) ); - CALL_SUBTEST_5( array(ArrayXXf(8, 12)) ); - CALL_SUBTEST_6( array(ArrayXXi(8, 12)) ); + CALL_SUBTEST_4( array(ArrayXXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_5( array(ArrayXXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( array(ArrayXXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( comparisons(Array()) ); CALL_SUBTEST_2( comparisons(Array22f()) ); CALL_SUBTEST_3( comparisons(Array44d()) ); - CALL_SUBTEST_5( comparisons(ArrayXXf(8, 12)) ); - CALL_SUBTEST_6( comparisons(ArrayXXi(8, 12)) ); + CALL_SUBTEST_5( 
comparisons(ArrayXXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( comparisons(ArrayXXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + } + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( min_max(Array()) ); + CALL_SUBTEST_2( min_max(Array22f()) ); + CALL_SUBTEST_3( min_max(Array44d()) ); + CALL_SUBTEST_5( min_max(ArrayXXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( min_max(ArrayXXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( array_real(Array()) ); CALL_SUBTEST_2( array_real(Array22f()) ); CALL_SUBTEST_3( array_real(Array44d()) ); - CALL_SUBTEST_5( array_real(ArrayXXf(8, 12)) ); + CALL_SUBTEST_5( array_real(ArrayXXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + } + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_4( array_complex(ArrayXXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } VERIFY((internal::is_same< internal::global_math_functions_filtering_base::type, int >::value)); diff --git a/gtsam/3rdparty/Eigen/test/array_for_matrix.cpp b/gtsam/3rdparty/Eigen/test/array_for_matrix.cpp index 45e0930ce..465b8998b 100644 --- a/gtsam/3rdparty/Eigen/test/array_for_matrix.cpp +++ b/gtsam/3rdparty/Eigen/test/array_for_matrix.cpp @@ -156,30 +156,65 @@ template void lpNorm(const VectorType& v) VERIFY_IS_APPROX(internal::pow(u.template lpNorm<5>(), typename VectorType::RealScalar(5)), u.array().abs().pow(5).sum()); } +template void cwise_min_max(const MatrixType& m) +{ + typedef typename MatrixType::Index Index; + typedef typename MatrixType::Scalar Scalar; + + Index rows = m.rows(); + Index cols = m.cols(); + + MatrixType m1 = MatrixType::Random(rows, cols); + + // min/max with array + Scalar maxM1 = m1.maxCoeff(); + Scalar minM1 = m1.minCoeff(); + + VERIFY_IS_APPROX(MatrixType::Constant(rows,cols, minM1), m1.cwiseMin(MatrixType::Constant(rows,cols, minM1))); + VERIFY_IS_APPROX(m1, m1.cwiseMin(MatrixType::Constant(rows,cols, maxM1))); + + VERIFY_IS_APPROX(MatrixType::Constant(rows,cols, maxM1), m1.cwiseMax(MatrixType::Constant(rows,cols, maxM1))); + VERIFY_IS_APPROX(m1, m1.cwiseMax(MatrixType::Constant(rows,cols, minM1))); + + // min/max with scalar input + VERIFY_IS_APPROX(MatrixType::Constant(rows,cols, minM1), m1.cwiseMin( minM1)); + VERIFY_IS_APPROX(m1, m1.cwiseMin( maxM1)); + + VERIFY_IS_APPROX(MatrixType::Constant(rows,cols, maxM1), m1.cwiseMax( maxM1)); + VERIFY_IS_APPROX(m1, m1.cwiseMax( minM1)); + +} + void test_array_for_matrix() { - int maxsize = 40; for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( array_for_matrix(Matrix()) ); CALL_SUBTEST_2( array_for_matrix(Matrix2f()) ); CALL_SUBTEST_3( array_for_matrix(Matrix4d()) ); - CALL_SUBTEST_4( array_for_matrix(MatrixXcf(internal::random(1,maxsize), internal::random(1,maxsize))) ); - CALL_SUBTEST_5( array_for_matrix(MatrixXf(internal::random(1,maxsize), internal::random(1,maxsize))) ); - CALL_SUBTEST_6( array_for_matrix(MatrixXi(internal::random(1,maxsize), internal::random(1,maxsize))) ); + CALL_SUBTEST_4( array_for_matrix(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_5( array_for_matrix(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( 
array_for_matrix(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( comparisons(Matrix()) ); CALL_SUBTEST_2( comparisons(Matrix2f()) ); CALL_SUBTEST_3( comparisons(Matrix4d()) ); - CALL_SUBTEST_5( comparisons(MatrixXf(internal::random(1,maxsize), internal::random(1,maxsize))) ); - CALL_SUBTEST_6( comparisons(MatrixXi(internal::random(1,maxsize), internal::random(1,maxsize))) ); + CALL_SUBTEST_5( comparisons(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( comparisons(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + } + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( cwise_min_max(Matrix()) ); + CALL_SUBTEST_2( cwise_min_max(Matrix2f()) ); + CALL_SUBTEST_3( cwise_min_max(Matrix4d()) ); + CALL_SUBTEST_5( cwise_min_max(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( cwise_min_max(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( lpNorm(Matrix()) ); CALL_SUBTEST_2( lpNorm(Vector2f()) ); CALL_SUBTEST_7( lpNorm(Vector3d()) ); CALL_SUBTEST_8( lpNorm(Vector4f()) ); - CALL_SUBTEST_5( lpNorm(VectorXf(internal::random(1,maxsize))) ); - CALL_SUBTEST_4( lpNorm(VectorXcf(internal::random(1,maxsize))) ); + CALL_SUBTEST_5( lpNorm(VectorXf(internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_4( lpNorm(VectorXcf(internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } } diff --git a/gtsam/3rdparty/Eigen/test/basicstuff.cpp b/gtsam/3rdparty/Eigen/test/basicstuff.cpp index 9f3966818..76ecffd68 100644 --- a/gtsam/3rdparty/Eigen/test/basicstuff.cpp +++ b/gtsam/3rdparty/Eigen/test/basicstuff.cpp @@ -42,11 +42,8 @@ template void basicStuff(const MatrixType& m) m2 = MatrixType::Random(rows, cols), m3(rows, cols), mzero = MatrixType::Zero(rows, cols), - identity = Matrix - ::Identity(rows, rows), square = Matrix::Random(rows, rows); VectorType v1 = VectorType::Random(rows), - v2 = VectorType::Random(rows), vzero = VectorType::Zero(rows); SquareMatrixType sm1 = SquareMatrixType::Random(rows,rows), sm2(rows,rows); @@ -215,14 +212,14 @@ void test_basicstuff() for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( basicStuff(Matrix()) ); CALL_SUBTEST_2( basicStuff(Matrix4d()) ); - CALL_SUBTEST_3( basicStuff(MatrixXcf(internal::random(1,100), internal::random(1,100))) ); - CALL_SUBTEST_4( basicStuff(MatrixXi(internal::random(1,100), internal::random(1,100))) ); - CALL_SUBTEST_5( basicStuff(MatrixXcd(internal::random(1,100), internal::random(1,100))) ); + CALL_SUBTEST_3( basicStuff(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_4( basicStuff(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_5( basicStuff(MatrixXcd(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( basicStuff(Matrix()) ); - CALL_SUBTEST_7( basicStuff(Matrix(internal::random(1,100),internal::random(1,100))) ); + CALL_SUBTEST_7( basicStuff(Matrix(internal::random(1,EIGEN_TEST_MAX_SIZE),internal::random(1,EIGEN_TEST_MAX_SIZE))) ); - CALL_SUBTEST_3( basicStuffComplex(MatrixXcf(internal::random(1,100), internal::random(1,100))) ); - CALL_SUBTEST_5( basicStuffComplex(MatrixXcd(internal::random(1,100), 
internal::random(1,100))) ); + CALL_SUBTEST_3( basicStuffComplex(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_5( basicStuffComplex(MatrixXcd(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } CALL_SUBTEST_1(fixedSizeMatrixConstruction()); diff --git a/gtsam/3rdparty/Eigen/test/bicgstab.cpp b/gtsam/3rdparty/Eigen/test/bicgstab.cpp new file mode 100644 index 000000000..2b6403583 --- /dev/null +++ b/gtsam/3rdparty/Eigen/test/bicgstab.cpp @@ -0,0 +1,45 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#include "sparse_solver.h" +#include + +template void test_bicgstab_T() +{ + BiCGSTAB, DiagonalPreconditioner > bicgstab_colmajor_diag; + BiCGSTAB, IdentityPreconditioner > bicgstab_colmajor_I; + BiCGSTAB, IncompleteLUT > bicgstab_colmajor_ilut; + //BiCGSTAB, SSORPreconditioner > bicgstab_colmajor_ssor; + + CALL_SUBTEST( check_sparse_square_solving(bicgstab_colmajor_diag) ); +// CALL_SUBTEST( check_sparse_square_solving(bicgstab_colmajor_I) ); + CALL_SUBTEST( check_sparse_square_solving(bicgstab_colmajor_ilut) ); + //CALL_SUBTEST( check_sparse_square_solving(bicgstab_colmajor_ssor) ); +} + +void test_bicgstab() +{ + CALL_SUBTEST_1(test_bicgstab_T()); + CALL_SUBTEST_2(test_bicgstab_T >()); +} diff --git a/gtsam/3rdparty/Eigen/test/block.cpp b/gtsam/3rdparty/Eigen/test/block.cpp index 70852ee48..07d5ce792 100644 --- a/gtsam/3rdparty/Eigen/test/block.cpp +++ b/gtsam/3rdparty/Eigen/test/block.cpp @@ -42,12 +42,8 @@ template void block(const MatrixType& m) m1_copy = m1, m2 = MatrixType::Random(rows, cols), m3(rows, cols), - mzero = MatrixType::Zero(rows, cols), ones = MatrixType::Ones(rows, cols); - VectorType v1 = VectorType::Random(rows), - v2 = VectorType::Random(rows), - v3 = VectorType::Random(rows), - vzero = VectorType::Zero(rows); + VectorType v1 = VectorType::Random(rows); Scalar s1 = internal::random(); diff --git a/gtsam/3rdparty/Eigen/test/cholesky.cpp b/gtsam/3rdparty/Eigen/test/cholesky.cpp index ae4342cee..4f2516d26 100644 --- a/gtsam/3rdparty/Eigen/test/cholesky.cpp +++ b/gtsam/3rdparty/Eigen/test/cholesky.cpp @@ -41,9 +41,37 @@ static int nb_temporaries; VERIFY( (#XPR) && nb_temporaries==N ); \ } -#ifdef HAS_GSL -#include "gsl_helper.h" -#endif +template class CholType> void test_chol_update(const MatrixType& symm) +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef Matrix VectorType; + + 
MatrixType symmLo = symm.template triangularView(); + MatrixType symmUp = symm.template triangularView(); + MatrixType symmCpy = symm; + + CholType chollo(symmLo); + CholType cholup(symmUp); + + for (int k=0; k<10; ++k) + { + VectorType vec = VectorType::Random(symm.rows()); + RealScalar sigma = internal::random(); + symmCpy += sigma * vec * vec.adjoint(); + + // we are doing some downdates, so it might be the case that the matrix is not SPD anymore + CholType chol(symmCpy); + if(chol.info()!=Success) + break; + + chollo.rankUpdate(vec, sigma); + VERIFY_IS_APPROX(symmCpy, chollo.reconstructedMatrix()); + + cholup.rankUpdate(vec, sigma); + VERIFY_IS_APPROX(symmCpy, cholup.reconstructedMatrix()); + } +} template void cholesky(const MatrixType& m) { @@ -77,34 +105,6 @@ template void cholesky(const MatrixType& m) // FIXME: currently that fails !! //symm.template part().setZero(); - #ifdef HAS_GSL -// if (internal::is_same::value) -// { -// typedef GslTraits Gsl; -// typename Gsl::Matrix gMatA=0, gSymm=0; -// typename Gsl::Vector gVecB=0, gVecX=0; -// convert(symm, gSymm); -// convert(symm, gMatA); -// convert(vecB, gVecB); -// convert(vecB, gVecX); -// Gsl::cholesky(gMatA); -// Gsl::cholesky_solve(gMatA, gVecB, gVecX); -// VectorType vecX(rows), _vecX, _vecB; -// convert(gVecX, _vecX); -// symm.llt().solve(vecB, &vecX); -// Gsl::prod(gSymm, gVecX, gVecB); -// convert(gVecB, _vecB); -// // test gsl itself ! -// VERIFY_IS_APPROX(vecB, _vecB); -// VERIFY_IS_APPROX(vecX, _vecX); -// -// Gsl::free(gMatA); -// Gsl::free(gSymm); -// Gsl::free(gVecB); -// Gsl::free(gVecX); -// } - #endif - { LLT chollo(symmLo); VERIFY_IS_APPROX(symm, chollo.reconstructedMatrix()); @@ -124,6 +124,11 @@ template void cholesky(const MatrixType& m) MatrixType neg = -symmLo; chollo.compute(neg); VERIFY(chollo.info()==NumericalIssue); + + VERIFY_IS_APPROX(MatrixType(chollo.matrixL().transpose().conjugate()), MatrixType(chollo.matrixU())); + VERIFY_IS_APPROX(MatrixType(chollo.matrixU().transpose().conjugate()), MatrixType(chollo.matrixL())); + VERIFY_IS_APPROX(MatrixType(cholup.matrixL().transpose().conjugate()), MatrixType(cholup.matrixU())); + VERIFY_IS_APPROX(MatrixType(cholup.matrixU().transpose().conjugate()), MatrixType(cholup.matrixL())); } // LDLT @@ -152,6 +157,11 @@ template void cholesky(const MatrixType& m) matX = ldltup.solve(matB); VERIFY_IS_APPROX(symm * matX, matB); + VERIFY_IS_APPROX(MatrixType(ldltlo.matrixL().transpose().conjugate()), MatrixType(ldltlo.matrixU())); + VERIFY_IS_APPROX(MatrixType(ldltlo.matrixU().transpose().conjugate()), MatrixType(ldltlo.matrixL())); + VERIFY_IS_APPROX(MatrixType(ldltup.matrixL().transpose().conjugate()), MatrixType(ldltup.matrixU())); + VERIFY_IS_APPROX(MatrixType(ldltup.matrixU().transpose().conjugate()), MatrixType(ldltup.matrixL())); + if(MatrixType::RowsAtCompileTime==Dynamic) { // note : each inplace permutation requires a small temporary vector (mask) @@ -166,6 +176,10 @@ template void cholesky(const MatrixType& m) VERIFY_EVALUATION_COUNT(matX = ldltup.solve(matX), 0); VERIFY_IS_APPROX(matX, ldltup.solve(matB).eval()); } + + // restore + if(sign == -1) + symm = -symm; } // test some special use cases of SelfCwiseBinaryOp: @@ -182,7 +196,10 @@ template void cholesky(const MatrixType& m) m2 = m1; m2.noalias() -= symmLo.template selfadjointView().llt().solve(matB); VERIFY_IS_APPROX(m2, m1 - symmLo.template selfadjointView().llt().solve(matB)); - + + // update/downdate + CALL_SUBTEST(( test_chol_update(symm) )); + CALL_SUBTEST(( test_chol_update(symm) )); } template 
void cholesky_cplx(const MatrixType& m) @@ -242,7 +259,6 @@ template void cholesky_cplx(const MatrixType& m) // matX = ldltlo.solve(matB); // VERIFY_IS_APPROX(symm * matX, matB); } - } // regression test for bug 241 @@ -290,9 +306,9 @@ void test_cholesky() CALL_SUBTEST_3( cholesky_bug241(Matrix2d()) ); CALL_SUBTEST_4( cholesky(Matrix3f()) ); CALL_SUBTEST_5( cholesky(Matrix4d()) ); - s = internal::random(1,200); + s = internal::random(1,EIGEN_TEST_MAX_SIZE); CALL_SUBTEST_2( cholesky(MatrixXd(s,s)) ); - s = internal::random(1,100); + s = internal::random(1,EIGEN_TEST_MAX_SIZE/2); CALL_SUBTEST_6( cholesky_cplx(MatrixXcd(s,s)) ); } @@ -304,4 +320,6 @@ void test_cholesky() // Test problem size constructors CALL_SUBTEST_9( LLT(10) ); CALL_SUBTEST_9( LDLT(10) ); + + EIGEN_UNUSED_VARIABLE(s) } diff --git a/gtsam/3rdparty/Eigen/test/cholmod_support.cpp b/gtsam/3rdparty/Eigen/test/cholmod_support.cpp new file mode 100644 index 000000000..1ebba2438 --- /dev/null +++ b/gtsam/3rdparty/Eigen/test/cholmod_support.cpp @@ -0,0 +1,71 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
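For context (not part of the patch), the CHOLMOD-backed decompositions checked below follow Eigen's usual sparse-solver interface; a minimal usage sketch, assuming SuiteSparse/CHOLMOD is installed and using a hand-built 3x3 SPD matrix, is:

#include <Eigen/Sparse>
#include <Eigen/CholmodSupport>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  SpMat A(3, 3);
  A.insert(0, 0) = 4.0; A.insert(1, 1) = 4.0; A.insert(2, 2) = 4.0;
  A.insert(0, 1) = 1.0; A.insert(1, 0) = 1.0;              // symmetric, diagonally dominant
  A.makeCompressed();

  Eigen::VectorXd b(3);
  b << 1.0, 2.0, 3.0;

  Eigen::CholmodSupernodalLLT<SpMat, Eigen::Lower> solver; // supernodal LL^T via CHOLMOD
  solver.compute(A);
  if (solver.info() != Eigen::Success) return 1;           // factorization failed
  Eigen::VectorXd x = solver.solve(b);
  std::cout << x << std::endl;
  return 0;
}
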
+ +#include "sparse_solver.h" + +#include + +template void test_cholmod_T() +{ + CholmodDecomposition, Lower> g_chol_colmajor_lower; g_chol_colmajor_lower.setMode(CholmodSupernodalLLt); + CholmodDecomposition, Upper> g_chol_colmajor_upper; g_chol_colmajor_upper.setMode(CholmodSupernodalLLt); + CholmodDecomposition, Lower> g_llt_colmajor_lower; g_llt_colmajor_lower.setMode(CholmodSimplicialLLt); + CholmodDecomposition, Upper> g_llt_colmajor_upper; g_llt_colmajor_upper.setMode(CholmodSimplicialLLt); + CholmodDecomposition, Lower> g_ldlt_colmajor_lower; g_ldlt_colmajor_lower.setMode(CholmodLDLt); + CholmodDecomposition, Upper> g_ldlt_colmajor_upper; g_ldlt_colmajor_upper.setMode(CholmodLDLt); + + CholmodSupernodalLLT, Lower> chol_colmajor_lower; + CholmodSupernodalLLT, Upper> chol_colmajor_upper; + CholmodSimplicialLLT, Lower> llt_colmajor_lower; + CholmodSimplicialLLT, Upper> llt_colmajor_upper; + CholmodSimplicialLDLT, Lower> ldlt_colmajor_lower; + CholmodSimplicialLDLT, Upper> ldlt_colmajor_upper; + + check_sparse_spd_solving(g_chol_colmajor_lower); + check_sparse_spd_solving(g_chol_colmajor_upper); + check_sparse_spd_solving(g_llt_colmajor_lower); + check_sparse_spd_solving(g_llt_colmajor_upper); + check_sparse_spd_solving(g_ldlt_colmajor_lower); + check_sparse_spd_solving(g_ldlt_colmajor_upper); + + check_sparse_spd_solving(chol_colmajor_lower); + check_sparse_spd_solving(chol_colmajor_upper); + check_sparse_spd_solving(llt_colmajor_lower); + check_sparse_spd_solving(llt_colmajor_upper); + check_sparse_spd_solving(ldlt_colmajor_lower); + check_sparse_spd_solving(ldlt_colmajor_upper); + +// check_sparse_spd_determinant(chol_colmajor_lower); +// check_sparse_spd_determinant(chol_colmajor_upper); +// check_sparse_spd_determinant(llt_colmajor_lower); +// check_sparse_spd_determinant(llt_colmajor_upper); +// check_sparse_spd_determinant(ldlt_colmajor_lower); +// check_sparse_spd_determinant(ldlt_colmajor_upper); +} + +void test_cholmod_support() +{ + CALL_SUBTEST_1(test_cholmod_T()); + CALL_SUBTEST_2(test_cholmod_T >()); +} diff --git a/gtsam/3rdparty/Eigen/test/conjugate_gradient.cpp b/gtsam/3rdparty/Eigen/test/conjugate_gradient.cpp new file mode 100644 index 000000000..f24f35817 --- /dev/null +++ b/gtsam/3rdparty/Eigen/test/conjugate_gradient.cpp @@ -0,0 +1,45 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
+ +#include "sparse_solver.h" +#include + +template void test_conjugate_gradient_T() +{ + ConjugateGradient, Lower> cg_colmajor_lower_diag; + ConjugateGradient, Upper> cg_colmajor_upper_diag; + ConjugateGradient, Lower, IdentityPreconditioner> cg_colmajor_lower_I; + ConjugateGradient, Upper, IdentityPreconditioner> cg_colmajor_upper_I; + + CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_lower_diag) ); + CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_upper_diag) ); + CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_lower_I) ); + CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_upper_I) ); +} + +void test_conjugate_gradient() +{ + CALL_SUBTEST_1(test_conjugate_gradient_T()); + CALL_SUBTEST_2(test_conjugate_gradient_T >()); +} diff --git a/gtsam/3rdparty/Eigen/test/cwiseop.cpp b/gtsam/3rdparty/Eigen/test/cwiseop.cpp index 07ef599be..b3ca94e3a 100644 --- a/gtsam/3rdparty/Eigen/test/cwiseop.cpp +++ b/gtsam/3rdparty/Eigen/test/cwiseop.cpp @@ -60,11 +60,8 @@ template void cwiseops(const MatrixType& m) mzero = MatrixType::Zero(rows, cols), mones = MatrixType::Ones(rows, cols), identity = Matrix - ::Identity(rows, rows), - square = Matrix::Random(rows, rows); - VectorType v1 = VectorType::Random(rows), - v2 = VectorType::Random(rows), - vzero = VectorType::Zero(rows), + ::Identity(rows, rows); + VectorType vzero = VectorType::Zero(rows), vones = VectorType::Ones(rows), v3(rows); @@ -175,9 +172,9 @@ void test_cwiseop() for(int i = 0; i < g_repeat ; i++) { CALL_SUBTEST_1( cwiseops(Matrix()) ); CALL_SUBTEST_2( cwiseops(Matrix4d()) ); - CALL_SUBTEST_3( cwiseops(MatrixXf(3, 3)) ); - CALL_SUBTEST_4( cwiseops(MatrixXf(22, 22)) ); - CALL_SUBTEST_5( cwiseops(MatrixXi(8, 12)) ); - CALL_SUBTEST_6( cwiseops(MatrixXd(20, 20)) ); + CALL_SUBTEST_3( cwiseops(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_4( cwiseops(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_5( cwiseops(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( cwiseops(MatrixXd(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } } diff --git a/gtsam/3rdparty/Eigen/test/determinant.cpp b/gtsam/3rdparty/Eigen/test/determinant.cpp index dcf64387d..6c8d3baab 100644 --- a/gtsam/3rdparty/Eigen/test/determinant.cpp +++ b/gtsam/3rdparty/Eigen/test/determinant.cpp @@ -68,13 +68,15 @@ template void determinant(const MatrixType& m) void test_determinant() { + int s; for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( determinant(Matrix()) ); CALL_SUBTEST_2( determinant(Matrix()) ); CALL_SUBTEST_3( determinant(Matrix()) ); CALL_SUBTEST_4( determinant(Matrix()) ); CALL_SUBTEST_5( determinant(Matrix, 10, 10>()) ); - CALL_SUBTEST_6( determinant(MatrixXd(20, 20)) ); + s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); + CALL_SUBTEST_6( determinant(MatrixXd(s, s)) ); } - CALL_SUBTEST_6( determinant(MatrixXd(200, 200)) ); + EIGEN_UNUSED_VARIABLE(s) } diff --git a/gtsam/3rdparty/Eigen/test/diagonal.cpp b/gtsam/3rdparty/Eigen/test/diagonal.cpp index 50b341dfe..94a30e2f4 100644 --- a/gtsam/3rdparty/Eigen/test/diagonal.cpp +++ b/gtsam/3rdparty/Eigen/test/diagonal.cpp @@ -74,10 +74,10 @@ void test_diagonal() for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( diagonal(Matrix()) ); CALL_SUBTEST_2( diagonal(Matrix4d()) ); - CALL_SUBTEST_2( diagonal(MatrixXcf(3, 3)) ); - CALL_SUBTEST_2( diagonal(MatrixXi(8, 12)) ); - CALL_SUBTEST_2( 
diagonal(MatrixXcd(20, 20)) ); - CALL_SUBTEST_1( diagonal(MatrixXf(21, 19)) ); + CALL_SUBTEST_2( diagonal(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_2( diagonal(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_2( diagonal(MatrixXcd(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_1( diagonal(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_1( diagonal(Matrix(3, 4)) ); } } diff --git a/gtsam/3rdparty/Eigen/test/diagonalmatrices.cpp b/gtsam/3rdparty/Eigen/test/diagonalmatrices.cpp index 9d6f069c6..d84f4e9f3 100644 --- a/gtsam/3rdparty/Eigen/test/diagonalmatrices.cpp +++ b/gtsam/3rdparty/Eigen/test/diagonalmatrices.cpp @@ -101,9 +101,9 @@ void test_diagonalmatrices() CALL_SUBTEST_3( diagonalmatrices(Matrix()) ); CALL_SUBTEST_4( diagonalmatrices(Matrix4d()) ); CALL_SUBTEST_5( diagonalmatrices(Matrix()) ); - CALL_SUBTEST_6( diagonalmatrices(MatrixXcf(3, 5)) ); - CALL_SUBTEST_7( diagonalmatrices(MatrixXi(10, 8)) ); - CALL_SUBTEST_8( diagonalmatrices(Matrix(20, 20)) ); - CALL_SUBTEST_9( diagonalmatrices(MatrixXf(21, 24)) ); + CALL_SUBTEST_6( diagonalmatrices(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_7( diagonalmatrices(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_8( diagonalmatrices(Matrix(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_9( diagonalmatrices(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } } diff --git a/gtsam/3rdparty/Eigen/test/eigen2/main.h b/gtsam/3rdparty/Eigen/test/eigen2/main.h index b361a44d9..9d0defa39 100644 --- a/gtsam/3rdparty/Eigen/test/eigen2/main.h +++ b/gtsam/3rdparty/Eigen/test/eigen2/main.h @@ -29,10 +29,6 @@ #include #include -#ifdef NDEBUG -#undef NDEBUG -#endif - #ifndef EIGEN_TEST_FUNC #error EIGEN_TEST_FUNC must be defined #endif diff --git a/gtsam/3rdparty/Eigen/test/eigen2support.cpp b/gtsam/3rdparty/Eigen/test/eigen2support.cpp index a7269fed5..5d0202e34 100644 --- a/gtsam/3rdparty/Eigen/test/eigen2support.cpp +++ b/gtsam/3rdparty/Eigen/test/eigen2support.cpp @@ -35,7 +35,6 @@ template void eigen2support(const MatrixType& m) Index cols = m.cols(); MatrixType m1 = MatrixType::Random(rows, cols), - m2 = MatrixType::Random(rows, cols), m3(rows, cols); Scalar s1 = internal::random(), diff --git a/gtsam/3rdparty/Eigen/test/eigensolver_complex.cpp b/gtsam/3rdparty/Eigen/test/eigensolver_complex.cpp index 99e9ee864..1cd55a2cd 100644 --- a/gtsam/3rdparty/Eigen/test/eigensolver_complex.cpp +++ b/gtsam/3rdparty/Eigen/test/eigensolver_complex.cpp @@ -108,18 +108,23 @@ template void eigensolver_verify_assert(const MatrixType& m void test_eigensolver_complex() { + int s; for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( eigensolver(Matrix4cf()) ); - CALL_SUBTEST_2( eigensolver(MatrixXcd(14,14)) ); + s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); + CALL_SUBTEST_2( eigensolver(MatrixXcd(s,s)) ); CALL_SUBTEST_3( eigensolver(Matrix, 1, 1>()) ); CALL_SUBTEST_4( eigensolver(Matrix3f()) ); } CALL_SUBTEST_1( eigensolver_verify_assert(Matrix4cf()) ); - CALL_SUBTEST_2( eigensolver_verify_assert(MatrixXcd(14,14)) ); + s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); + CALL_SUBTEST_2( 
eigensolver_verify_assert(MatrixXcd(s,s)) ); CALL_SUBTEST_3( eigensolver_verify_assert(Matrix, 1, 1>()) ); CALL_SUBTEST_4( eigensolver_verify_assert(Matrix3f()) ); // Test problem size constructors - CALL_SUBTEST_5(ComplexEigenSolver(10)); + CALL_SUBTEST_5(ComplexEigenSolver(s)); + + EIGEN_UNUSED_VARIABLE(s) } diff --git a/gtsam/3rdparty/Eigen/test/eigensolver_generic.cpp b/gtsam/3rdparty/Eigen/test/eigensolver_generic.cpp index 8476f026d..2e9cdc7a5 100644 --- a/gtsam/3rdparty/Eigen/test/eigensolver_generic.cpp +++ b/gtsam/3rdparty/Eigen/test/eigensolver_generic.cpp @@ -27,10 +27,6 @@ #include #include -#ifdef HAS_GSL -#include "gsl_helper.h" -#endif - template void eigensolver(const MatrixType& m) { typedef typename MatrixType::Index Index; @@ -97,9 +93,11 @@ template void eigensolver_verify_assert(const MatrixType& m void test_eigensolver_generic() { + int s; for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( eigensolver(Matrix4f()) ); - CALL_SUBTEST_2( eigensolver(MatrixXd(17,17)) ); + s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); + CALL_SUBTEST_2( eigensolver(MatrixXd(s,s)) ); // some trivial but implementation-wise tricky cases CALL_SUBTEST_2( eigensolver(MatrixXd(1,1)) ); @@ -109,10 +107,24 @@ void test_eigensolver_generic() } CALL_SUBTEST_1( eigensolver_verify_assert(Matrix4f()) ); - CALL_SUBTEST_2( eigensolver_verify_assert(MatrixXd(17,17)) ); + s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); + CALL_SUBTEST_2( eigensolver_verify_assert(MatrixXd(s,s)) ); CALL_SUBTEST_3( eigensolver_verify_assert(Matrix()) ); CALL_SUBTEST_4( eigensolver_verify_assert(Matrix2d()) ); // Test problem size constructors - CALL_SUBTEST_5(EigenSolver(10)); + CALL_SUBTEST_5(EigenSolver(s)); + + // regression test for bug 410 + CALL_SUBTEST_2( + { + MatrixXd A(1,1); + A(0,0) = std::sqrt(-1.); + Eigen::EigenSolver solver(A); + MatrixXd V(1, 1); + V(0,0) = solver.eigenvectors()(0,0).real(); + } + ); + + EIGEN_UNUSED_VARIABLE(s) } diff --git a/gtsam/3rdparty/Eigen/test/eigensolver_selfadjoint.cpp b/gtsam/3rdparty/Eigen/test/eigensolver_selfadjoint.cpp index b85bcc289..26d3d1f70 100644 --- a/gtsam/3rdparty/Eigen/test/eigensolver_selfadjoint.cpp +++ b/gtsam/3rdparty/Eigen/test/eigensolver_selfadjoint.cpp @@ -27,10 +27,6 @@ #include #include -#ifdef HAS_GSL -#include "gsl_helper.h" -#endif - template void selfadjointeigensolver(const MatrixType& m) { typedef typename MatrixType::Index Index; @@ -59,64 +55,25 @@ template void selfadjointeigensolver(const MatrixType& m) symmB.template triangularView().setZero(); SelfAdjointEigenSolver eiSymm(symmA); + SelfAdjointEigenSolver eiDirect; + eiDirect.computeDirect(symmA); // generalized eigen pb GeneralizedSelfAdjointEigenSolver eiSymmGen(symmA, symmB); - #ifdef HAS_GSL - if (internal::is_same::value) - { - // restore symmA and symmB. - symmA = MatrixType(symmA.template selfadjointView()); - symmB = MatrixType(symmB.template selfadjointView()); - typedef GslTraits Gsl; - typename Gsl::Matrix gEvec=0, gSymmA=0, gSymmB=0; - typename GslTraits::Vector gEval=0; - RealVectorType _eval; - MatrixType _evec; - convert(symmA, gSymmA); - convert(symmB, gSymmB); - convert(symmA, gEvec); - gEval = GslTraits::createVector(rows); - - Gsl::eigen_symm(gSymmA, gEval, gEvec); - convert(gEval, _eval); - convert(gEvec, _evec); - - // test gsl itself ! 
- VERIFY((symmA * _evec).isApprox(_evec * _eval.asDiagonal(), largerEps)); - - // compare with eigen - VERIFY_IS_APPROX(_eval, eiSymm.eigenvalues()); - VERIFY_IS_APPROX(_evec.cwiseAbs(), eiSymm.eigenvectors().cwiseAbs()); - - // generalized pb - Gsl::eigen_symm_gen(gSymmA, gSymmB, gEval, gEvec); - convert(gEval, _eval); - convert(gEvec, _evec); - // test GSL itself: - VERIFY((symmA * _evec).isApprox(symmB * (_evec * _eval.asDiagonal()), largerEps)); - - // compare with eigen - MatrixType normalized_eivec = eiSymmGen.eigenvectors()*eiSymmGen.eigenvectors().colwise().norm().asDiagonal().inverse(); - VERIFY_IS_APPROX(_eval, eiSymmGen.eigenvalues()); - VERIFY_IS_APPROX(_evec.cwiseAbs(), normalized_eivec.cwiseAbs()); - - Gsl::free(gSymmA); - Gsl::free(gSymmB); - GslTraits::free(gEval); - Gsl::free(gEvec); - } - #endif - VERIFY_IS_EQUAL(eiSymm.info(), Success); VERIFY((symmA.template selfadjointView() * eiSymm.eigenvectors()).isApprox( eiSymm.eigenvectors() * eiSymm.eigenvalues().asDiagonal(), largerEps)); VERIFY_IS_APPROX(symmA.template selfadjointView().eigenvalues(), eiSymm.eigenvalues()); + + VERIFY_IS_EQUAL(eiDirect.info(), Success); + VERIFY((symmA.template selfadjointView() * eiDirect.eigenvectors()).isApprox( + eiDirect.eigenvectors() * eiDirect.eigenvalues().asDiagonal(), largerEps)); + VERIFY_IS_APPROX(symmA.template selfadjointView().eigenvalues(), eiDirect.eigenvalues()); SelfAdjointEigenSolver eiSymmNoEivecs(symmA, false); VERIFY_IS_EQUAL(eiSymmNoEivecs.info(), Success); VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmNoEivecs.eigenvalues()); - + // generalized eigen problem Ax = lBx eiSymmGen.compute(symmA, symmB,Ax_lBx); VERIFY_IS_EQUAL(eiSymmGen.info(), Success); @@ -171,15 +128,21 @@ template void selfadjointeigensolver(const MatrixType& m) void test_eigensolver_selfadjoint() { + int s; for(int i = 0; i < g_repeat; i++) { - // very important to test a 3x3 matrix since we provide a special path for it + // very important to test 3x3 and 2x2 matrices since we provide special paths for them + CALL_SUBTEST_1( selfadjointeigensolver(Matrix2d()) ); CALL_SUBTEST_1( selfadjointeigensolver(Matrix3f()) ); CALL_SUBTEST_2( selfadjointeigensolver(Matrix4d()) ); - CALL_SUBTEST_3( selfadjointeigensolver(MatrixXf(10,10)) ); - CALL_SUBTEST_4( selfadjointeigensolver(MatrixXd(19,19)) ); - CALL_SUBTEST_5( selfadjointeigensolver(MatrixXcd(17,17)) ); + s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); + CALL_SUBTEST_3( selfadjointeigensolver(MatrixXf(s,s)) ); + s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); + CALL_SUBTEST_4( selfadjointeigensolver(MatrixXd(s,s)) ); + s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); + CALL_SUBTEST_5( selfadjointeigensolver(MatrixXcd(s,s)) ); - CALL_SUBTEST_9( selfadjointeigensolver(Matrix,Dynamic,Dynamic,RowMajor>(17,17)) ); + s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); + CALL_SUBTEST_9( selfadjointeigensolver(Matrix,Dynamic,Dynamic,RowMajor>(s,s)) ); // some trivial but implementation-wise tricky cases CALL_SUBTEST_4( selfadjointeigensolver(MatrixXd(1,1)) ); @@ -189,7 +152,10 @@ void test_eigensolver_selfadjoint() } // Test problem size constructors - CALL_SUBTEST_8(SelfAdjointEigenSolver(10)); - CALL_SUBTEST_8(Tridiagonalization(10)); + s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); + CALL_SUBTEST_8(SelfAdjointEigenSolver(s)); + CALL_SUBTEST_8(Tridiagonalization(s)); + + EIGEN_UNUSED_VARIABLE(s) } diff --git a/gtsam/3rdparty/Eigen/test/geo_alignedbox.cpp b/gtsam/3rdparty/Eigen/test/geo_alignedbox.cpp index 738ca3150..724133725 100644 --- 
a/gtsam/3rdparty/Eigen/test/geo_alignedbox.cpp +++ b/gtsam/3rdparty/Eigen/test/geo_alignedbox.cpp @@ -113,7 +113,7 @@ void specificTest1() Vector2f m; m << -1.0f, -2.0f; Vector2f M; M << 1.0f, 5.0f; - typedef AlignedBox BoxType; + typedef AlignedBox2f BoxType; BoxType box( m, M ); Vector2f sides = M-m; @@ -140,7 +140,7 @@ void specificTest2() Vector3i m; m << -1, -2, 0; Vector3i M; M << 1, 5, 3; - typedef AlignedBox BoxType; + typedef AlignedBox3i BoxType; BoxType box( m, M ); Vector3i sides = M-m; @@ -165,21 +165,21 @@ void test_geo_alignedbox() { for(int i = 0; i < g_repeat; i++) { - CALL_SUBTEST_1( alignedbox(AlignedBox()) ); - CALL_SUBTEST_2( alignedboxCastTests(AlignedBox()) ); + CALL_SUBTEST_1( alignedbox(AlignedBox2f()) ); + CALL_SUBTEST_2( alignedboxCastTests(AlignedBox2f()) ); - CALL_SUBTEST_3( alignedbox(AlignedBox()) ); - CALL_SUBTEST_4( alignedboxCastTests(AlignedBox()) ); + CALL_SUBTEST_3( alignedbox(AlignedBox3f()) ); + CALL_SUBTEST_4( alignedboxCastTests(AlignedBox3f()) ); - CALL_SUBTEST_5( alignedbox(AlignedBox()) ); - CALL_SUBTEST_6( alignedboxCastTests(AlignedBox()) ); + CALL_SUBTEST_5( alignedbox(AlignedBox4d()) ); + CALL_SUBTEST_6( alignedboxCastTests(AlignedBox4d()) ); - CALL_SUBTEST_7( alignedbox(AlignedBox()) ); - CALL_SUBTEST_8( alignedboxCastTests(AlignedBox()) ); + CALL_SUBTEST_7( alignedbox(AlignedBox1d()) ); + CALL_SUBTEST_8( alignedboxCastTests(AlignedBox1d()) ); - CALL_SUBTEST_9( alignedbox(AlignedBox()) ); - CALL_SUBTEST_10( alignedbox(AlignedBox()) ); - CALL_SUBTEST_11( alignedbox(AlignedBox()) ); + CALL_SUBTEST_9( alignedbox(AlignedBox1i()) ); + CALL_SUBTEST_10( alignedbox(AlignedBox2i()) ); + CALL_SUBTEST_11( alignedbox(AlignedBox3i()) ); } CALL_SUBTEST_12( specificTest1() ); CALL_SUBTEST_13( specificTest2() ); diff --git a/gtsam/3rdparty/Eigen/test/geo_eulerangles.cpp b/gtsam/3rdparty/Eigen/test/geo_eulerangles.cpp index f82cb8fbe..8029a694e 100644 --- a/gtsam/3rdparty/Eigen/test/geo_eulerangles.cpp +++ b/gtsam/3rdparty/Eigen/test/geo_eulerangles.cpp @@ -42,7 +42,6 @@ template void eulerangles(void) #define VERIFY_EULER(I,J,K, X,Y,Z) { \ Vector3 ea = m.eulerAngles(I,J,K); \ - Matrix3 m1 = Matrix3(AngleAxisx(ea[0], Vector3::Unit##X()) * AngleAxisx(ea[1], Vector3::Unit##Y()) * AngleAxisx(ea[2], Vector3::Unit##Z())); \ VERIFY_IS_APPROX(m, Matrix3(AngleAxisx(ea[0], Vector3::Unit##X()) * AngleAxisx(ea[1], Vector3::Unit##Y()) * AngleAxisx(ea[2], Vector3::Unit##Z()))); \ } VERIFY_EULER(0,1,2, X,Y,Z); diff --git a/gtsam/3rdparty/Eigen/test/geo_homogeneous.cpp b/gtsam/3rdparty/Eigen/test/geo_homogeneous.cpp index 26254b757..3efcb77db 100644 --- a/gtsam/3rdparty/Eigen/test/geo_homogeneous.cpp +++ b/gtsam/3rdparty/Eigen/test/geo_homogeneous.cpp @@ -42,17 +42,13 @@ template void homogeneous(void) typedef Matrix T3MatrixType; VectorType v0 = VectorType::Random(), - v1 = VectorType::Random(), ones = VectorType::Ones(); - HVectorType hv0 = HVectorType::Random(), - hv1 = HVectorType::Random(); + HVectorType hv0 = HVectorType::Random(); - MatrixType m0 = MatrixType::Random(), - m1 = MatrixType::Random(); + MatrixType m0 = MatrixType::Random(); - HMatrixType hm0 = HMatrixType::Random(), - hm1 = HMatrixType::Random(); + HMatrixType hm0 = HMatrixType::Random(); hv0 << v0, 1; VERIFY_IS_APPROX(v0.homogeneous(), hv0); diff --git a/gtsam/3rdparty/Eigen/test/geo_orthomethods.cpp b/gtsam/3rdparty/Eigen/test/geo_orthomethods.cpp index 020ae7103..aa3c8b61d 100644 --- a/gtsam/3rdparty/Eigen/test/geo_orthomethods.cpp +++ b/gtsam/3rdparty/Eigen/test/geo_orthomethods.cpp @@ -88,9 +88,7 
@@ template void orthomethods(int size=Size) typedef Matrix MatrixN3; typedef Matrix Vector3; - VectorType v0 = VectorType::Random(size), - v1 = VectorType::Random(size), - v2 = VectorType::Random(size); + VectorType v0 = VectorType::Random(size); // unitOrthogonal VERIFY_IS_MUCH_SMALLER_THAN(v0.unitOrthogonal().dot(v0), Scalar(1)); diff --git a/gtsam/3rdparty/Eigen/test/geo_parametrizedline.cpp b/gtsam/3rdparty/Eigen/test/geo_parametrizedline.cpp index 13f98fdd6..a289e70de 100644 --- a/gtsam/3rdparty/Eigen/test/geo_parametrizedline.cpp +++ b/gtsam/3rdparty/Eigen/test/geo_parametrizedline.cpp @@ -40,6 +40,7 @@ template void parametrizedline(const LineType& _line) typedef Matrix VectorType; typedef Matrix MatrixType; + typedef Hyperplane HyperplaneType; VectorType p0 = VectorType::Random(dim); VectorType p1 = VectorType::Random(dim); @@ -64,6 +65,16 @@ template void parametrizedline(const LineType& _line) VERIFY_IS_APPROX(hp1f.template cast(),l0); ParametrizedLine hp1d = l0.template cast(); VERIFY_IS_APPROX(hp1d.template cast(),l0); + + // intersections + VectorType p2 = VectorType::Random(dim); + VectorType n2 = VectorType::Random(dim).normalized(); + HyperplaneType hp(p2,n2); + Scalar t = l0.intersectionParameter(hp); + VectorType pi = l0.pointAt(t); + VERIFY_IS_MUCH_SMALLER_THAN(hp.signedDistance(pi), RealScalar(1)); + VERIFY_IS_MUCH_SMALLER_THAN(l0.distance(pi), RealScalar(1)); + VERIFY_IS_APPROX(l0.intersectionPoint(hp), pi); } template void parametrizedline_alignment() diff --git a/gtsam/3rdparty/Eigen/test/geo_quaternion.cpp b/gtsam/3rdparty/Eigen/test/geo_quaternion.cpp index 7adbe0b3d..b73ae9cd4 100644 --- a/gtsam/3rdparty/Eigen/test/geo_quaternion.cpp +++ b/gtsam/3rdparty/Eigen/test/geo_quaternion.cpp @@ -142,6 +142,17 @@ template void quaternion(void) VERIFY_IS_APPROX(-v3.normalized(),(q2.setFromTwoVectors(v1,-v3)*v1).normalized()); } + // from two vector creation static function + VERIFY_IS_APPROX( v2.normalized(),(Quaternionx::FromTwoVectors(v1, v2)*v1).normalized()); + VERIFY_IS_APPROX( v1.normalized(),(Quaternionx::FromTwoVectors(v1, v1)*v1).normalized()); + VERIFY_IS_APPROX(-v1.normalized(),(Quaternionx::FromTwoVectors(v1,-v1)*v1).normalized()); + if (internal::is_same::value) + { + v3 = (v1.array()+eps).matrix(); + VERIFY_IS_APPROX( v3.normalized(),(Quaternionx::FromTwoVectors(v1, v3)*v1).normalized()); + VERIFY_IS_APPROX(-v3.normalized(),(Quaternionx::FromTwoVectors(v1,-v3)*v1).normalized()); + } + // inverse and conjugate VERIFY_IS_APPROX(q1 * (q1.inverse() * v1), v1); VERIFY_IS_APPROX(q1 * (q1.conjugate() * v1), v1); diff --git a/gtsam/3rdparty/Eigen/test/geo_transformations.cpp b/gtsam/3rdparty/Eigen/test/geo_transformations.cpp index b606de2fb..e9f05cfb6 100644 --- a/gtsam/3rdparty/Eigen/test/geo_transformations.cpp +++ b/gtsam/3rdparty/Eigen/test/geo_transformations.cpp @@ -122,9 +122,7 @@ template void transformations() typedef Translation Translation3; Vector3 v0 = Vector3::Random(), - v1 = Vector3::Random(), - v2 = Vector3::Random(); - Vector2 u0 = Vector2::Random(); + v1 = Vector3::Random(); Matrix3 matrot1, m; Scalar a = internal::random(-Scalar(M_PI), Scalar(M_PI)); @@ -284,9 +282,9 @@ template void transformations() // mat * aligned scaling and mat * translation t1 = (Matrix3(q1) * AlignedScaling3(v0)) * Translation3(v0); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); - t1 = (Matrix3(q1) * Scaling(v0)) * Translation3(v0); + t1 = (Matrix3(q1) * Eigen::Scaling(v0)) * Translation3(v0); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); - t1 = (q1 * Scaling(v0)) * 
Translation3(v0); + t1 = (q1 * Eigen::Scaling(v0)) * Translation3(v0); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); // mat * transformation and aligned scaling * translation t1 = Matrix3(q1) * (AlignedScaling3(v0) * Translation3(v0)); @@ -295,18 +293,18 @@ template void transformations() t0.setIdentity(); t0.scale(s0).translate(v0); - t1 = Scaling(s0) * Translation3(v0); + t1 = Eigen::Scaling(s0) * Translation3(v0); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); t0.prescale(s0); - t1 = Scaling(s0) * t1; + t1 = Eigen::Scaling(s0) * t1; VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); t0 = t3; t0.scale(s0); - t1 = t3 * Scaling(s0,s0,s0); + t1 = t3 * Eigen::Scaling(s0,s0,s0); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); t0.prescale(s0); - t1 = Scaling(s0,s0,s0) * t1; + t1 = Eigen::Scaling(s0,s0,s0) * t1; VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); @@ -448,6 +446,29 @@ template void transform_alignment() #endif } +template void transform_products() +{ + typedef Matrix Mat; + typedef Transform Proj; + typedef Transform Aff; + typedef Transform AffC; + + Proj p; p.matrix().setRandom(); + Aff a; a.linear().setRandom(); a.translation().setRandom(); + AffC ac = a; + + Mat p_m(p.matrix()), a_m(a.matrix()); + + VERIFY_IS_APPROX((p*p).matrix(), p_m*p_m); + VERIFY_IS_APPROX((a*a).matrix(), a_m*a_m); + VERIFY_IS_APPROX((p*a).matrix(), p_m*a_m); + VERIFY_IS_APPROX((a*p).matrix(), a_m*p_m); + VERIFY_IS_APPROX((ac*a).matrix(), a_m*a_m); + VERIFY_IS_APPROX((a*ac).matrix(), a_m*a_m); + VERIFY_IS_APPROX((p*ac).matrix(), p_m*a_m); + VERIFY_IS_APPROX((ac*p).matrix(), a_m*p_m); +} + void test_geo_transformations() { for(int i = 0; i < g_repeat; i++) { @@ -470,5 +491,9 @@ void test_geo_transformations() CALL_SUBTEST_6(( transformations() )); CALL_SUBTEST_6(( transformations() )); + + + CALL_SUBTEST_7(( transform_products() )); + CALL_SUBTEST_7(( transform_products() )); } } diff --git a/gtsam/3rdparty/Eigen/test/gsl_helper.h b/gtsam/3rdparty/Eigen/test/gsl_helper.h deleted file mode 100644 index d6172d2ff..000000000 --- a/gtsam/3rdparty/Eigen/test/gsl_helper.h +++ /dev/null @@ -1,212 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2008 Gael Guennebaud -// -// Eigen is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 3 of the License, or (at your option) any later version. -// -// Alternatively, you can redistribute it and/or -// modify it under the terms of the GNU General Public License as -// published by the Free Software Foundation; either version 2 of -// the License, or (at your option) any later version. -// -// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY -// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public -// License and a copy of the GNU General Public License along with -// Eigen. If not, see . 
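
The geo_transformations hunks above qualify Scaling as Eigen::Scaling (avoiding ambiguity with the deprecated Eigen2 Scaling class) and add transform_products(), which checks that multiplying Projective/Affine/AffineCompact transforms agrees with multiplying their homogeneous matrices. A minimal sketch of both ideas, not taken from this patch:

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  // composing a transform from a uniform scaling and a translation
  Eigen::Affine3d t = Eigen::Scaling(2.0) * Eigen::Translation3d(1.0, 2.0, 3.0);

  // products of different transform representations
  Eigen::Transform<double, 3, Eigen::Projective> p;
  p.matrix().setRandom();
  Eigen::Transform<double, 3, Eigen::Affine> a =
      Eigen::Transform<double, 3, Eigen::Affine>::Identity();
  a.linear().setRandom();
  a.translation().setRandom();

  Eigen::Matrix4d lhs = (p * a).matrix();              // product of the transforms
  Eigen::Matrix4d rhs = p.matrix() * a.matrix();       // product of their 4x4 matrices
  std::cout << "max difference: " << (lhs - rhs).cwiseAbs().maxCoeff() << "\n";
  std::cout << "composed transform:\n" << t.matrix() << "\n";
  return 0;
}
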
- -#ifndef EIGEN_GSL_HELPER -#define EIGEN_GSL_HELPER - -#include - -#include -#include -#include -#include -#include -#include -#include - -namespace Eigen { - -template::IsComplex> struct GslTraits -{ - typedef gsl_matrix* Matrix; - typedef gsl_vector* Vector; - static Matrix createMatrix(int rows, int cols) { return gsl_matrix_alloc(rows,cols); } - static Vector createVector(int size) { return gsl_vector_alloc(size); } - static void free(Matrix& m) { gsl_matrix_free(m); m=0; } - static void free(Vector& m) { gsl_vector_free(m); m=0; } - static void prod(const Matrix& m, const Vector& v, Vector& x) { gsl_blas_dgemv(CblasNoTrans,1,m,v,0,x); } - static void cholesky(Matrix& m) { gsl_linalg_cholesky_decomp(m); } - static void cholesky_solve(const Matrix& m, const Vector& b, Vector& x) { gsl_linalg_cholesky_solve(m,b,x); } - static void eigen_symm(const Matrix& m, Vector& eval, Matrix& evec) - { - gsl_eigen_symmv_workspace * w = gsl_eigen_symmv_alloc(m->size1); - Matrix a = createMatrix(m->size1, m->size2); - gsl_matrix_memcpy(a, m); - gsl_eigen_symmv(a,eval,evec,w); - gsl_eigen_symmv_sort(eval, evec, GSL_EIGEN_SORT_VAL_ASC); - gsl_eigen_symmv_free(w); - free(a); - } - static void eigen_symm_gen(const Matrix& m, const Matrix& _b, Vector& eval, Matrix& evec) - { - gsl_eigen_gensymmv_workspace * w = gsl_eigen_gensymmv_alloc(m->size1); - Matrix a = createMatrix(m->size1, m->size2); - Matrix b = createMatrix(_b->size1, _b->size2); - gsl_matrix_memcpy(a, m); - gsl_matrix_memcpy(b, _b); - gsl_eigen_gensymmv(a,b,eval,evec,w); - gsl_eigen_symmv_sort(eval, evec, GSL_EIGEN_SORT_VAL_ASC); - gsl_eigen_gensymmv_free(w); - free(a); - } - - template - static void eigen_poly_solve(const EIGEN_VECTOR& poly, EIGEN_ROOTS& roots ) - { - const int deg = poly.size()-1; - double *z = new double[2*deg]; - double *a = new double[poly.size()]; - for( int i=0; i struct GslTraits -{ - typedef gsl_matrix_complex* Matrix; - typedef gsl_vector_complex* Vector; - static Matrix createMatrix(int rows, int cols) { return gsl_matrix_complex_alloc(rows,cols); } - static Vector createVector(int size) { return gsl_vector_complex_alloc(size); } - static void free(Matrix& m) { gsl_matrix_complex_free(m); m=0; } - static void free(Vector& m) { gsl_vector_complex_free(m); m=0; } - static void cholesky(Matrix& m) { gsl_linalg_complex_cholesky_decomp(m); } - static void cholesky_solve(const Matrix& m, const Vector& b, Vector& x) { gsl_linalg_complex_cholesky_solve(m,b,x); } - static void prod(const Matrix& m, const Vector& v, Vector& x) - { gsl_blas_zgemv(CblasNoTrans,gsl_complex_rect(1,0),m,v,gsl_complex_rect(0,0),x); } - static void eigen_symm(const Matrix& m, gsl_vector* &eval, Matrix& evec) - { - gsl_eigen_hermv_workspace * w = gsl_eigen_hermv_alloc(m->size1); - Matrix a = createMatrix(m->size1, m->size2); - gsl_matrix_complex_memcpy(a, m); - gsl_eigen_hermv(a,eval,evec,w); - gsl_eigen_hermv_sort(eval, evec, GSL_EIGEN_SORT_VAL_ASC); - gsl_eigen_hermv_free(w); - free(a); - } - static void eigen_symm_gen(const Matrix& m, const Matrix& _b, gsl_vector* &eval, Matrix& evec) - { - gsl_eigen_genhermv_workspace * w = gsl_eigen_genhermv_alloc(m->size1); - Matrix a = createMatrix(m->size1, m->size2); - Matrix b = createMatrix(_b->size1, _b->size2); - gsl_matrix_complex_memcpy(a, m); - gsl_matrix_complex_memcpy(b, _b); - gsl_eigen_genhermv(a,b,eval,evec,w); - gsl_eigen_hermv_sort(eval, evec, GSL_EIGEN_SORT_VAL_ASC); - gsl_eigen_genhermv_free(w); - free(a); - } -}; - -template -void convert(const MatrixType& m, gsl_matrix* &res) -{ -// if 
(res) -// gsl_matrix_free(res); - res = gsl_matrix_alloc(m.rows(), m.cols()); - for (int i=0 ; i -void convert(const gsl_matrix* m, MatrixType& res) -{ - res.resize(int(m->size1), int(m->size2)); - for (int i=0 ; i -void convert(const VectorType& m, gsl_vector* &res) -{ - if (res) gsl_vector_free(res); - res = gsl_vector_alloc(m.size()); - for (int i=0 ; i -void convert(const gsl_vector* m, VectorType& res) -{ - res.resize (m->size); - for (int i=0 ; i -void convert(const MatrixType& m, gsl_matrix_complex* &res) -{ - res = gsl_matrix_complex_alloc(m.rows(), m.cols()); - for (int i=0 ; i -void convert(const gsl_matrix_complex* m, MatrixType& res) -{ - res.resize(int(m->size1), int(m->size2)); - for (int i=0 ; i -void convert(const VectorType& m, gsl_vector_complex* &res) -{ - res = gsl_vector_complex_alloc(m.size()); - for (int i=0 ; i -void convert(const gsl_vector_complex* m, VectorType& res) -{ - res.resize(m->size); - for (int i=0 ; i,1>() )); CALL_SUBTEST_2(( hessenberg,2>() )); CALL_SUBTEST_3(( hessenberg,4>() )); - CALL_SUBTEST_4(( hessenberg(internal::random(1,320)) )); - CALL_SUBTEST_5(( hessenberg,Dynamic>(internal::random(1,320)) )); + CALL_SUBTEST_4(( hessenberg(internal::random(1,EIGEN_TEST_MAX_SIZE)) )); + CALL_SUBTEST_5(( hessenberg,Dynamic>(internal::random(1,EIGEN_TEST_MAX_SIZE)) )); // Test problem size constructors CALL_SUBTEST_6(HessenbergDecomposition(10)); diff --git a/gtsam/3rdparty/Eigen/test/householder.cpp b/gtsam/3rdparty/Eigen/test/householder.cpp index e77fa7ad0..6f6f317ea 100644 --- a/gtsam/3rdparty/Eigen/test/householder.cpp +++ b/gtsam/3rdparty/Eigen/test/householder.cpp @@ -130,9 +130,9 @@ void test_householder() CALL_SUBTEST_2( householder(Matrix()) ); CALL_SUBTEST_3( householder(Matrix()) ); CALL_SUBTEST_4( householder(Matrix()) ); - CALL_SUBTEST_5( householder(MatrixXd(10,12)) ); - CALL_SUBTEST_6( householder(MatrixXcf(16,17)) ); - CALL_SUBTEST_7( householder(MatrixXf(25,7)) ); + CALL_SUBTEST_5( householder(MatrixXd(internal::random(1,EIGEN_TEST_MAX_SIZE),internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_6( householder(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE),internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_7( householder(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE),internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_8( householder(Matrix()) ); } } diff --git a/gtsam/3rdparty/Eigen/test/inverse.cpp b/gtsam/3rdparty/Eigen/test/inverse.cpp index f0c69e78c..81702432f 100644 --- a/gtsam/3rdparty/Eigen/test/inverse.cpp +++ b/gtsam/3rdparty/Eigen/test/inverse.cpp @@ -41,7 +41,6 @@ template void inverse(const MatrixType& m) MatrixType m1(rows, cols), m2(rows, cols), - mzero = MatrixType::Zero(rows, cols), identity = MatrixType::Identity(rows, rows); createRandomPIMatrixOfRank(rows,rows,rows,m1); m2 = m1.inverse(); @@ -114,4 +113,5 @@ void test_inverse() CALL_SUBTEST_7( inverse(Matrix4d()) ); CALL_SUBTEST_7( inverse(Matrix()) ); } + EIGEN_UNUSED_VARIABLE(s) } diff --git a/gtsam/3rdparty/Eigen/test/jacobi.cpp b/gtsam/3rdparty/Eigen/test/jacobi.cpp index 6464c63c5..4d462226c 100644 --- a/gtsam/3rdparty/Eigen/test/jacobi.cpp +++ b/gtsam/3rdparty/Eigen/test/jacobi.cpp @@ -82,8 +82,8 @@ void test_jacobi() CALL_SUBTEST_3(( jacobi() )); CALL_SUBTEST_3(( jacobi >() )); - int r = internal::random(2, 20), - c = internal::random(2, 20); + int r = internal::random(2, internal::random(1,EIGEN_TEST_MAX_SIZE)/2), + c = internal::random(2, internal::random(1,EIGEN_TEST_MAX_SIZE)/2); CALL_SUBTEST_4(( jacobi(MatrixXf(r,c)) )); 
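
The hessenberg/householder/inverse/jacobi hunks above replace hard-coded size bounds (320, 200, 150, ...) with the test suite's EIGEN_TEST_MAX_SIZE macro, so the maximum problem size is tunable in one place. A sketch of the pattern; the default of 320 is an assumption taken from this tree's test/main.h, and the real tests obtain the macro from there rather than defining it locally:

#include <Eigen/Dense>
#include <iostream>

#ifndef EIGEN_TEST_MAX_SIZE
#define EIGEN_TEST_MAX_SIZE 320   // assumed default; normally provided by main.h / CMake
#endif

int main()
{
  int rows = Eigen::internal::random<int>(1, EIGEN_TEST_MAX_SIZE);
  int cols = Eigen::internal::random<int>(1, EIGEN_TEST_MAX_SIZE / 2);  // complex-valued cases use a smaller cap
  Eigen::MatrixXf m = Eigen::MatrixXf::Random(rows, cols);
  std::cout << "testing a " << m.rows() << "x" << m.cols() << " matrix\n";
  return 0;
}
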
CALL_SUBTEST_5(( jacobi(MatrixXcd(r,c)) )); CALL_SUBTEST_5(( jacobi >(MatrixXcd(r,c)) )); diff --git a/gtsam/3rdparty/Eigen/test/jacobisvd.cpp b/gtsam/3rdparty/Eigen/test/jacobisvd.cpp index 45873832a..3012fbe75 100644 --- a/gtsam/3rdparty/Eigen/test/jacobisvd.cpp +++ b/gtsam/3rdparty/Eigen/test/jacobisvd.cpp @@ -131,6 +131,12 @@ void jacobisvd_test_all_computation_options(const MatrixType& m) jacobisvd_solve(m, ComputeFullU | ComputeThinV); jacobisvd_solve(m, ComputeThinU | ComputeFullV); jacobisvd_solve(m, ComputeThinU | ComputeThinV); + + // test reconstruction + typedef typename MatrixType::Index Index; + Index diagSize = (std::min)(m.rows(), m.cols()); + JacobiSVD svd(m, ComputeThinU | ComputeThinV); + VERIFY_IS_APPROX(m, svd.matrixU().leftCols(diagSize) * svd.singularValues().asDiagonal() * svd.matrixV().leftCols(diagSize).adjoint()); } } @@ -248,9 +254,17 @@ void jacobisvd_inf_nan() // matrices containing denormal numbers. void jacobisvd_bug286() { +#if defined __INTEL_COMPILER +// shut up warning #239: floating point underflow +#pragma warning push +#pragma warning disable 239 +#endif Matrix2d M; M << -7.90884e-313, -4.94e-324, 0, 5.60844e-313; +#if defined __INTEL_COMPILER +#pragma warning pop +#endif JacobiSVD svd; svd.compute(M); // just check we don't loop indefinitely } @@ -333,8 +347,8 @@ void test_jacobisvd() CALL_SUBTEST_7( jacobisvd_inf_nan() ); } - CALL_SUBTEST_7(( jacobisvd(MatrixXf(internal::random(100, 150), internal::random(100, 150))) )); - CALL_SUBTEST_8(( jacobisvd(MatrixXcd(internal::random(80, 100), internal::random(80, 100))) )); + CALL_SUBTEST_7(( jacobisvd(MatrixXf(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) )); + CALL_SUBTEST_8(( jacobisvd(MatrixXcd(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3))) )); // test matrixbase method CALL_SUBTEST_1(( jacobisvd_method() )); diff --git a/gtsam/3rdparty/Eigen/test/linearstructure.cpp b/gtsam/3rdparty/Eigen/test/linearstructure.cpp index 312102701..bbfdaacc1 100644 --- a/gtsam/3rdparty/Eigen/test/linearstructure.cpp +++ b/gtsam/3rdparty/Eigen/test/linearstructure.cpp @@ -39,8 +39,7 @@ template void linearStructure(const MatrixType& m) // to test it, hence I consider that we will have tested Random.h MatrixType m1 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols), - m3(rows, cols), - mzero = MatrixType::Zero(rows, cols); + m3(rows, cols); Scalar s1 = internal::random(); while (internal::abs(s1)<1e-3) s1 = internal::random(); @@ -90,10 +89,10 @@ void test_linearstructure() CALL_SUBTEST_2( linearStructure(Matrix2f()) ); CALL_SUBTEST_3( linearStructure(Vector3d()) ); CALL_SUBTEST_4( linearStructure(Matrix4d()) ); - CALL_SUBTEST_5( linearStructure(MatrixXcf(3, 3)) ); - CALL_SUBTEST_6( linearStructure(MatrixXf(8, 12)) ); - CALL_SUBTEST_7( linearStructure(MatrixXi(8, 12)) ); - CALL_SUBTEST_8( linearStructure(MatrixXcd(20, 20)) ); - CALL_SUBTEST_9( linearStructure(ArrayXXf(12, 8)) ); + CALL_SUBTEST_5( linearStructure(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE/2), internal::random(1,EIGEN_TEST_MAX_SIZE/2))) ); + CALL_SUBTEST_6( linearStructure(MatrixXf (internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_7( linearStructure(MatrixXi (internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_8( 
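
The jacobisvd hunk above adds a reconstruction check: with thin U and V, the product U * diag(sigma) * V^adjoint must reproduce the decomposed matrix. A minimal standalone version, not taken from this patch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXf m = Eigen::MatrixXf::Random(6, 4);
  Eigen::JacobiSVD<Eigen::MatrixXf> svd(m, Eigen::ComputeThinU | Eigen::ComputeThinV);

  Eigen::MatrixXf reconstructed =
      svd.matrixU() * svd.singularValues().asDiagonal() * svd.matrixV().adjoint();

  std::cout << "reconstruction error: " << (m - reconstructed).norm() << "\n";
  return 0;
}
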
linearStructure(MatrixXcd(internal::random(1,EIGEN_TEST_MAX_SIZE/2), internal::random(1,EIGEN_TEST_MAX_SIZE/2))) ); + CALL_SUBTEST_9( linearStructure(ArrayXXf (internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } } diff --git a/gtsam/3rdparty/Eigen/test/lu.cpp b/gtsam/3rdparty/Eigen/test/lu.cpp index 552364d29..253f68542 100644 --- a/gtsam/3rdparty/Eigen/test/lu.cpp +++ b/gtsam/3rdparty/Eigen/test/lu.cpp @@ -37,7 +37,7 @@ template void lu_non_invertible() Index rows, cols, cols2; if(MatrixType::RowsAtCompileTime==Dynamic) { - rows = internal::random(2,200); + rows = internal::random(2,EIGEN_TEST_MAX_SIZE); } else { @@ -45,8 +45,8 @@ template void lu_non_invertible() } if(MatrixType::ColsAtCompileTime==Dynamic) { - cols = internal::random(2,200); - cols2 = internal::random(2,200); + cols = internal::random(2,EIGEN_TEST_MAX_SIZE); + cols2 = internal::random(2,EIGEN_TEST_MAX_SIZE); } else { @@ -117,7 +117,7 @@ template void lu_invertible() */ typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - int size = internal::random(1,200); + int size = internal::random(1,EIGEN_TEST_MAX_SIZE); MatrixType m1(size, size), m2(size, size), m3(size, size); FullPivLU lu; diff --git a/gtsam/3rdparty/Eigen/test/main.h b/gtsam/3rdparty/Eigen/test/main.h index 4510c1905..991194e30 100644 --- a/gtsam/3rdparty/Eigen/test/main.h +++ b/gtsam/3rdparty/Eigen/test/main.h @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -40,6 +41,13 @@ #define min(A,B) please_protect_your_min_with_parentheses #define max(A,B) please_protect_your_max_with_parentheses +#define FORBIDDEN_IDENTIFIER (this_identifier_is_forbidden_to_avoid_clashes) this_identifier_is_forbidden_to_avoid_clashes +// B0 is defined in POSIX header termios.h +#define B0 FORBIDDEN_IDENTIFIER + +// the following file is automatically generated by cmake +#include "split_test_helper.h" + #ifdef NDEBUG #undef NDEBUG #endif @@ -186,7 +194,7 @@ static void verify_impl(bool condition, const char *testname, const char *file, } } -#define VERIFY(a) verify_impl(a, g_test_stack.back().c_str(), __FILE__, __LINE__, EI_PP_MAKE_STRING(a)) +#define VERIFY(a) ::verify_impl(a, g_test_stack.back().c_str(), __FILE__, __LINE__, EI_PP_MAKE_STRING(a)) #define VERIFY_IS_EQUAL(a, b) VERIFY(test_is_equal(a, b)) #define VERIFY_IS_APPROX(a, b) VERIFY(test_isApprox(a, b)) @@ -204,101 +212,6 @@ static void verify_impl(bool condition, const char *testname, const char *file, g_test_stack.pop_back(); \ } while (0) -#ifdef EIGEN_TEST_PART_1 -#define CALL_SUBTEST_1(FUNC) CALL_SUBTEST(FUNC) -#else -#define CALL_SUBTEST_1(FUNC) -#endif - -#ifdef EIGEN_TEST_PART_2 -#define CALL_SUBTEST_2(FUNC) CALL_SUBTEST(FUNC) -#else -#define CALL_SUBTEST_2(FUNC) -#endif - -#ifdef EIGEN_TEST_PART_3 -#define CALL_SUBTEST_3(FUNC) CALL_SUBTEST(FUNC) -#else -#define CALL_SUBTEST_3(FUNC) -#endif - -#ifdef EIGEN_TEST_PART_4 -#define CALL_SUBTEST_4(FUNC) CALL_SUBTEST(FUNC) -#else -#define CALL_SUBTEST_4(FUNC) -#endif - -#ifdef EIGEN_TEST_PART_5 -#define CALL_SUBTEST_5(FUNC) CALL_SUBTEST(FUNC) -#else -#define CALL_SUBTEST_5(FUNC) -#endif - -#ifdef EIGEN_TEST_PART_6 -#define CALL_SUBTEST_6(FUNC) CALL_SUBTEST(FUNC) -#else -#define CALL_SUBTEST_6(FUNC) -#endif - -#ifdef EIGEN_TEST_PART_7 -#define CALL_SUBTEST_7(FUNC) CALL_SUBTEST(FUNC) -#else -#define CALL_SUBTEST_7(FUNC) -#endif - -#ifdef EIGEN_TEST_PART_8 -#define CALL_SUBTEST_8(FUNC) CALL_SUBTEST(FUNC) -#else -#define CALL_SUBTEST_8(FUNC) -#endif - -#ifdef 
EIGEN_TEST_PART_9 -#define CALL_SUBTEST_9(FUNC) CALL_SUBTEST(FUNC) -#else -#define CALL_SUBTEST_9(FUNC) -#endif - -#ifdef EIGEN_TEST_PART_10 -#define CALL_SUBTEST_10(FUNC) CALL_SUBTEST(FUNC) -#else -#define CALL_SUBTEST_10(FUNC) -#endif - -#ifdef EIGEN_TEST_PART_11 -#define CALL_SUBTEST_11(FUNC) CALL_SUBTEST(FUNC) -#else -#define CALL_SUBTEST_11(FUNC) -#endif - -#ifdef EIGEN_TEST_PART_12 -#define CALL_SUBTEST_12(FUNC) CALL_SUBTEST(FUNC) -#else -#define CALL_SUBTEST_12(FUNC) -#endif - -#ifdef EIGEN_TEST_PART_13 -#define CALL_SUBTEST_13(FUNC) CALL_SUBTEST(FUNC) -#else -#define CALL_SUBTEST_13(FUNC) -#endif - -#ifdef EIGEN_TEST_PART_14 -#define CALL_SUBTEST_14(FUNC) CALL_SUBTEST(FUNC) -#else -#define CALL_SUBTEST_14(FUNC) -#endif - -#ifdef EIGEN_TEST_PART_15 -#define CALL_SUBTEST_15(FUNC) CALL_SUBTEST(FUNC) -#else -#define CALL_SUBTEST_15(FUNC) -#endif - -#ifdef EIGEN_TEST_PART_16 -#define CALL_SUBTEST_16(FUNC) CALL_SUBTEST(FUNC) -#else -#define CALL_SUBTEST_16(FUNC) -#endif namespace Eigen { @@ -447,6 +360,23 @@ void createRandomPIMatrixOfRank(typename MatrixType::Index desired_rank, typenam m = qra.householderQ() * d * qrb.householderQ(); } +template +void randomPermutationVector(PermutationVectorType& v, typename PermutationVectorType::Index size) +{ + typedef typename PermutationVectorType::Index Index; + typedef typename PermutationVectorType::Scalar Scalar; + v.resize(size); + for(Index i = 0; i < size; ++i) v(i) = Scalar(i); + if(size == 1) return; + for(Index n = 0; n < 3 * size; ++n) + { + Index i = internal::random(0, size-1); + Index j; + do j = internal::random(0, size-1); while(j==i); + std::swap(v(i), v(j)); + } +} + } // end namespace Eigen template struct GetDifferentType; diff --git a/gtsam/3rdparty/Eigen/test/mixingtypes.cpp b/gtsam/3rdparty/Eigen/test/mixingtypes.cpp index 8afb733cd..6819f934e 100644 --- a/gtsam/3rdparty/Eigen/test/mixingtypes.cpp +++ b/gtsam/3rdparty/Eigen/test/mixingtypes.cpp @@ -143,5 +143,5 @@ void test_mixingtypes() { CALL_SUBTEST_1(mixingtypes<3>()); CALL_SUBTEST_2(mixingtypes<4>()); - CALL_SUBTEST_3(mixingtypes(internal::random(1,310))); + CALL_SUBTEST_3(mixingtypes(internal::random(1,EIGEN_TEST_MAX_SIZE))); } diff --git a/gtsam/3rdparty/Eigen/test/nomalloc.cpp b/gtsam/3rdparty/Eigen/test/nomalloc.cpp index 96ff16dae..1feeff4bc 100644 --- a/gtsam/3rdparty/Eigen/test/nomalloc.cpp +++ b/gtsam/3rdparty/Eigen/test/nomalloc.cpp @@ -52,15 +52,7 @@ template void nomalloc(const MatrixType& m) MatrixType m1 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols), - m3(rows, cols), - mzero = MatrixType::Zero(rows, cols), - identity = Matrix - ::Identity(rows, rows), - square = Matrix - ::Random(rows, rows); - VectorType v1 = VectorType::Random(rows), - v2 = VectorType::Random(rows), - vzero = VectorType::Zero(rows); + m3(rows, cols); Scalar s1 = internal::random(); @@ -137,13 +129,20 @@ void ctms_decompositions() 0, maxSize, maxSize> ComplexMatrix; - const Matrix A(Matrix::Random(size, size)); + const Matrix A(Matrix::Random(size, size)), B(Matrix::Random(size, size)); + Matrix X(size,size); const ComplexMatrix complexA(ComplexMatrix::Random(size, size)); const Matrix saA = A.adjoint() * A; + const Vector b(Vector::Random(size)); + Vector x(size); // Cholesky module Eigen::LLT LLT; LLT.compute(A); + X = LLT.solve(B); + x = LLT.solve(b); Eigen::LDLT LDLT; LDLT.compute(A); + X = LDLT.solve(B); + x = LDLT.solve(b); // Eigenvalues module Eigen::HessenbergDecomposition hessDecomp; hessDecomp.compute(complexA); @@ -155,12 +154,22 @@ void 
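
randomPermutationVector() is moved into main.h above so that several tests can share it; it fills a vector with 0..size-1 and then applies a few random transpositions. A standalone equivalent is sketched below (the dense index typedef and the driver are illustrative, not from the patch), together with building an Eigen PermutationMatrix from the resulting indices:

#include <Eigen/Dense>
#include <algorithm>
#include <iostream>

typedef Eigen::Matrix<int, Eigen::Dynamic, 1> IndexVector;

void randomPermutationVector(IndexVector& v, int size)
{
  v.resize(size);
  for (int i = 0; i < size; ++i) v(i) = i;             // start from the identity permutation
  if (size == 1) return;
  for (int n = 0; n < 3 * size; ++n)                   // a few random transpositions
  {
    int i = Eigen::internal::random<int>(0, size - 1);
    int j;
    do j = Eigen::internal::random<int>(0, size - 1); while (j == i);
    std::swap(v(i), v(j));
  }
}

int main()
{
  IndexVector indices;
  randomPermutationVector(indices, 5);

  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> P(indices);
  Eigen::MatrixXd M  = Eigen::MatrixXd::Identity(5, 5);
  Eigen::MatrixXd PM = P * M;                          // rows permuted according to 'indices'
  std::cout << "permuted identity:\n" << PM << "\n";
  return 0;
}
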
ctms_decompositions() // LU module Eigen::PartialPivLU ppLU; ppLU.compute(A); + X = ppLU.solve(B); + x = ppLU.solve(b); Eigen::FullPivLU fpLU; fpLU.compute(A); + X = fpLU.solve(B); + x = fpLU.solve(b); // QR module Eigen::HouseholderQR hQR; hQR.compute(A); + X = hQR.solve(B); + x = hQR.solve(b); Eigen::ColPivHouseholderQR cpQR; cpQR.compute(A); + // FIXME X = cpQR.solve(B); + x = cpQR.solve(b); Eigen::FullPivHouseholderQR fpQR; fpQR.compute(A); + // FIXME X = fpQR.solve(B); + x = fpQR.solve(b); // SVD module Eigen::JacobiSVD jSVD; jSVD.compute(A, ComputeFullU | ComputeFullV); diff --git a/gtsam/3rdparty/Eigen/test/nullary.cpp b/gtsam/3rdparty/Eigen/test/nullary.cpp index 0df15c081..6c9ee5f34 100644 --- a/gtsam/3rdparty/Eigen/test/nullary.cpp +++ b/gtsam/3rdparty/Eigen/test/nullary.cpp @@ -52,11 +52,14 @@ void testVectorType(const VectorType& base) { typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; - Scalar low = internal::random(-500,500); - Scalar high = internal::random(-500,500); - if (low>high) std::swap(low,high); + const Index size = base.size(); - const Scalar step = (high-low)/(size-1); + + Scalar high = internal::random(-500,500); + Scalar low = (size == 1 ? high : internal::random(-500,500)); + if (low>high) std::swap(low,high); + + const Scalar step = ((size == 1) ? 1 : (high-low)/(size-1)); // check whether the result yields what we expect it to do VectorType m(base); @@ -76,8 +79,8 @@ void testVectorType(const VectorType& base) VERIFY( (MatrixXd(RowVectorXd::LinSpaced(3, 0, 1)) - RowVector3d(0, 0.5, 1)).norm() < std::numeric_limits::epsilon() ); // These guys sometimes fail! This is not good. Any ideas how to fix them!? -// VERIFY( m(m.size()-1) == high ); -// VERIFY( m(0) == low ); + //VERIFY( m(m.size()-1) == high ); + //VERIFY( m(0) == low ); // sequential access version m = VectorType::LinSpaced(Sequential,size,low,high); @@ -97,6 +100,12 @@ void testVectorType(const VectorType& base) Matrix size_changer(size+50); size_changer.setLinSpaced(size,low,high); VERIFY( size_changer.size() == size ); + + typedef Matrix ScalarMatrix; + ScalarMatrix scalar; + scalar.setLinSpaced(1,low,high); + VERIFY_IS_APPROX( scalar, ScalarMatrix::Constant(high) ); + VERIFY_IS_APPROX( ScalarMatrix::LinSpaced(1,low,high), ScalarMatrix::Constant(high) ); } template @@ -124,5 +133,6 @@ void test_nullary() CALL_SUBTEST_6( testVectorType(Vector3d()) ); CALL_SUBTEST_7( testVectorType(VectorXf(internal::random(1,300))) ); CALL_SUBTEST_8( testVectorType(Vector3f()) ); + CALL_SUBTEST_8( testVectorType(Matrix()) ); } } diff --git a/gtsam/3rdparty/Eigen/test/pardiso_support.cpp b/gtsam/3rdparty/Eigen/test/pardiso_support.cpp new file mode 100644 index 000000000..67efad6d8 --- /dev/null +++ b/gtsam/3rdparty/Eigen/test/pardiso_support.cpp @@ -0,0 +1,29 @@ +/* + Intel Copyright (C) .... 
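
The nullary.cpp hunk above guards LinSpaced/setLinSpaced against the size == 1 case (the step would otherwise divide by zero) and checks that a length-1 LinSpaced vector equals the upper bound. A minimal sketch of that behaviour, not taken from this patch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  // regular case: evenly spaced values from low to high
  Eigen::VectorXf v = Eigen::VectorXf::LinSpaced(5, 0.0f, 1.0f);
  std::cout << "5 points: " << v.transpose() << "\n";   // 0 0.25 0.5 0.75 1

  // degenerate case exercised by the new test: a single entry takes the value 'high'
  Eigen::VectorXf w = Eigen::VectorXf::LinSpaced(1, 0.0f, 1.0f);
  std::cout << "1 point:  " << w.transpose() << "\n";   // 1
  return 0;
}
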
+*/ + +#include "sparse_solver.h" +#include + +template void test_pardiso_T() +{ + PardisoLLT < SparseMatrix, Lower> pardiso_llt_lower; + PardisoLLT < SparseMatrix, Upper> pardiso_llt_upper; + PardisoLDLT < SparseMatrix, Lower> pardiso_ldlt_lower; + PardisoLDLT < SparseMatrix, Upper> pardiso_ldlt_upper; + PardisoLU < SparseMatrix > pardiso_lu; + + check_sparse_spd_solving(pardiso_llt_lower); + check_sparse_spd_solving(pardiso_llt_upper); + check_sparse_spd_solving(pardiso_ldlt_lower); + check_sparse_spd_solving(pardiso_ldlt_upper); + check_sparse_square_solving(pardiso_lu); +} + +void test_pardiso_support() +{ + CALL_SUBTEST_1(test_pardiso_T()); + CALL_SUBTEST_2(test_pardiso_T()); + CALL_SUBTEST_3(test_pardiso_T< std::complex >()); + CALL_SUBTEST_4(test_pardiso_T< std::complex >()); +} diff --git a/gtsam/3rdparty/Eigen/test/pastix_support.cpp b/gtsam/3rdparty/Eigen/test/pastix_support.cpp new file mode 100644 index 000000000..dbce30d1c --- /dev/null +++ b/gtsam/3rdparty/Eigen/test/pastix_support.cpp @@ -0,0 +1,59 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2012 Gael Guennebaud +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . +#include "sparse_solver.h" +#include +#include + + +template void test_pastix_T() +{ + PastixLLT< SparseMatrix, Eigen::Lower > pastix_llt_lower; + PastixLDLT< SparseMatrix, Eigen::Lower > pastix_ldlt_lower; + PastixLLT< SparseMatrix, Eigen::Upper > pastix_llt_upper; + PastixLDLT< SparseMatrix, Eigen::Upper > pastix_ldlt_upper; + PastixLU< SparseMatrix > pastix_lu; + + check_sparse_spd_solving(pastix_llt_lower); + check_sparse_spd_solving(pastix_ldlt_lower); + check_sparse_spd_solving(pastix_llt_upper); + check_sparse_spd_solving(pastix_ldlt_upper); + check_sparse_square_solving(pastix_lu); +} + +// There is no support for selfadjoint matrices with PaStiX. 
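
The new pardiso_support and pastix_support tests above drive Eigen's external sparse-solver backends, which require linking against Intel MKL or PaStiX respectively. A hedged usage sketch for the MKL-backed PardisoLDLT; the assembled tridiagonal system and its size are made up for illustration and the snippet only builds where MKL is available:

#include <Eigen/Sparse>
#include <Eigen/PardisoSupport>   // requires an MKL-enabled build
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;

  const int n = 10;                // small SPD tridiagonal system
  SpMat A(n, n);
  for (int i = 0; i < n; ++i)
  {
    A.insert(i, i) = 2.0;
    if (i + 1 < n) { A.insert(i, i + 1) = -1.0; A.insert(i + 1, i) = -1.0; }
  }
  A.makeCompressed();
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  Eigen::PardisoLDLT<SpMat, Eigen::Lower> solver;
  solver.compute(A);
  Eigen::VectorXd x = solver.solve(b);
  std::cout << "residual: " << (A * x - b).norm() << "\n";
  return 0;
}
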
+// Complex symmetric matrices should pass though +template void test_pastix_T_LU() +{ + PastixLU< SparseMatrix > pastix_lu; + check_sparse_square_solving(pastix_lu); +} + +void test_pastix_support() +{ + CALL_SUBTEST_1(test_pastix_T()); + CALL_SUBTEST_2(test_pastix_T()); + CALL_SUBTEST_3( (test_pastix_T_LU >()) ); + CALL_SUBTEST_4(test_pastix_T_LU >()); +} \ No newline at end of file diff --git a/gtsam/3rdparty/Eigen/test/permutationmatrices.cpp b/gtsam/3rdparty/Eigen/test/permutationmatrices.cpp index d0fa01310..308838c70 100644 --- a/gtsam/3rdparty/Eigen/test/permutationmatrices.cpp +++ b/gtsam/3rdparty/Eigen/test/permutationmatrices.cpp @@ -24,23 +24,6 @@ #include "main.h" -template -void randomPermutationVector(PermutationVectorType& v, typename PermutationVectorType::Index size) -{ - typedef typename PermutationVectorType::Index Index; - typedef typename PermutationVectorType::Scalar Scalar; - v.resize(size); - for(Index i = 0; i < size; ++i) v(i) = Scalar(i); - if(size == 1) return; - for(Index n = 0; n < 3 * size; ++n) - { - Index i = internal::random(0, size-1); - Index j; - do j = internal::random(0, size-1); while(j==i); - std::swap(v(i), v(j)); - } -} - using namespace std; template void permutationmatrices(const MatrixType& m) { diff --git a/gtsam/3rdparty/Eigen/test/product.h b/gtsam/3rdparty/Eigen/test/product.h index 40ae4d51b..e77f8c41e 100644 --- a/gtsam/3rdparty/Eigen/test/product.h +++ b/gtsam/3rdparty/Eigen/test/product.h @@ -54,8 +54,7 @@ template void product(const MatrixType& m) // to test it, hence I consider that we will have tested Random.h MatrixType m1 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols), - m3(rows, cols), - mzero = MatrixType::Zero(rows, cols); + m3(rows, cols); RowSquareMatrixType identity = RowSquareMatrixType::Identity(rows, rows), square = RowSquareMatrixType::Random(rows, rows), @@ -63,9 +62,7 @@ template void product(const MatrixType& m) ColSquareMatrixType square2 = ColSquareMatrixType::Random(cols, cols), res2 = ColSquareMatrixType::Random(cols, cols); - RowVectorType v1 = RowVectorType::Random(rows), - v2 = RowVectorType::Random(rows), - vzero = RowVectorType::Zero(rows); + RowVectorType v1 = RowVectorType::Random(rows); ColVectorType vc2 = ColVectorType::Random(cols), vcres(cols); OtherMajorMatrixType tm1 = m1; diff --git a/gtsam/3rdparty/Eigen/test/product_extra.cpp b/gtsam/3rdparty/Eigen/test/product_extra.cpp index 15dc5ab96..ca302b469 100644 --- a/gtsam/3rdparty/Eigen/test/product_extra.cpp +++ b/gtsam/3rdparty/Eigen/test/product_extra.cpp @@ -153,11 +153,11 @@ void zero_sized_objects() void test_product_extra() { for(int i = 0; i < g_repeat; i++) { - CALL_SUBTEST_1( product_extra(MatrixXf(internal::random(1,320), internal::random(1,320))) ); - CALL_SUBTEST_2( product_extra(MatrixXd(internal::random(1,320), internal::random(1,320))) ); + CALL_SUBTEST_1( product_extra(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_2( product_extra(MatrixXd(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_2( mat_mat_scalar_scalar_product() ); - CALL_SUBTEST_3( product_extra(MatrixXcf(internal::random(1,150), internal::random(1,150))) ); - CALL_SUBTEST_4( product_extra(MatrixXcd(internal::random(1,150), internal::random(1,150))) ); + CALL_SUBTEST_3( product_extra(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE/2), internal::random(1,EIGEN_TEST_MAX_SIZE/2))) ); + CALL_SUBTEST_4( 
product_extra(MatrixXcd(internal::random(1,EIGEN_TEST_MAX_SIZE/2), internal::random(1,EIGEN_TEST_MAX_SIZE/2))) ); CALL_SUBTEST_5( zero_sized_objects() ); } } diff --git a/gtsam/3rdparty/Eigen/test/product_large.cpp b/gtsam/3rdparty/Eigen/test/product_large.cpp index 8ed937068..6f7a91b84 100644 --- a/gtsam/3rdparty/Eigen/test/product_large.cpp +++ b/gtsam/3rdparty/Eigen/test/product_large.cpp @@ -27,11 +27,11 @@ void test_product_large() { for(int i = 0; i < g_repeat; i++) { - CALL_SUBTEST_1( product(MatrixXf(internal::random(1,320), internal::random(1,320))) ); - CALL_SUBTEST_2( product(MatrixXd(internal::random(1,320), internal::random(1,320))) ); - CALL_SUBTEST_3( product(MatrixXi(internal::random(1,320), internal::random(1,320))) ); - CALL_SUBTEST_4( product(MatrixXcf(internal::random(1,150), internal::random(1,150))) ); - CALL_SUBTEST_5( product(Matrix(internal::random(1,320), internal::random(1,320))) ); + CALL_SUBTEST_1( product(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_2( product(MatrixXd(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_3( product(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_4( product(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE/2), internal::random(1,EIGEN_TEST_MAX_SIZE/2))) ); + CALL_SUBTEST_5( product(Matrix(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } #if defined EIGEN_TEST_PART_6 diff --git a/gtsam/3rdparty/Eigen/test/product_mmtr.cpp b/gtsam/3rdparty/Eigen/test/product_mmtr.cpp index 1048a894d..c6ceb487f 100644 --- a/gtsam/3rdparty/Eigen/test/product_mmtr.cpp +++ b/gtsam/3rdparty/Eigen/test/product_mmtr.cpp @@ -72,9 +72,9 @@ void test_product_mmtr() { for(int i = 0; i < g_repeat ; i++) { - CALL_SUBTEST_1((mmtr(internal::random(1,320)))); - CALL_SUBTEST_2((mmtr(internal::random(1,320)))); - CALL_SUBTEST_3((mmtr >(internal::random(1,200)))); - CALL_SUBTEST_4((mmtr >(internal::random(1,200)))); + CALL_SUBTEST_1((mmtr(internal::random(1,EIGEN_TEST_MAX_SIZE)))); + CALL_SUBTEST_2((mmtr(internal::random(1,EIGEN_TEST_MAX_SIZE)))); + CALL_SUBTEST_3((mmtr >(internal::random(1,EIGEN_TEST_MAX_SIZE/2)))); + CALL_SUBTEST_4((mmtr >(internal::random(1,EIGEN_TEST_MAX_SIZE/2)))); } } diff --git a/gtsam/3rdparty/Eigen/test/product_notemporary.cpp b/gtsam/3rdparty/Eigen/test/product_notemporary.cpp index 980e2bbaf..1a9dc14d9 100644 --- a/gtsam/3rdparty/Eigen/test/product_notemporary.cpp +++ b/gtsam/3rdparty/Eigen/test/product_notemporary.cpp @@ -141,13 +141,13 @@ void test_product_notemporary() { int s; for(int i = 0; i < g_repeat; i++) { - s = internal::random(16,320); + s = internal::random(16,EIGEN_TEST_MAX_SIZE); CALL_SUBTEST_1( product_notemporary(MatrixXf(s, s)) ); - s = internal::random(16,320); + s = internal::random(16,EIGEN_TEST_MAX_SIZE); CALL_SUBTEST_2( product_notemporary(MatrixXd(s, s)) ); - s = internal::random(16,120); + s = internal::random(16,EIGEN_TEST_MAX_SIZE/2); CALL_SUBTEST_3( product_notemporary(MatrixXcf(s,s)) ); - s = internal::random(16,120); + s = internal::random(16,EIGEN_TEST_MAX_SIZE/2); CALL_SUBTEST_4( product_notemporary(MatrixXcd(s,s)) ); } } diff --git a/gtsam/3rdparty/Eigen/test/product_selfadjoint.cpp b/gtsam/3rdparty/Eigen/test/product_selfadjoint.cpp index ca84969eb..6c1d83bf2 100644 --- a/gtsam/3rdparty/Eigen/test/product_selfadjoint.cpp +++ b/gtsam/3rdparty/Eigen/test/product_selfadjoint.cpp @@ -83,13 +83,14 @@ 
void test_product_selfadjoint() CALL_SUBTEST_1( product_selfadjoint(Matrix()) ); CALL_SUBTEST_2( product_selfadjoint(Matrix()) ); CALL_SUBTEST_3( product_selfadjoint(Matrix3d()) ); - s = internal::random(1,150); + s = internal::random(1,EIGEN_TEST_MAX_SIZE/2); CALL_SUBTEST_4( product_selfadjoint(MatrixXcf(s, s)) ); - s = internal::random(1,150); + s = internal::random(1,EIGEN_TEST_MAX_SIZE/2); CALL_SUBTEST_5( product_selfadjoint(MatrixXcd(s,s)) ); - s = internal::random(1,320); + s = internal::random(1,EIGEN_TEST_MAX_SIZE); CALL_SUBTEST_6( product_selfadjoint(MatrixXd(s,s)) ); - s = internal::random(1,320); + s = internal::random(1,EIGEN_TEST_MAX_SIZE); CALL_SUBTEST_7( product_selfadjoint(Matrix(s,s)) ); } + EIGEN_UNUSED_VARIABLE(s) } diff --git a/gtsam/3rdparty/Eigen/test/product_small.cpp b/gtsam/3rdparty/Eigen/test/product_small.cpp index d7f1c09ff..cf430a2d3 100644 --- a/gtsam/3rdparty/Eigen/test/product_small.cpp +++ b/gtsam/3rdparty/Eigen/test/product_small.cpp @@ -25,6 +25,25 @@ #define EIGEN_NO_STATIC_ASSERT #include "product.h" +// regression test for bug 447 +void product1x1() +{ + Matrix matAstatic; + Matrix matBstatic; + matAstatic.setRandom(); + matBstatic.setRandom(); + VERIFY_IS_APPROX( (matAstatic * matBstatic).coeff(0,0), + matAstatic.cwiseProduct(matBstatic.transpose()).sum() ); + + MatrixXf matAdynamic(1,3); + MatrixXf matBdynamic(3,1); + matAdynamic.setRandom(); + matBdynamic.setRandom(); + VERIFY_IS_APPROX( (matAdynamic * matBdynamic).coeff(0,0), + matAdynamic.cwiseProduct(matBdynamic.transpose()).sum() ); +} + + void test_product_small() { for(int i = 0; i < g_repeat; i++) { @@ -33,6 +52,7 @@ void test_product_small() CALL_SUBTEST_3( product(Matrix3d()) ); CALL_SUBTEST_4( product(Matrix4d()) ); CALL_SUBTEST_5( product(Matrix4f()) ); + CALL_SUBTEST_6( product1x1() ); } #ifdef EIGEN_TEST_PART_6 diff --git a/gtsam/3rdparty/Eigen/test/product_symm.cpp b/gtsam/3rdparty/Eigen/test/product_symm.cpp index 21c2f605b..4585c3b6a 100644 --- a/gtsam/3rdparty/Eigen/test/product_symm.cpp +++ b/gtsam/3rdparty/Eigen/test/product_symm.cpp @@ -98,14 +98,14 @@ void test_product_symm() { for(int i = 0; i < g_repeat ; i++) { - CALL_SUBTEST_1(( symm(internal::random(1,320),internal::random(1,320)) )); - CALL_SUBTEST_2(( symm(internal::random(1,320),internal::random(1,320)) )); - CALL_SUBTEST_3(( symm,Dynamic,Dynamic>(internal::random(1,200),internal::random(1,200)) )); - CALL_SUBTEST_4(( symm,Dynamic,Dynamic>(internal::random(1,200),internal::random(1,200)) )); + CALL_SUBTEST_1(( symm(internal::random(1,EIGEN_TEST_MAX_SIZE),internal::random(1,EIGEN_TEST_MAX_SIZE)) )); + CALL_SUBTEST_2(( symm(internal::random(1,EIGEN_TEST_MAX_SIZE),internal::random(1,EIGEN_TEST_MAX_SIZE)) )); + CALL_SUBTEST_3(( symm,Dynamic,Dynamic>(internal::random(1,EIGEN_TEST_MAX_SIZE/2),internal::random(1,EIGEN_TEST_MAX_SIZE/2)) )); + CALL_SUBTEST_4(( symm,Dynamic,Dynamic>(internal::random(1,EIGEN_TEST_MAX_SIZE/2),internal::random(1,EIGEN_TEST_MAX_SIZE/2)) )); - CALL_SUBTEST_5(( symm(internal::random(1,320)) )); - CALL_SUBTEST_6(( symm(internal::random(1,320)) )); - CALL_SUBTEST_7(( symm,Dynamic,1>(internal::random(1,320)) )); - CALL_SUBTEST_8(( symm,Dynamic,1>(internal::random(1,320)) )); + CALL_SUBTEST_5(( symm(internal::random(1,EIGEN_TEST_MAX_SIZE)) )); + CALL_SUBTEST_6(( symm(internal::random(1,EIGEN_TEST_MAX_SIZE)) )); + CALL_SUBTEST_7(( symm,Dynamic,1>(internal::random(1,EIGEN_TEST_MAX_SIZE)) )); + CALL_SUBTEST_8(( symm,Dynamic,1>(internal::random(1,EIGEN_TEST_MAX_SIZE)) )); } } diff --git 
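
product1x1() above is a regression test for bug 447: the product of a 1x3 and a 3x1 matrix is a 1x1 matrix whose single coefficient must equal the corresponding coefficient-wise product sum. A minimal standalone check, not taken from this patch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix<float, 1, 3> a;
  Eigen::Matrix<float, 3, 1> b;
  a.setRandom();
  b.setRandom();

  float viaProduct = (a * b)(0, 0);                        // 1x1 result of the matrix product
  float viaSum     = a.cwiseProduct(b.transpose()).sum();  // same value, computed coefficient-wise

  std::cout << viaProduct << " vs " << viaSum << "\n";
  return 0;
}
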
a/gtsam/3rdparty/Eigen/test/product_syrk.cpp b/gtsam/3rdparty/Eigen/test/product_syrk.cpp index 553410b9b..71285acb1 100644 --- a/gtsam/3rdparty/Eigen/test/product_syrk.cpp +++ b/gtsam/3rdparty/Eigen/test/product_syrk.cpp @@ -101,13 +101,13 @@ void test_product_syrk() for(int i = 0; i < g_repeat ; i++) { int s; - s = internal::random(1,320); + s = internal::random(1,EIGEN_TEST_MAX_SIZE); CALL_SUBTEST_1( syrk(MatrixXf(s, s)) ); - s = internal::random(1,320); + s = internal::random(1,EIGEN_TEST_MAX_SIZE); CALL_SUBTEST_2( syrk(MatrixXd(s, s)) ); - s = internal::random(1,200); + s = internal::random(1,EIGEN_TEST_MAX_SIZE/2); CALL_SUBTEST_3( syrk(MatrixXcf(s, s)) ); - s = internal::random(1,200); + s = internal::random(1,EIGEN_TEST_MAX_SIZE/2); CALL_SUBTEST_4( syrk(MatrixXcd(s, s)) ); } } diff --git a/gtsam/3rdparty/Eigen/test/product_trmm.cpp b/gtsam/3rdparty/Eigen/test/product_trmm.cpp index e117f6931..dab05d8b0 100644 --- a/gtsam/3rdparty/Eigen/test/product_trmm.cpp +++ b/gtsam/3rdparty/Eigen/test/product_trmm.cpp @@ -24,70 +24,100 @@ #include "main.h" -template void trmm(int size,int /*othersize*/) +template +void trmm(int rows=internal::random(1,EIGEN_TEST_MAX_SIZE), + int cols=internal::random(1,EIGEN_TEST_MAX_SIZE), + int otherCols = OtherCols==Dynamic?internal::random(1,EIGEN_TEST_MAX_SIZE):OtherCols) { typedef typename NumTraits::Real RealScalar; - typedef Matrix MatrixColMaj; - typedef Matrix MatrixRowMaj; + typedef Matrix TriMatrix; + typedef Matrix OnTheRight; + typedef Matrix OnTheLeft; + + typedef Matrix ResXS; + typedef Matrix ResSX; - DenseIndex rows = size; - DenseIndex cols = internal::random(1,size); - - MatrixColMaj triV(rows,cols), triH(cols,rows), upTri(cols,rows), loTri(rows,cols), - unitUpTri(cols,rows), unitLoTri(rows,cols), strictlyUpTri(cols,rows), strictlyLoTri(rows,cols); - MatrixColMaj ge1(rows,cols), ge2(cols,rows), ge3; - MatrixRowMaj rge3; + TriMatrix mat(rows,cols), tri(rows,cols), triTr(cols,rows); + + OnTheRight ge_right(cols,otherCols); + OnTheLeft ge_left(otherCols,rows); + ResSX ge_sx, ge_sx_save; + ResXS ge_xs, ge_xs_save; Scalar s1 = internal::random(), s2 = internal::random(); - triV.setRandom(); - triH.setRandom(); - loTri = triV.template triangularView(); - upTri = triH.template triangularView(); - unitLoTri = triV.template triangularView(); - unitUpTri = triH.template triangularView(); - strictlyLoTri = triV.template triangularView(); - strictlyUpTri = triH.template triangularView(); - ge1.setRandom(); - ge2.setRandom(); + mat.setRandom(); + tri = mat.template triangularView(); + triTr = mat.transpose().template triangularView(); + ge_right.setRandom(); + ge_left.setRandom(); - VERIFY_IS_APPROX( ge3 = triV.template triangularView() * ge2, loTri * ge2); - VERIFY_IS_APPROX( ge3 = ge2 * triV.template triangularView(), ge2 * loTri); - VERIFY_IS_APPROX( ge3 = triH.template triangularView() * ge1, upTri * ge1); - VERIFY_IS_APPROX( ge3 = ge1 * triH.template triangularView(), ge1 * upTri); - VERIFY_IS_APPROX( ge3 = (s1*triV.adjoint()).template triangularView() * (s2*ge1), s1*loTri.adjoint() * (s2*ge1)); - VERIFY_IS_APPROX( ge3 = ge1 * triV.adjoint().template triangularView(), ge1 * loTri.adjoint()); - VERIFY_IS_APPROX( ge3 = triH.adjoint().template triangularView() * ge2, upTri.adjoint() * ge2); - VERIFY_IS_APPROX( ge3 = ge2 * triH.adjoint().template triangularView(), ge2 * upTri.adjoint()); - VERIFY_IS_APPROX( ge3 = triV.template triangularView() * ge1.adjoint(), loTri * ge1.adjoint()); - VERIFY_IS_APPROX( ge3 = ge1.adjoint() * triV.template 
triangularView(), ge1.adjoint() * loTri); - VERIFY_IS_APPROX( ge3 = triH.template triangularView() * ge2.adjoint(), upTri * ge2.adjoint()); - VERIFY_IS_APPROX(rge3.noalias() = triH.template triangularView() * ge2.adjoint(), upTri * ge2.adjoint()); - VERIFY_IS_APPROX( ge3 = (s1*triV).adjoint().template triangularView() * ge2.adjoint(), internal::conj(s1) * loTri.adjoint() * ge2.adjoint()); - VERIFY_IS_APPROX(rge3.noalias() = triV.adjoint().template triangularView() * ge2.adjoint(), loTri.adjoint() * ge2.adjoint()); - VERIFY_IS_APPROX( ge3 = triH.adjoint().template triangularView() * ge1.adjoint(), upTri.adjoint() * ge1.adjoint()); - VERIFY_IS_APPROX(rge3.noalias() = triH.adjoint().template triangularView() * ge1.adjoint(), upTri.adjoint() * ge1.adjoint()); - - VERIFY_IS_APPROX( ge3 = triV.template triangularView() * ge2, unitLoTri * ge2); - VERIFY_IS_APPROX( rge3.noalias() = ge2 * triV.template triangularView(), ge2 * unitLoTri); - VERIFY_IS_APPROX( ge3 = ge2 * triV.template triangularView(), ge2 * unitLoTri); - VERIFY_IS_APPROX( ge3 = (s1*triV).adjoint().template triangularView() * ge2.adjoint(), internal::conj(s1) * unitLoTri.adjoint() * ge2.adjoint()); - - VERIFY_IS_APPROX( ge3 = triV.template triangularView() * ge2, strictlyLoTri * ge2); - VERIFY_IS_APPROX( rge3.noalias() = ge2 * triV.template triangularView(), ge2 * strictlyLoTri); - VERIFY_IS_APPROX( ge3 = ge2 * triV.template triangularView(), ge2 * strictlyLoTri); - VERIFY_IS_APPROX( ge3 = (s1*triV).adjoint().template triangularView() * ge2.adjoint(), internal::conj(s1) * strictlyLoTri.adjoint() * ge2.adjoint()); + VERIFY_IS_APPROX( ge_xs = mat.template triangularView() * ge_right, tri * ge_right); + VERIFY_IS_APPROX( ge_sx = ge_left * mat.template triangularView(), ge_left * tri); + + VERIFY_IS_APPROX( ge_xs.noalias() = mat.template triangularView() * ge_right, tri * ge_right); + VERIFY_IS_APPROX( ge_sx.noalias() = ge_left * mat.template triangularView(), ge_left * tri); + + VERIFY_IS_APPROX( ge_xs.noalias() = (s1*mat.adjoint()).template triangularView() * (s2*ge_left.transpose()), s1*triTr.conjugate() * (s2*ge_left.transpose())); + VERIFY_IS_APPROX( ge_sx.noalias() = ge_right.transpose() * mat.adjoint().template triangularView(), ge_right.transpose() * triTr.conjugate()); + + VERIFY_IS_APPROX( ge_xs.noalias() = (s1*mat.adjoint()).template triangularView() * (s2*ge_left.adjoint()), s1*triTr.conjugate() * (s2*ge_left.adjoint())); + VERIFY_IS_APPROX( ge_sx.noalias() = ge_right.adjoint() * mat.adjoint().template triangularView(), ge_right.adjoint() * triTr.conjugate()); + + ge_xs_save = ge_xs; + VERIFY_IS_APPROX( (ge_xs_save + s1*triTr.conjugate() * (s2*ge_left.adjoint())).eval(), ge_xs.noalias() += (s1*mat.adjoint()).template triangularView() * (s2*ge_left.adjoint()) ); + ge_sx_save = ge_sx; + VERIFY_IS_APPROX( ge_sx_save - (ge_right.adjoint() * (-s1 * triTr).conjugate()).eval(), ge_sx.noalias() -= (ge_right.adjoint() * (-s1 * mat).adjoint().template triangularView()).eval()); + + VERIFY_IS_APPROX( ge_xs = (s1*mat).adjoint().template triangularView() * ge_left.adjoint(), internal::conj(s1) * triTr.conjugate() * ge_left.adjoint()); + + // TODO check with sub-matrix expressions ? 
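
The rewritten trmm() above checks triangular-times-dense products formed directly from a triangularView(), including the noalias() variants, against products with an explicitly materialised triangular factor. A minimal sketch of the idea, not taken from this patch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  const int n = 5, k = 3;
  Eigen::MatrixXd mat = Eigen::MatrixXd::Random(n, n);
  Eigen::MatrixXd rhs = Eigen::MatrixXd::Random(n, k);

  // reference: materialise the lower-triangular part, then multiply
  Eigen::MatrixXd tri = mat.triangularView<Eigen::Lower>();
  Eigen::MatrixXd ref = tri * rhs;

  // same product computed by the dedicated triangular kernel, no temporary for 'tri'
  Eigen::MatrixXd res(n, k);
  res.noalias() = mat.triangularView<Eigen::Lower>() * rhs;

  std::cout << "difference: " << (ref - res).norm() << "\n";
  return 0;
}
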
} +template +void trmv(int rows=internal::random(1,EIGEN_TEST_MAX_SIZE), int cols=internal::random(1,EIGEN_TEST_MAX_SIZE)) +{ + trmm(rows,cols,1); +} + +template +void trmm(int rows=internal::random(1,EIGEN_TEST_MAX_SIZE), int cols=internal::random(1,EIGEN_TEST_MAX_SIZE), int otherCols = internal::random(1,EIGEN_TEST_MAX_SIZE)) +{ + trmm(rows,cols,otherCols); +} + +#define CALL_ALL_ORDERS(NB,SCALAR,MODE) \ + EIGEN_CAT(CALL_SUBTEST_,NB)((trmm())); \ + EIGEN_CAT(CALL_SUBTEST_,NB)((trmm())); \ + EIGEN_CAT(CALL_SUBTEST_,NB)((trmm())); \ + EIGEN_CAT(CALL_SUBTEST_,NB)((trmm())); \ + EIGEN_CAT(CALL_SUBTEST_,NB)((trmm())); \ + EIGEN_CAT(CALL_SUBTEST_,NB)((trmm())); \ + EIGEN_CAT(CALL_SUBTEST_,NB)((trmm())); \ + EIGEN_CAT(CALL_SUBTEST_,NB)((trmm())); \ + \ + EIGEN_CAT(CALL_SUBTEST_1,NB)((trmv())); \ + EIGEN_CAT(CALL_SUBTEST_1,NB)((trmv())); + + +#define CALL_ALL(NB,SCALAR) \ + CALL_ALL_ORDERS(EIGEN_CAT(1,NB),SCALAR,Upper) \ + CALL_ALL_ORDERS(EIGEN_CAT(2,NB),SCALAR,UnitUpper) \ + CALL_ALL_ORDERS(EIGEN_CAT(3,NB),SCALAR,StrictlyUpper) \ + CALL_ALL_ORDERS(EIGEN_CAT(1,NB),SCALAR,Lower) \ + CALL_ALL_ORDERS(EIGEN_CAT(2,NB),SCALAR,UnitLower) \ + CALL_ALL_ORDERS(EIGEN_CAT(3,NB),SCALAR,StrictlyLower) + + void test_product_trmm() { for(int i = 0; i < g_repeat ; i++) { - CALL_SUBTEST_1((trmm(internal::random(1,320),internal::random(1,320)))); - CALL_SUBTEST_2((trmm(internal::random(1,320),internal::random(1,320)))); - CALL_SUBTEST_3((trmm >(internal::random(1,200),internal::random(1,200)))); - CALL_SUBTEST_4((trmm >(internal::random(1,200),internal::random(1,200)))); + CALL_ALL(1,float); // EIGEN_SUFFIXES;11;111;21;121;31;131 + CALL_ALL(2,double); // EIGEN_SUFFIXES;12;112;22;122;32;132 + CALL_ALL(3,std::complex); // EIGEN_SUFFIXES;13;113;23;123;33;133 + CALL_ALL(4,std::complex); // EIGEN_SUFFIXES;14;114;24;124;34;134 } } diff --git a/gtsam/3rdparty/Eigen/test/product_trmv.cpp b/gtsam/3rdparty/Eigen/test/product_trmv.cpp index cfb7355ff..52707d22b 100644 --- a/gtsam/3rdparty/Eigen/test/product_trmv.cpp +++ b/gtsam/3rdparty/Eigen/test/product_trmv.cpp @@ -93,11 +93,12 @@ void test_product_trmv() CALL_SUBTEST_1( trmv(Matrix()) ); CALL_SUBTEST_2( trmv(Matrix()) ); CALL_SUBTEST_3( trmv(Matrix3d()) ); - s = internal::random(1,200); + s = internal::random(1,EIGEN_TEST_MAX_SIZE/2); CALL_SUBTEST_4( trmv(MatrixXcf(s,s)) ); - s = internal::random(1,200); + s = internal::random(1,EIGEN_TEST_MAX_SIZE/2); CALL_SUBTEST_5( trmv(MatrixXcd(s,s)) ); - s = internal::random(1,320); + s = internal::random(1,EIGEN_TEST_MAX_SIZE); CALL_SUBTEST_6( trmv(Matrix(s, s)) ); } + EIGEN_UNUSED_VARIABLE(s); } diff --git a/gtsam/3rdparty/Eigen/test/product_trsolve.cpp b/gtsam/3rdparty/Eigen/test/product_trsolve.cpp index c207cc500..20380accd 100644 --- a/gtsam/3rdparty/Eigen/test/product_trsolve.cpp +++ b/gtsam/3rdparty/Eigen/test/product_trsolve.cpp @@ -93,14 +93,14 @@ void test_product_trsolve() for(int i = 0; i < g_repeat ; i++) { // matrices - CALL_SUBTEST_1((trsolve(internal::random(1,320),internal::random(1,320)))); - CALL_SUBTEST_2((trsolve(internal::random(1,320),internal::random(1,320)))); - CALL_SUBTEST_3((trsolve,Dynamic,Dynamic>(internal::random(1,200),internal::random(1,200)))); - CALL_SUBTEST_4((trsolve,Dynamic,Dynamic>(internal::random(1,200),internal::random(1,200)))); + CALL_SUBTEST_1((trsolve(internal::random(1,EIGEN_TEST_MAX_SIZE),internal::random(1,EIGEN_TEST_MAX_SIZE)))); + CALL_SUBTEST_2((trsolve(internal::random(1,EIGEN_TEST_MAX_SIZE),internal::random(1,EIGEN_TEST_MAX_SIZE)))); + 
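
product_trsolve above exercises triangular solves. The corresponding user-level API solves L x = b directly on the triangular view, without forming an inverse; a minimal sketch, not taken from this patch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  const int n = 4;
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);
  A.diagonal().array() += 2.0;                 // keep the triangular factor well conditioned
  Eigen::VectorXd b = Eigen::VectorXd::Random(n);

  Eigen::VectorXd x = A.triangularView<Eigen::Lower>().solve(b);
  // or in place: A.triangularView<Eigen::Lower>().solveInPlace(b);

  Eigen::VectorXd Ax = A.triangularView<Eigen::Lower>() * x;
  std::cout << "residual: " << (Ax - b).norm() << "\n";
  return 0;
}
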
CALL_SUBTEST_3((trsolve,Dynamic,Dynamic>(internal::random(1,EIGEN_TEST_MAX_SIZE/2),internal::random(1,EIGEN_TEST_MAX_SIZE/2)))); + CALL_SUBTEST_4((trsolve,Dynamic,Dynamic>(internal::random(1,EIGEN_TEST_MAX_SIZE/2),internal::random(1,EIGEN_TEST_MAX_SIZE/2)))); // vectors - CALL_SUBTEST_1((trsolve(internal::random(1,320)))); - CALL_SUBTEST_5((trsolve,Dynamic,1>(internal::random(1,320)))); + CALL_SUBTEST_1((trsolve(internal::random(1,EIGEN_TEST_MAX_SIZE)))); + CALL_SUBTEST_5((trsolve,Dynamic,1>(internal::random(1,EIGEN_TEST_MAX_SIZE)))); CALL_SUBTEST_6((trsolve())); CALL_SUBTEST_7((trsolve())); CALL_SUBTEST_8((trsolve,4,1>())); diff --git a/gtsam/3rdparty/Eigen/test/qr.cpp b/gtsam/3rdparty/Eigen/test/qr.cpp index 7e9ac9df9..6005c0e27 100644 --- a/gtsam/3rdparty/Eigen/test/qr.cpp +++ b/gtsam/3rdparty/Eigen/test/qr.cpp @@ -114,8 +114,8 @@ template void qr_verify_assert() void test_qr() { for(int i = 0; i < g_repeat; i++) { - CALL_SUBTEST_1( qr(MatrixXf(internal::random(1,200),internal::random(1,200))) ); - CALL_SUBTEST_2( qr(MatrixXcd(internal::random(1,200),internal::random(1,200))) ); + CALL_SUBTEST_1( qr(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE),internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + CALL_SUBTEST_2( qr(MatrixXcd(internal::random(1,EIGEN_TEST_MAX_SIZE/2),internal::random(1,EIGEN_TEST_MAX_SIZE/2))) ); CALL_SUBTEST_3(( qr_fixedsize, 2 >() )); CALL_SUBTEST_4(( qr_fixedsize, 4 >() )); CALL_SUBTEST_5(( qr_fixedsize, 7 >() )); diff --git a/gtsam/3rdparty/Eigen/test/qr_colpivoting.cpp b/gtsam/3rdparty/Eigen/test/qr_colpivoting.cpp index 3cf651fa7..cdcf060ef 100644 --- a/gtsam/3rdparty/Eigen/test/qr_colpivoting.cpp +++ b/gtsam/3rdparty/Eigen/test/qr_colpivoting.cpp @@ -30,7 +30,7 @@ template void qr() { typedef typename MatrixType::Index Index; - Index rows = internal::random(2,200), cols = internal::random(2,200), cols2 = internal::random(2,200); + Index rows = internal::random(2,EIGEN_TEST_MAX_SIZE), cols = internal::random(2,EIGEN_TEST_MAX_SIZE), cols2 = internal::random(2,EIGEN_TEST_MAX_SIZE); Index rank = internal::random(1, (std::min)(rows, cols)-1); typedef typename MatrixType::Scalar Scalar; diff --git a/gtsam/3rdparty/Eigen/test/redux.cpp b/gtsam/3rdparty/Eigen/test/redux.cpp index a8bcf3b51..61d1bf911 100644 --- a/gtsam/3rdparty/Eigen/test/redux.cpp +++ b/gtsam/3rdparty/Eigen/test/redux.cpp @@ -35,6 +35,10 @@ template void matrixRedux(const MatrixType& m) MatrixType m1 = MatrixType::Random(rows, cols); + // The entries of m1 are uniformly distributed in [0,1], so m1.prod() is very small. This may lead to test + // failures if we underflow into denormals. Thus, we scale so that entires are close to 1. 
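
As the comment above explains, taking prod() over many entries of magnitude at most one underflows towards denormals, so the redux test rescales its operand to keep every entry close to one before testing prod(). A standalone version of that trick, not taken from this patch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  const int n = 50;
  Eigen::MatrixXd m1 = Eigen::MatrixXd::Random(n, n);            // entries in [-1, 1]

  // entries of m1_for_prod lie in [0.8, 1.2], so the product stays representable
  Eigen::MatrixXd m1_for_prod = Eigen::MatrixXd::Ones(n, n) + 0.2 * m1;

  std::cout << "prod of raw entries:      " << m1.prod() << "\n";          // underflows toward zero
  std::cout << "prod of rescaled entries: " << m1_for_prod.prod() << "\n"; // well scaled
  return 0;
}
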
+ MatrixType m1_for_prod = MatrixType::Ones(rows, cols) + Scalar(0.2) * m1; + VERIFY_IS_MUCH_SMALLER_THAN(MatrixType::Zero(rows, cols).sum(), Scalar(1)); VERIFY_IS_APPROX(MatrixType::Ones(rows, cols).sum(), Scalar(float(rows*cols))); // the float() here to shut up excessive MSVC warning about int->complex conversion being lossy Scalar s(0), p(1), minc(internal::real(m1.coeff(0))), maxc(internal::real(m1.coeff(0))); @@ -42,7 +46,7 @@ template void matrixRedux(const MatrixType& m) for(int i = 0; i < rows; i++) { s += m1(i,j); - p *= m1(i,j); + p *= m1_for_prod(i,j); minc = (std::min)(internal::real(minc), internal::real(m1(i,j))); maxc = (std::max)(internal::real(maxc), internal::real(m1(i,j))); } @@ -50,7 +54,7 @@ template void matrixRedux(const MatrixType& m) VERIFY_IS_APPROX(m1.sum(), s); VERIFY_IS_APPROX(m1.mean(), mean); - VERIFY_IS_APPROX(m1.prod(), p); + VERIFY_IS_APPROX(m1_for_prod.prod(), p); VERIFY_IS_APPROX(m1.real().minCoeff(), internal::real(minc)); VERIFY_IS_APPROX(m1.real().maxCoeff(), internal::real(maxc)); @@ -61,7 +65,7 @@ template void matrixRedux(const MatrixType& m) Index c1 = internal::random(c0+1,cols)-c0; VERIFY_IS_APPROX(m1.block(r0,c0,r1,c1).sum(), m1.block(r0,c0,r1,c1).eval().sum()); VERIFY_IS_APPROX(m1.block(r0,c0,r1,c1).mean(), m1.block(r0,c0,r1,c1).eval().mean()); - VERIFY_IS_APPROX(m1.block(r0,c0,r1,c1).prod(), m1.block(r0,c0,r1,c1).eval().prod()); + VERIFY_IS_APPROX(m1_for_prod.block(r0,c0,r1,c1).prod(), m1_for_prod.block(r0,c0,r1,c1).eval().prod()); VERIFY_IS_APPROX(m1.block(r0,c0,r1,c1).real().minCoeff(), m1.block(r0,c0,r1,c1).real().eval().minCoeff()); VERIFY_IS_APPROX(m1.block(r0,c0,r1,c1).real().maxCoeff(), m1.block(r0,c0,r1,c1).real().eval().maxCoeff()); @@ -78,6 +82,8 @@ template void vectorRedux(const VectorType& w) Index size = w.size(); VectorType v = VectorType::Random(size); + VectorType v_for_prod = VectorType::Ones(size) + Scalar(0.2) * v; // see comment above declaration of m1_for_prod + for(int i = 1; i < size; i++) { Scalar s(0), p(1); @@ -85,12 +91,12 @@ template void vectorRedux(const VectorType& w) for(int j = 0; j < i; j++) { s += v[j]; - p *= v[j]; + p *= v_for_prod[j]; minc = (std::min)(minc, internal::real(v[j])); maxc = (std::max)(maxc, internal::real(v[j])); } VERIFY_IS_MUCH_SMALLER_THAN(internal::abs(s - v.head(i).sum()), Scalar(1)); - VERIFY_IS_APPROX(p, v.head(i).prod()); + VERIFY_IS_APPROX(p, v_for_prod.head(i).prod()); VERIFY_IS_APPROX(minc, v.real().head(i).minCoeff()); VERIFY_IS_APPROX(maxc, v.real().head(i).maxCoeff()); } @@ -102,12 +108,12 @@ template void vectorRedux(const VectorType& w) for(int j = i; j < size; j++) { s += v[j]; - p *= v[j]; + p *= v_for_prod[j]; minc = (std::min)(minc, internal::real(v[j])); maxc = (std::max)(maxc, internal::real(v[j])); } VERIFY_IS_MUCH_SMALLER_THAN(internal::abs(s - v.tail(size-i).sum()), Scalar(1)); - VERIFY_IS_APPROX(p, v.tail(size-i).prod()); + VERIFY_IS_APPROX(p, v_for_prod.tail(size-i).prod()); VERIFY_IS_APPROX(minc, v.real().tail(size-i).minCoeff()); VERIFY_IS_APPROX(maxc, v.real().tail(size-i).maxCoeff()); } @@ -119,12 +125,12 @@ template void vectorRedux(const VectorType& w) for(int j = i; j < size-i; j++) { s += v[j]; - p *= v[j]; + p *= v_for_prod[j]; minc = (std::min)(minc, internal::real(v[j])); maxc = (std::max)(maxc, internal::real(v[j])); } VERIFY_IS_MUCH_SMALLER_THAN(internal::abs(s - v.segment(i, size-2*i).sum()), Scalar(1)); - VERIFY_IS_APPROX(p, v.segment(i, size-2*i).prod()); + VERIFY_IS_APPROX(p, v_for_prod.segment(i, size-2*i).prod()); VERIFY_IS_APPROX(minc, 
v.real().segment(i, size-2*i).minCoeff()); VERIFY_IS_APPROX(maxc, v.real().segment(i, size-2*i).maxCoeff()); } @@ -139,6 +145,9 @@ template void vectorRedux(const VectorType& w) void test_redux() { + // the max size cannot be too large, otherwise reduxion operations obviously generate large errors. + int maxsize = (std::min)(100,EIGEN_TEST_MAX_SIZE); + EIGEN_UNUSED_VARIABLE(maxsize); for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( matrixRedux(Matrix()) ); CALL_SUBTEST_1( matrixRedux(Array()) ); @@ -146,19 +155,19 @@ void test_redux() CALL_SUBTEST_2( matrixRedux(Array2f()) ); CALL_SUBTEST_3( matrixRedux(Matrix4d()) ); CALL_SUBTEST_3( matrixRedux(Array4d()) ); - CALL_SUBTEST_4( matrixRedux(MatrixXcf(3, 3)) ); - CALL_SUBTEST_4( matrixRedux(ArrayXXcf(3, 3)) ); - CALL_SUBTEST_5( matrixRedux(MatrixXd(8, 12)) ); - CALL_SUBTEST_5( matrixRedux(ArrayXXd(8, 12)) ); - CALL_SUBTEST_6( matrixRedux(MatrixXi(8, 12)) ); - CALL_SUBTEST_6( matrixRedux(ArrayXXi(8, 12)) ); + CALL_SUBTEST_4( matrixRedux(MatrixXcf(internal::random(1,maxsize), internal::random(1,maxsize))) ); + CALL_SUBTEST_4( matrixRedux(ArrayXXcf(internal::random(1,maxsize), internal::random(1,maxsize))) ); + CALL_SUBTEST_5( matrixRedux(MatrixXd (internal::random(1,maxsize), internal::random(1,maxsize))) ); + CALL_SUBTEST_5( matrixRedux(ArrayXXd (internal::random(1,maxsize), internal::random(1,maxsize))) ); + CALL_SUBTEST_6( matrixRedux(MatrixXi (internal::random(1,maxsize), internal::random(1,maxsize))) ); + CALL_SUBTEST_6( matrixRedux(ArrayXXi (internal::random(1,maxsize), internal::random(1,maxsize))) ); } for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_7( vectorRedux(Vector4f()) ); CALL_SUBTEST_7( vectorRedux(Array4f()) ); - CALL_SUBTEST_5( vectorRedux(VectorXd(10)) ); - CALL_SUBTEST_5( vectorRedux(ArrayXd(10)) ); - CALL_SUBTEST_8( vectorRedux(VectorXf(33)) ); - CALL_SUBTEST_8( vectorRedux(ArrayXf(33)) ); + CALL_SUBTEST_5( vectorRedux(VectorXd(internal::random(1,maxsize))) ); + CALL_SUBTEST_5( vectorRedux(ArrayXd(internal::random(1,maxsize))) ); + CALL_SUBTEST_8( vectorRedux(VectorXf(internal::random(1,maxsize))) ); + CALL_SUBTEST_8( vectorRedux(ArrayXf(internal::random(1,maxsize))) ); } } diff --git a/gtsam/3rdparty/Eigen/test/schur_complex.cpp b/gtsam/3rdparty/Eigen/test/schur_complex.cpp index a2a89fd67..15532e2cc 100644 --- a/gtsam/3rdparty/Eigen/test/schur_complex.cpp +++ b/gtsam/3rdparty/Eigen/test/schur_complex.cpp @@ -80,7 +80,7 @@ template void schur(int size = MatrixType::ColsAtCompileTim void test_schur_complex() { CALL_SUBTEST_1(( schur() )); - CALL_SUBTEST_2(( schur(internal::random(1,50)) )); + CALL_SUBTEST_2(( schur(internal::random(1,EIGEN_TEST_MAX_SIZE/4)) )); CALL_SUBTEST_3(( schur, 1, 1> >() )); CALL_SUBTEST_4(( schur >() )); diff --git a/gtsam/3rdparty/Eigen/test/schur_real.cpp b/gtsam/3rdparty/Eigen/test/schur_real.cpp index 58717fa1a..ba0947d8c 100644 --- a/gtsam/3rdparty/Eigen/test/schur_real.cpp +++ b/gtsam/3rdparty/Eigen/test/schur_real.cpp @@ -99,7 +99,7 @@ template void schur(int size = MatrixType::ColsAtCompileTim void test_schur_real() { CALL_SUBTEST_1(( schur() )); - CALL_SUBTEST_2(( schur(internal::random(1,50)) )); + CALL_SUBTEST_2(( schur(internal::random(1,EIGEN_TEST_MAX_SIZE/4)) )); CALL_SUBTEST_3(( schur >() )); CALL_SUBTEST_4(( schur >() )); diff --git a/gtsam/3rdparty/Eigen/test/selfadjoint.cpp b/gtsam/3rdparty/Eigen/test/selfadjoint.cpp index 622045f20..db66017c1 100644 --- a/gtsam/3rdparty/Eigen/test/selfadjoint.cpp +++ b/gtsam/3rdparty/Eigen/test/selfadjoint.cpp @@ -54,20 +54,23 @@ template void 
selfadjoint(const MatrixType& m) void bug_159() { - Matrix3d m = Matrix3d::Random().selfadjointView(); + Matrix3d m = Matrix3d::Random().selfadjointView(); + EIGEN_UNUSED_VARIABLE(m) } void test_selfadjoint() { for(int i = 0; i < g_repeat ; i++) { - int s = internal::random(1,20); EIGEN_UNUSED_VARIABLE(s); + int s = internal::random(1,EIGEN_TEST_MAX_SIZE); EIGEN_UNUSED_VARIABLE(s); CALL_SUBTEST_1( selfadjoint(Matrix()) ); CALL_SUBTEST_2( selfadjoint(Matrix()) ); CALL_SUBTEST_3( selfadjoint(Matrix3cf()) ); CALL_SUBTEST_4( selfadjoint(MatrixXcd(s,s)) ); CALL_SUBTEST_5( selfadjoint(Matrix(s, s)) ); + + EIGEN_UNUSED_VARIABLE(s) } CALL_SUBTEST_1( bug_159() ); diff --git a/gtsam/3rdparty/Eigen/test/simplicial_cholesky.cpp b/gtsam/3rdparty/Eigen/test/simplicial_cholesky.cpp new file mode 100644 index 000000000..f1af0e467 --- /dev/null +++ b/gtsam/3rdparty/Eigen/test/simplicial_cholesky.cpp @@ -0,0 +1,55 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#include "sparse_solver.h" + +template void test_simplicial_cholesky_T() +{ + SimplicialCholesky, Lower> chol_colmajor_lower; + SimplicialCholesky, Upper> chol_colmajor_upper; + SimplicialLLT, Lower> llt_colmajor_lower; + SimplicialLDLT, Upper> llt_colmajor_upper; + SimplicialLDLT, Lower> ldlt_colmajor_lower; + SimplicialLDLT, Upper> ldlt_colmajor_upper; + + check_sparse_spd_solving(chol_colmajor_lower); + check_sparse_spd_solving(chol_colmajor_upper); + check_sparse_spd_solving(llt_colmajor_lower); + check_sparse_spd_solving(llt_colmajor_upper); + check_sparse_spd_solving(ldlt_colmajor_lower); + check_sparse_spd_solving(ldlt_colmajor_upper); + + check_sparse_spd_determinant(chol_colmajor_lower); + check_sparse_spd_determinant(chol_colmajor_upper); + check_sparse_spd_determinant(llt_colmajor_lower); + check_sparse_spd_determinant(llt_colmajor_upper); + check_sparse_spd_determinant(ldlt_colmajor_lower); + check_sparse_spd_determinant(ldlt_colmajor_upper); +} + +void test_simplicial_cholesky() +{ + CALL_SUBTEST_1(test_simplicial_cholesky_T()); + CALL_SUBTEST_2(test_simplicial_cholesky_T >()); +} diff --git a/gtsam/3rdparty/Eigen/test/smallvectors.cpp b/gtsam/3rdparty/Eigen/test/smallvectors.cpp index 144944162..4c09d4ec6 100644 --- a/gtsam/3rdparty/Eigen/test/smallvectors.cpp +++ b/gtsam/3rdparty/Eigen/test/smallvectors.cpp @@ -22,6 +22,7 @@ // License and a copy of the GNU General Public License along with // Eigen. If not, see . 
+#define EIGEN_NO_STATIC_ASSERT #include "main.h" template void smallVectors() @@ -29,6 +30,7 @@ template void smallVectors() typedef Matrix V2; typedef Matrix V3; typedef Matrix V4; + typedef Matrix VX; Scalar x1 = internal::random(), x2 = internal::random(), x3 = internal::random(), @@ -45,6 +47,29 @@ template void smallVectors() VERIFY_IS_APPROX(x3, v3.z()); VERIFY_IS_APPROX(x3, v4.z()); VERIFY_IS_APPROX(x4, v4.w()); + + if (!NumTraits::IsInteger) + { + VERIFY_RAISES_ASSERT(V3(2, 1)) + VERIFY_RAISES_ASSERT(V3(3, 2)) + VERIFY_RAISES_ASSERT(V3(Scalar(3), 1)) + VERIFY_RAISES_ASSERT(V3(3, Scalar(1))) + VERIFY_RAISES_ASSERT(V3(Scalar(3), Scalar(1))) + VERIFY_RAISES_ASSERT(V3(Scalar(123), Scalar(123))) + + VERIFY_RAISES_ASSERT(V4(1, 3)) + VERIFY_RAISES_ASSERT(V4(2, 4)) + VERIFY_RAISES_ASSERT(V4(1, Scalar(4))) + VERIFY_RAISES_ASSERT(V4(Scalar(1), 4)) + VERIFY_RAISES_ASSERT(V4(Scalar(1), Scalar(4))) + VERIFY_RAISES_ASSERT(V4(Scalar(123), Scalar(123))) + + VERIFY_RAISES_ASSERT(VX(3, 2)) + VERIFY_RAISES_ASSERT(VX(Scalar(3), 1)) + VERIFY_RAISES_ASSERT(VX(3, Scalar(1))) + VERIFY_RAISES_ASSERT(VX(Scalar(3), Scalar(1))) + VERIFY_RAISES_ASSERT(VX(Scalar(123), Scalar(123))) + } } void test_smallvectors() diff --git a/gtsam/3rdparty/Eigen/test/sparse.h b/gtsam/3rdparty/Eigen/test/sparse.h index cc9da4855..860d9ad9c 100644 --- a/gtsam/3rdparty/Eigen/test/sparse.h +++ b/gtsam/3rdparty/Eigen/test/sparse.h @@ -1,7 +1,7 @@ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // -// Copyright (C) 2008 Daniel Gomez Ferro +// Copyright (C) 2008-2011 Gael Guennebaud // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public @@ -23,6 +23,7 @@ // Eigen. If not, see . #ifndef EIGEN_TESTSPARSE_H +#define EIGEN_TESTSPARSE_H #define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET @@ -67,30 +68,36 @@ enum { * \param zeroCoords and nonzeroCoords allows to get the coordinate lists of the non zero, * and zero coefficients respectively. */ -template void +template void initSparse(double density, - Matrix& refMat, - SparseMatrix& sparseMat, + Matrix& refMat, + SparseMatrix& sparseMat, int flags = 0, std::vector* zeroCoords = 0, std::vector* nonzeroCoords = 0) { + enum { IsRowMajor = SparseMatrix::IsRowMajor }; sparseMat.setZero(); - sparseMat.reserve(int(refMat.rows()*refMat.cols()*density)); - for(int j=0; j(0,1) < density) ? internal::random() : Scalar(0); if ((flags&ForceNonZeroDiag) && (i==j)) { v = internal::random()*Scalar(3.); v = v*v + Scalar(5.); } - if ((flags & MakeLowerTriangular) && j>i) + if ((flags & MakeLowerTriangular) && aj>ai) v = Scalar(0); - else if ((flags & MakeUpperTriangular) && jpush_back(Vector2i(i,j)); + nonzeroCoords->push_back(Vector2i(ai,aj)); } else if (zeroCoords) { - zeroCoords->push_back(Vector2i(i,j)); + zeroCoords->push_back(Vector2i(ai,aj)); } - refMat(i,j) = v; + refMat(ai,aj) = v; } } - sparseMat.finalize(); + //sparseMat.finalize(); } -template void +template void initSparse(double density, - Matrix& refMat, - DynamicSparseMatrix& sparseMat, + Matrix& refMat, + DynamicSparseMatrix& sparseMat, int flags = 0, std::vector* zeroCoords = 0, std::vector* nonzeroCoords = 0) { + enum { IsRowMajor = DynamicSparseMatrix::IsRowMajor }; sparseMat.setZero(); sparseMat.reserve(int(refMat.rows()*refMat.cols()*density)); - for(int j=0; j(0,1) < density) ? 
internal::random() : Scalar(0); if ((flags&ForceNonZeroDiag) && (i==j)) { v = internal::random()*Scalar(3.); v = v*v + Scalar(5.); } - if ((flags & MakeLowerTriangular) && j>i) + if ((flags & MakeLowerTriangular) && aj>ai) v = Scalar(0); - else if ((flags & MakeUpperTriangular) && jpush_back(Vector2i(i,j)); + nonzeroCoords->push_back(Vector2i(ai,aj)); } else if (zeroCoords) { - zeroCoords->push_back(Vector2i(i,j)); + zeroCoords->push_back(Vector2i(ai,aj)); } - refMat(i,j) = v; + refMat(ai,aj) = v; } } sparseMat.finalize(); @@ -181,4 +193,5 @@ initSparse(double density, } } +#include #endif // EIGEN_TESTSPARSE_H diff --git a/gtsam/3rdparty/Eigen/test/sparse_basic.cpp b/gtsam/3rdparty/Eigen/test/sparse_basic.cpp index 6f54d2ebc..637c5db51 100644 --- a/gtsam/3rdparty/Eigen/test/sparse_basic.cpp +++ b/gtsam/3rdparty/Eigen/test/sparse_basic.cpp @@ -1,6 +1,7 @@ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // +// Copyright (C) 2008-2011 Gael Guennebaud // Copyright (C) 2008 Daniel Gomez Ferro // // Eigen is free software; you can redistribute it and/or @@ -109,7 +110,8 @@ template void sparse_basic(const SparseMatrixType& re DenseMatrix m1(rows,cols); m1.setZero(); SparseMatrixType m2(rows,cols); - m2.reserve(10); + if(internal::random()%2) + m2.reserve(VectorXi::Constant(m2.outerSize(), 2)); for (int j=0; j void sparse_basic(const SparseMatrixType& re DenseMatrix m1(rows,cols); m1.setZero(); SparseMatrixType m2(rows,cols); - m2.reserve(10); + if(internal::random()%2) + m2.reserve(VectorXi::Constant(m2.outerSize(), 2)); + for (int k=0; k(0,rows-1); + int j = internal::random(0,cols-1); + if ((m1.coeff(i,j)==Scalar(0)) && (internal::random()%2)) + m2.insert(i,j) = m1(i,j) = internal::random(); + else + { + Scalar v = internal::random(); + m2.coeffRef(i,j) += v; + m1(i,j) += v; + } + } + VERIFY_IS_APPROX(m2,m1); + } + + // test insert (un-compressed) + for(int mode=0;mode<4;++mode) + { + DenseMatrix m1(rows,cols); + m1.setZero(); + SparseMatrixType m2(rows,cols); + VectorXi r(VectorXi::Constant(m2.outerSize(), ((mode%2)==0) ? 
m2.innerSize() : std::max(1,m2.innerSize()/8))); + m2.reserve(r); for (int k=0; k(0,rows-1); int j = internal::random(0,cols-1); if (m1.coeff(i,j)==Scalar(0)) m2.insert(i,j) = m1(i,j) = internal::random(); + if(mode==3) + m2.reserve(r); } - m2.finalize(); + if(internal::random()%2) + m2.makeCompressed(); VERIFY_IS_APPROX(m2,m1); } @@ -166,7 +196,13 @@ template void sparse_basic(const SparseMatrixType& re VERIFY_IS_APPROX(m1+=m2, refM1+=refM2); VERIFY_IS_APPROX(m1-=m2, refM1-=refM2); - VERIFY_IS_APPROX(m1.col(0).dot(refM2.row(0)), refM1.col(0).dot(refM2.row(0))); + if(SparseMatrixType::IsRowMajor) + VERIFY_IS_APPROX(m1.innerVector(0).dot(refM2.row(0)), refM1.row(0).dot(refM2.row(0))); + else + VERIFY_IS_APPROX(m1.innerVector(0).dot(refM2.row(0)), refM1.col(0).dot(refM2.row(0))); + + VERIFY_IS_APPROX(m1.conjugate(), refM1.conjugate()); + VERIFY_IS_APPROX(m1.real(), refM1.real()); refM4.setRandom(); // sparse cwise* dense @@ -190,10 +226,37 @@ template void sparse_basic(const SparseMatrixType& re DenseMatrix refMat2 = DenseMatrix::Zero(rows, rows); SparseMatrixType m2(rows, rows); initSparse(density, refMat2, m2); - int j0 = internal::random(0,rows-1); - int j1 = internal::random(0,rows-1); - VERIFY_IS_APPROX(m2.innerVector(j0), refMat2.col(j0)); - VERIFY_IS_APPROX(m2.innerVector(j0)+m2.innerVector(j1), refMat2.col(j0)+refMat2.col(j1)); + int j0 = internal::random(0,rows-1); + int j1 = internal::random(0,rows-1); + if(SparseMatrixType::IsRowMajor) + VERIFY_IS_APPROX(m2.innerVector(j0), refMat2.row(j0)); + else + VERIFY_IS_APPROX(m2.innerVector(j0), refMat2.col(j0)); + + if(SparseMatrixType::IsRowMajor) + VERIFY_IS_APPROX(m2.innerVector(j0)+m2.innerVector(j1), refMat2.row(j0)+refMat2.row(j1)); + else + VERIFY_IS_APPROX(m2.innerVector(j0)+m2.innerVector(j1), refMat2.col(j0)+refMat2.col(j1)); + + SparseMatrixType m3(rows,rows); + m3.reserve(VectorXi::Constant(rows,rows/2)); + for(int j=0; j0) + VERIFY(j==internal::real(m3.innerVector(j).lastCoeff())); + } + m3.makeCompressed(); + for(int j=0; j0) + VERIFY(j==internal::real(m3.innerVector(j).lastCoeff())); + } + //m2.innerVector(j0) = 2*m2.innerVector(j1); //refMat2.col(j0) = 2*refMat2.col(j1); //VERIFY_IS_APPROX(m2, refMat2); @@ -204,12 +267,19 @@ template void sparse_basic(const SparseMatrixType& re DenseMatrix refMat2 = DenseMatrix::Zero(rows, rows); SparseMatrixType m2(rows, rows); initSparse(density, refMat2, m2); - int j0 = internal::random(0,rows-2); - int j1 = internal::random(0,rows-2); + int j0 = internal::random(0,rows-2); + int j1 = internal::random(0,rows-2); int n0 = internal::random(1,rows-(std::max)(j0,j1)); - VERIFY_IS_APPROX(m2.innerVectors(j0,n0), refMat2.block(0,j0,rows,n0)); - VERIFY_IS_APPROX(m2.innerVectors(j0,n0)+m2.innerVectors(j1,n0), - refMat2.block(0,j0,rows,n0)+refMat2.block(0,j1,rows,n0)); + if(SparseMatrixType::IsRowMajor) + VERIFY_IS_APPROX(m2.innerVectors(j0,n0), refMat2.block(j0,0,n0,cols)); + else + VERIFY_IS_APPROX(m2.innerVectors(j0,n0), refMat2.block(0,j0,rows,n0)); + if(SparseMatrixType::IsRowMajor) + VERIFY_IS_APPROX(m2.innerVectors(j0,n0)+m2.innerVectors(j1,n0), + refMat2.block(j0,0,n0,cols)+refMat2.block(j1,0,n0,cols)); + else + VERIFY_IS_APPROX(m2.innerVectors(j0,n0)+m2.innerVectors(j1,n0), + refMat2.block(0,j0,rows,n0)+refMat2.block(0,j1,rows,n0)); //m2.innerVectors(j0,n0) = m2.innerVectors(j0,n0) + m2.innerVectors(j1,n0); //refMat2.block(0,j0,rows,n0) = refMat2.block(0,j0,rows,n0) + refMat2.block(0,j1,rows,n0); } @@ -239,7 +309,11 @@ template void sparse_basic(const SparseMatrixType& re else { 
countTrueNonZero++; - m2.insertBackByOuterInner(j,i) = refM2(i,j) = Scalar(1); + m2.insertBackByOuterInner(j,i) = Scalar(1); + if(SparseMatrixType::IsRowMajor) + refM2(j,i) = Scalar(1); + else + refM2(i,j) = Scalar(1); } } } @@ -250,8 +324,52 @@ template void sparse_basic(const SparseMatrixType& re VERIFY(countTrueNonZero==m2.nonZeros()); VERIFY_IS_APPROX(m2, refM2); } + + // test setFromTriplets + { + typedef Triplet TripletType; + std::vector triplets; + int ntriplets = rows*cols; + triplets.reserve(ntriplets); + DenseMatrix refMat(rows,cols); + refMat.setZero(); + for(int i=0;i(0,rows-1); + int c = internal::random(0,cols-1); + Scalar v = internal::random(); + triplets.push_back(TripletType(r,c,v)); + refMat(r,c) += v; + } + SparseMatrixType m(rows,cols); + m.setFromTriplets(triplets.begin(), triplets.end()); + VERIFY_IS_APPROX(m, refMat); + } + + // test triangularView + { + DenseMatrix refMat2(rows, rows), refMat3(rows, rows); + SparseMatrixType m2(rows, rows), m3(rows, rows); + initSparse(density, refMat2, m2); + refMat3 = refMat2.template triangularView(); + m3 = m2.template triangularView(); + VERIFY_IS_APPROX(m3, refMat3); + + refMat3 = refMat2.template triangularView(); + m3 = m2.template triangularView(); + VERIFY_IS_APPROX(m3, refMat3); + + refMat3 = refMat2.template triangularView(); + m3 = m2.template triangularView(); + VERIFY_IS_APPROX(m3, refMat3); + + refMat3 = refMat2.template triangularView(); + m3 = m2.template triangularView(); + VERIFY_IS_APPROX(m3, refMat3); + } // test selfadjointView + if(!SparseMatrixType::IsRowMajor) { DenseMatrix refMat2(rows, rows), refMat3(rows, rows); SparseMatrixType m2(rows, rows), m3(rows, rows); @@ -268,15 +386,25 @@ template void sparse_basic(const SparseMatrixType& re initSparse(density, refMat2, m2); VERIFY_IS_APPROX(m2.eval(), refMat2.sparseView().eval()); } + + // test diagonal + { + DenseMatrix refMat2 = DenseMatrix::Zero(rows, rows); + SparseMatrixType m2(rows, rows); + initSparse(density, refMat2, m2); + VERIFY_IS_APPROX(m2.diagonal(), refMat2.diagonal().eval()); + } } void test_sparse_basic() { for(int i = 0; i < g_repeat; i++) { - CALL_SUBTEST_1( sparse_basic(SparseMatrix(8, 8)) ); - CALL_SUBTEST_2( sparse_basic(SparseMatrix >(16, 16)) ); - CALL_SUBTEST_1( sparse_basic(SparseMatrix(33, 33)) ); - - CALL_SUBTEST_3( sparse_basic(DynamicSparseMatrix(8, 8)) ); + int s = Eigen::internal::random(1,50); + CALL_SUBTEST_1(( sparse_basic(SparseMatrix(8, 8)) )); + CALL_SUBTEST_2(( sparse_basic(SparseMatrix, ColMajor>(s, s)) )); + CALL_SUBTEST_2(( sparse_basic(SparseMatrix, RowMajor>(s, s)) )); + CALL_SUBTEST_1(( sparse_basic(SparseMatrix(s, s)) )); + CALL_SUBTEST_1(( sparse_basic(SparseMatrix(s, s)) )); + CALL_SUBTEST_1(( sparse_basic(SparseMatrix(s, s)) )); } } diff --git a/gtsam/3rdparty/Eigen/test/sparse_permutations.cpp b/gtsam/3rdparty/Eigen/test/sparse_permutations.cpp new file mode 100644 index 000000000..4d22e358e --- /dev/null +++ b/gtsam/3rdparty/Eigen/test/sparse_permutations.cpp @@ -0,0 +1,202 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. 
+// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#include "sparse.h" + +template void sparse_permutations(const SparseMatrixType& ref) +{ + typedef typename SparseMatrixType::Index Index; + + const Index rows = ref.rows(); + const Index cols = ref.cols(); + typedef typename SparseMatrixType::Scalar Scalar; + typedef typename SparseMatrixType::Index Index; + typedef SparseMatrix OtherSparseMatrixType; + typedef Matrix DenseMatrix; + typedef Matrix VectorI; + + double density = (std::max)(8./(rows*cols), 0.01); + + SparseMatrixType mat(rows, cols), up(rows,cols), lo(rows,cols); + OtherSparseMatrixType res; + DenseMatrix mat_d = DenseMatrix::Zero(rows, cols), up_sym_d, lo_sym_d, res_d; + + initSparse(density, mat_d, mat, 0); + + up = mat.template triangularView(); + lo = mat.template triangularView(); + + up_sym_d = mat_d.template selfadjointView(); + lo_sym_d = mat_d.template selfadjointView(); + + VERIFY_IS_APPROX(mat, mat_d); + VERIFY_IS_APPROX(up, DenseMatrix(mat_d.template triangularView())); + VERIFY_IS_APPROX(lo, DenseMatrix(mat_d.template triangularView())); + + PermutationMatrix p, p_null; + VectorI pi; + randomPermutationVector(pi, cols); + p.indices() = pi; + + res = mat*p; + res_d = mat_d*p; + VERIFY(res.isApprox(res_d) && "mat*p"); + + res = p*mat; + res_d = p*mat_d; + VERIFY(res.isApprox(res_d) && "p*mat"); + + res = mat*p.inverse(); + res_d = mat*p.inverse(); + VERIFY(res.isApprox(res_d) && "mat*inv(p)"); + + res = p.inverse()*mat; + res_d = p.inverse()*mat_d; + VERIFY(res.isApprox(res_d) && "inv(p)*mat"); + + res = mat.twistedBy(p); + res_d = (p * mat_d) * p.inverse(); + VERIFY(res.isApprox(res_d) && "p*mat*inv(p)"); + + + res = mat.template selfadjointView().twistedBy(p_null); + res_d = up_sym_d; + VERIFY(res.isApprox(res_d) && "full selfadjoint upper to full"); + + res = mat.template selfadjointView().twistedBy(p_null); + res_d = lo_sym_d; + VERIFY(res.isApprox(res_d) && "full selfadjoint lower to full"); + + + res = up.template selfadjointView().twistedBy(p_null); + res_d = up_sym_d; + VERIFY(res.isApprox(res_d) && "upper selfadjoint to full"); + + res = lo.template selfadjointView().twistedBy(p_null); + res_d = lo_sym_d; + VERIFY(res.isApprox(res_d) && "lower selfadjoint full"); + + + res = mat.template selfadjointView(); + res_d = up_sym_d; + VERIFY(res.isApprox(res_d) && "full selfadjoint upper to full"); + + res = mat.template selfadjointView(); + res_d = lo_sym_d; + VERIFY(res.isApprox(res_d) && "full selfadjoint lower to full"); + + res = up.template selfadjointView(); + res_d = up_sym_d; + VERIFY(res.isApprox(res_d) && "upper selfadjoint to full"); + + res = lo.template selfadjointView(); + res_d = lo_sym_d; + VERIFY(res.isApprox(res_d) && "lower selfadjoint full"); + + + res.template selfadjointView() = mat.template selfadjointView(); + res_d = up_sym_d.template triangularView(); + VERIFY(res.isApprox(res_d) && "full 
selfadjoint upper to upper"); + + res.template selfadjointView() = mat.template selfadjointView(); + res_d = up_sym_d.template triangularView(); + VERIFY(res.isApprox(res_d) && "full selfadjoint upper to lower"); + + res.template selfadjointView() = mat.template selfadjointView(); + res_d = lo_sym_d.template triangularView(); + VERIFY(res.isApprox(res_d) && "full selfadjoint lower to upper"); + + res.template selfadjointView() = mat.template selfadjointView(); + res_d = lo_sym_d.template triangularView(); + VERIFY(res.isApprox(res_d) && "full selfadjoint lower to lower"); + + + + res.template selfadjointView() = mat.template selfadjointView().twistedBy(p); + res_d = ((p * up_sym_d) * p.inverse()).eval().template triangularView(); + VERIFY(res.isApprox(res_d) && "full selfadjoint upper twisted to upper"); + + res.template selfadjointView() = mat.template selfadjointView().twistedBy(p); + res_d = ((p * lo_sym_d) * p.inverse()).eval().template triangularView(); + VERIFY(res.isApprox(res_d) && "full selfadjoint lower twisted to upper"); + + res.template selfadjointView() = mat.template selfadjointView().twistedBy(p); + res_d = ((p * lo_sym_d) * p.inverse()).eval().template triangularView(); + VERIFY(res.isApprox(res_d) && "full selfadjoint lower twisted to lower"); + + res.template selfadjointView() = mat.template selfadjointView().twistedBy(p); + res_d = ((p * up_sym_d) * p.inverse()).eval().template triangularView(); + VERIFY(res.isApprox(res_d) && "full selfadjoint upper twisted to lower"); + + + res.template selfadjointView() = up.template selfadjointView().twistedBy(p); + res_d = ((p * up_sym_d) * p.inverse()).eval().template triangularView(); + VERIFY(res.isApprox(res_d) && "upper selfadjoint twisted to upper"); + + res.template selfadjointView() = lo.template selfadjointView().twistedBy(p); + res_d = ((p * lo_sym_d) * p.inverse()).eval().template triangularView(); + VERIFY(res.isApprox(res_d) && "lower selfadjoint twisted to upper"); + + res.template selfadjointView() = lo.template selfadjointView().twistedBy(p); + res_d = ((p * lo_sym_d) * p.inverse()).eval().template triangularView(); + VERIFY(res.isApprox(res_d) && "lower selfadjoint twisted to lower"); + + res.template selfadjointView() = up.template selfadjointView().twistedBy(p); + res_d = ((p * up_sym_d) * p.inverse()).eval().template triangularView(); + VERIFY(res.isApprox(res_d) && "upper selfadjoint twisted to lower"); + + + res = mat.template selfadjointView().twistedBy(p); + res_d = (p * up_sym_d) * p.inverse(); + VERIFY(res.isApprox(res_d) && "full selfadjoint upper twisted to full"); + + res = mat.template selfadjointView().twistedBy(p); + res_d = (p * lo_sym_d) * p.inverse(); + VERIFY(res.isApprox(res_d) && "full selfadjoint lower twisted to full"); + + res = up.template selfadjointView().twistedBy(p); + res_d = (p * up_sym_d) * p.inverse(); + VERIFY(res.isApprox(res_d) && "upper selfadjoint twisted to full"); + + res = lo.template selfadjointView().twistedBy(p); + res_d = (p * lo_sym_d) * p.inverse(); + VERIFY(res.isApprox(res_d) && "lower selfadjoint twisted to full"); +} + +template void sparse_permutations_all(int size) +{ + CALL_SUBTEST(( sparse_permutations(SparseMatrix(size,size)) )); + CALL_SUBTEST(( sparse_permutations(SparseMatrix(size,size)) )); + CALL_SUBTEST(( sparse_permutations(SparseMatrix(size,size)) )); + CALL_SUBTEST(( sparse_permutations(SparseMatrix(size,size)) )); +} + +void test_sparse_permutations() +{ + for(int i = 0; i < g_repeat; i++) { + int s = Eigen::internal::random(1,50); + CALL_SUBTEST_1(( 
sparse_permutations_all(s) )); + CALL_SUBTEST_2(( sparse_permutations_all >(s) )); + } +} diff --git a/gtsam/3rdparty/Eigen/test/sparse_product.cpp b/gtsam/3rdparty/Eigen/test/sparse_product.cpp index a53ab3f1b..2c28d1131 100644 --- a/gtsam/3rdparty/Eigen/test/sparse_product.cpp +++ b/gtsam/3rdparty/Eigen/test/sparse_product.cpp @@ -1,7 +1,7 @@ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // -// Copyright (C) 2008 Daniel Gomez Ferro +// Copyright (C) 2008-2011 Gael Guennebaud // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public @@ -24,11 +24,37 @@ #include "sparse.h" -template void sparse_product(const SparseMatrixType& ref) +template struct test_outer; + +template struct test_outer { + static void run(SparseMatrixType& m2, SparseMatrixType& m4, DenseMatrix& refMat2, DenseMatrix& refMat4) { + int c = internal::random(0,m2.cols()-1); + int c1 = internal::random(0,m2.cols()-1); + VERIFY_IS_APPROX(m4=m2.col(c)*refMat2.col(c1).transpose(), refMat4=refMat2.col(c)*refMat2.col(c1).transpose()); + VERIFY_IS_APPROX(m4=refMat2.col(c1)*m2.col(c).transpose(), refMat4=refMat2.col(c1)*refMat2.col(c).transpose()); + } +}; + +template struct test_outer { + static void run(SparseMatrixType& m2, SparseMatrixType& m4, DenseMatrix& refMat2, DenseMatrix& refMat4) { + int r = internal::random(0,m2.rows()-1); + int c1 = internal::random(0,m2.cols()-1); + VERIFY_IS_APPROX(m4=m2.row(r).transpose()*refMat2.col(c1).transpose(), refMat4=refMat2.row(r).transpose()*refMat2.col(c1).transpose()); + VERIFY_IS_APPROX(m4=refMat2.col(c1)*m2.row(r), refMat4=refMat2.col(c1)*refMat2.row(r)); + } +}; + +// (m2,m4,refMat2,refMat4,dv1); +// VERIFY_IS_APPROX(m4=m2.innerVector(c)*dv1.transpose(), refMat4=refMat2.colVector(c)*dv1.transpose()); +// VERIFY_IS_APPROX(m4=dv1*mcm.col(c).transpose(), refMat4=dv1*refMat2.col(c).transpose()); + +template void sparse_product() { typedef typename SparseMatrixType::Index Index; - const Index rows = ref.rows(); - const Index cols = ref.cols(); + Index n = 100; + const Index rows = internal::random(1,n); + const Index cols = internal::random(1,n); + const Index depth = internal::random(1,n); typedef typename SparseMatrixType::Scalar Scalar; enum { Flags = SparseMatrixType::Flags }; @@ -41,50 +67,71 @@ template void sparse_product(const SparseMatrixType& // test matrix-matrix product { - DenseMatrix refMat2 = DenseMatrix::Zero(rows, rows); - DenseMatrix refMat3 = DenseMatrix::Zero(rows, rows); - DenseMatrix refMat4 = DenseMatrix::Zero(rows, rows); - DenseMatrix refMat5 = DenseMatrix::Random(rows, rows); + DenseMatrix refMat2 = DenseMatrix::Zero(rows, depth); + DenseMatrix refMat2t = DenseMatrix::Zero(depth, rows); + DenseMatrix refMat3 = DenseMatrix::Zero(depth, cols); + DenseMatrix refMat3t = DenseMatrix::Zero(cols, depth); + DenseMatrix refMat4 = DenseMatrix::Zero(rows, cols); + DenseMatrix refMat4t = DenseMatrix::Zero(cols, rows); + DenseMatrix refMat5 = DenseMatrix::Random(depth, cols); + DenseMatrix refMat6 = DenseMatrix::Random(rows, rows); DenseMatrix dm4 = DenseMatrix::Zero(rows, rows); - DenseVector dv1 = DenseVector::Random(rows); - SparseMatrixType m2(rows, rows); - SparseMatrixType m3(rows, rows); - SparseMatrixType m4(rows, rows); - initSparse(density, refMat2, m2); - initSparse(density, refMat3, m3); - initSparse(density, refMat4, m4); +// DenseVector dv1 = DenseVector::Random(rows); + SparseMatrixType m2 (rows, depth); + SparseMatrixType m2t(depth, rows); + 
SparseMatrixType m3 (depth, cols); + SparseMatrixType m3t(cols, depth); + SparseMatrixType m4 (rows, cols); + SparseMatrixType m4t(cols, rows); + SparseMatrixType m6(rows, rows); + initSparse(density, refMat2, m2); + initSparse(density, refMat2t, m2t); + initSparse(density, refMat3, m3); + initSparse(density, refMat3t, m3t); + initSparse(density, refMat4, m4); + initSparse(density, refMat4t, m4t); + initSparse(density, refMat6, m6); - int c = internal::random(0,rows-1); +// int c = internal::random(0,depth-1); + // sparse * sparse VERIFY_IS_APPROX(m4=m2*m3, refMat4=refMat2*refMat3); - VERIFY_IS_APPROX(m4=m2.transpose()*m3, refMat4=refMat2.transpose()*refMat3); - VERIFY_IS_APPROX(m4=m2.transpose()*m3.transpose(), refMat4=refMat2.transpose()*refMat3.transpose()); - VERIFY_IS_APPROX(m4=m2*m3.transpose(), refMat4=refMat2*refMat3.transpose()); + VERIFY_IS_APPROX(m4=m2t.transpose()*m3, refMat4=refMat2t.transpose()*refMat3); + VERIFY_IS_APPROX(m4=m2t.transpose()*m3t.transpose(), refMat4=refMat2t.transpose()*refMat3t.transpose()); + VERIFY_IS_APPROX(m4=m2*m3t.transpose(), refMat4=refMat2*refMat3t.transpose()); VERIFY_IS_APPROX(m4 = m2*m3/s1, refMat4 = refMat2*refMat3/s1); VERIFY_IS_APPROX(m4 = m2*m3*s1, refMat4 = refMat2*refMat3*s1); VERIFY_IS_APPROX(m4 = s2*m2*m3*s1, refMat4 = s2*refMat2*refMat3*s1); + VERIFY_IS_APPROX(m4=(m2*m3).pruned(0), refMat4=refMat2*refMat3); + VERIFY_IS_APPROX(m4=(m2t.transpose()*m3).pruned(0), refMat4=refMat2t.transpose()*refMat3); + VERIFY_IS_APPROX(m4=(m2t.transpose()*m3t.transpose()).pruned(0), refMat4=refMat2t.transpose()*refMat3t.transpose()); + VERIFY_IS_APPROX(m4=(m2*m3t.transpose()).pruned(0), refMat4=refMat2*refMat3t.transpose()); + + // test aliasing + m4 = m2; refMat4 = refMat2; + VERIFY_IS_APPROX(m4=m4*m3, refMat4=refMat4*refMat3); + // sparse * dense VERIFY_IS_APPROX(dm4=m2*refMat3, refMat4=refMat2*refMat3); - VERIFY_IS_APPROX(dm4=m2*refMat3.transpose(), refMat4=refMat2*refMat3.transpose()); - VERIFY_IS_APPROX(dm4=m2.transpose()*refMat3, refMat4=refMat2.transpose()*refMat3); - VERIFY_IS_APPROX(dm4=m2.transpose()*refMat3.transpose(), refMat4=refMat2.transpose()*refMat3.transpose()); + VERIFY_IS_APPROX(dm4=m2*refMat3t.transpose(), refMat4=refMat2*refMat3t.transpose()); + VERIFY_IS_APPROX(dm4=m2t.transpose()*refMat3, refMat4=refMat2t.transpose()*refMat3); + VERIFY_IS_APPROX(dm4=m2t.transpose()*refMat3t.transpose(), refMat4=refMat2t.transpose()*refMat3t.transpose()); VERIFY_IS_APPROX(dm4=m2*(refMat3+refMat3), refMat4=refMat2*(refMat3+refMat3)); - VERIFY_IS_APPROX(dm4=m2.transpose()*(refMat3+refMat5)*0.5, refMat4=refMat2.transpose()*(refMat3+refMat5)*0.5); + VERIFY_IS_APPROX(dm4=m2t.transpose()*(refMat3+refMat5)*0.5, refMat4=refMat2t.transpose()*(refMat3+refMat5)*0.5); // dense * sparse VERIFY_IS_APPROX(dm4=refMat2*m3, refMat4=refMat2*refMat3); - VERIFY_IS_APPROX(dm4=refMat2*m3.transpose(), refMat4=refMat2*refMat3.transpose()); - VERIFY_IS_APPROX(dm4=refMat2.transpose()*m3, refMat4=refMat2.transpose()*refMat3); - VERIFY_IS_APPROX(dm4=refMat2.transpose()*m3.transpose(), refMat4=refMat2.transpose()*refMat3.transpose()); + VERIFY_IS_APPROX(dm4=refMat2*m3t.transpose(), refMat4=refMat2*refMat3t.transpose()); + VERIFY_IS_APPROX(dm4=refMat2t.transpose()*m3, refMat4=refMat2t.transpose()*refMat3); + VERIFY_IS_APPROX(dm4=refMat2t.transpose()*m3t.transpose(), refMat4=refMat2t.transpose()*refMat3t.transpose()); // sparse * dense and dense * sparse outer product - VERIFY_IS_APPROX(m4=m2.col(c)*dv1.transpose(), refMat4=refMat2.col(c)*dv1.transpose()); - 
VERIFY_IS_APPROX(m4=dv1*m2.col(c).transpose(), refMat4=dv1*refMat2.col(c).transpose()); + test_outer::run(m2,m4,refMat2,refMat4); - VERIFY_IS_APPROX(m3=m3*m3, refMat3=refMat3*refMat3); + VERIFY_IS_APPROX(m6=m6*m6, refMat6=refMat6*refMat6); } // test matrix - diagonal product @@ -116,18 +163,19 @@ template void sparse_product(const SparseMatrixType& do { initSparse(density, refUp, mUp, ForceRealDiag|/*ForceNonZeroDiag|*/MakeUpperTriangular); } while (refUp.isZero()); - refLo = refUp.transpose().conjugate(); - mLo = mUp.transpose().conjugate(); + refLo = refUp.adjoint(); + mLo = mUp.adjoint(); refS = refUp + refLo; refS.diagonal() *= 0.5; mS = mUp + mLo; + // TODO be able to address the diagonal.... for (int k=0; k void sparse_produc void test_sparse_product() { for(int i = 0; i < g_repeat; i++) { - CALL_SUBTEST_1( sparse_product(SparseMatrix(8, 8)) ); - CALL_SUBTEST_2( sparse_product(SparseMatrix >(16, 16)) ); - CALL_SUBTEST_1( sparse_product(SparseMatrix(33, 33)) ); - - CALL_SUBTEST_3( sparse_product(DynamicSparseMatrix(8, 8)) ); - + CALL_SUBTEST_1( (sparse_product >()) ); + CALL_SUBTEST_1( (sparse_product >()) ); + CALL_SUBTEST_2( (sparse_product, ColMajor > >()) ); + CALL_SUBTEST_2( (sparse_product, RowMajor > >()) ); CALL_SUBTEST_4( (sparse_product_regression_test, Matrix >()) ); } } diff --git a/gtsam/3rdparty/Eigen/test/sparse_solver.h b/gtsam/3rdparty/Eigen/test/sparse_solver.h new file mode 100644 index 000000000..69bf716ed --- /dev/null +++ b/gtsam/3rdparty/Eigen/test/sparse_solver.h @@ -0,0 +1,324 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
+ +#include "sparse.h" +#include + +template +void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A, const Rhs& b, const DenseMat& dA, const DenseRhs& db) +{ + typedef typename Solver::MatrixType Mat; + typedef typename Mat::Scalar Scalar; + + DenseRhs refX = dA.lu().solve(db); + + Rhs x(b.rows(), b.cols()); + Rhs oldb = b; + + solver.compute(A); + if (solver.info() != Success) + { + std::cerr << "sparse solver testing: factorization failed (check_sparse_solving)\n"; + exit(0); + return; + } + x = solver.solve(b); + if (solver.info() != Success) + { + std::cerr << "sparse solver testing: solving failed\n"; + return; + } + VERIFY(oldb.isApprox(b) && "sparse solver testing: the rhs should not be modified!"); + + VERIFY(x.isApprox(refX,test_precision())); + + x.setZero(); + // test the analyze/factorize API + solver.analyzePattern(A); + solver.factorize(A); + if (solver.info() != Success) + { + std::cerr << "sparse solver testing: factorization failed (check_sparse_solving)\n"; + exit(0); + return; + } + x = solver.solve(b); + if (solver.info() != Success) + { + std::cerr << "sparse solver testing: solving failed\n"; + return; + } + VERIFY(oldb.isApprox(b) && "sparse solver testing: the rhs should not be modified!"); + + VERIFY(x.isApprox(refX,test_precision())); + + // test Block as the result and rhs: + { + DenseRhs x(db.rows(), db.cols()); + DenseRhs b(db), oldb(db); + x.setZero(); + x.block(0,0,x.rows(),x.cols()) = solver.solve(b.block(0,0,b.rows(),b.cols())); + VERIFY(oldb.isApprox(b) && "sparse solver testing: the rhs should not be modified!"); + VERIFY(x.isApprox(refX,test_precision())); + } +} + +template +void check_sparse_solving_real_cases(Solver& solver, const typename Solver::MatrixType& A, const Rhs& b, const Rhs& refX) +{ + typedef typename Solver::MatrixType Mat; + typedef typename Mat::Scalar Scalar; + typedef typename Mat::RealScalar RealScalar; + + Rhs x(b.rows(), b.cols()); + + solver.compute(A); + if (solver.info() != Success) + { + std::cerr << "sparse solver testing: factorization failed (check_sparse_solving_real_cases)\n"; + exit(0); + return; + } + x = solver.solve(b); + if (solver.info() != Success) + { + std::cerr << "sparse solver testing: solving failed\n"; + return; + } + + RealScalar res_error; + // Compute the norm of the relative error + if(refX.size() != 0) + res_error = (refX - x).norm()/refX.norm(); + else + { + // Compute the relative residual norm + res_error = (b - A * x).norm()/b.norm(); + } + if (res_error > test_precision() ){ + std::cerr << "Test " << g_test_stack.back() << " failed in "EI_PP_MAKE_STRING(__FILE__) + << " (" << EI_PP_MAKE_STRING(__LINE__) << ")" << std::endl << std::endl; + abort(); + } + +} +template +void check_sparse_determinant(Solver& solver, const typename Solver::MatrixType& A, const DenseMat& dA) +{ + typedef typename Solver::MatrixType Mat; + typedef typename Mat::Scalar Scalar; + typedef typename Mat::RealScalar RealScalar; + + solver.compute(A); + if (solver.info() != Success) + { + std::cerr << "sparse solver testing: factorization failed (check_sparse_determinant)\n"; + return; + } + + Scalar refDet = dA.determinant(); + VERIFY_IS_APPROX(refDet,solver.determinant()); +} + + +template +int generate_sparse_spd_problem(Solver& , typename Solver::MatrixType& A, typename Solver::MatrixType& halfA, DenseMat& dA, int maxSize = 300) +{ + typedef typename Solver::MatrixType Mat; + typedef typename Mat::Scalar Scalar; + typedef Matrix DenseMatrix; + + int size = internal::random(1,maxSize); + double 
density = (std::max)(8./(size*size), 0.01); + + Mat M(size, size); + DenseMatrix dM(size, size); + + initSparse(density, dM, M, ForceNonZeroDiag); + + A = M * M.adjoint(); + dA = dM * dM.adjoint(); + + halfA.resize(size,size); + halfA.template selfadjointView().rankUpdate(M); + + return size; +} + + +#ifdef TEST_REAL_CASES +template +inline std::string get_matrixfolder() +{ + std::string mat_folder = TEST_REAL_CASES; + if( internal::is_same >::value || internal::is_same >::value ) + mat_folder = mat_folder + static_cast("/complex/"); + else + mat_folder = mat_folder + static_cast("/real/"); + return mat_folder; +} +#endif + +template void check_sparse_spd_solving(Solver& solver) +{ + typedef typename Solver::MatrixType Mat; + typedef typename Mat::Scalar Scalar; + typedef typename Mat::Index Index; + typedef SparseMatrix SpMat; + typedef Matrix DenseMatrix; + typedef Matrix DenseVector; + + // generate the problem + Mat A, halfA; + DenseMatrix dA; + int size = generate_sparse_spd_problem(solver, A, halfA, dA); + + // generate the right hand sides + int rhsCols = internal::random(1,16); + double density = (std::max)(8./(size*rhsCols), 0.1); + SpMat B(size,rhsCols); + DenseVector b = DenseVector::Random(size); + DenseMatrix dB(size,rhsCols); + initSparse(density, dB, B, ForceNonZeroDiag); + + for (int i = 0; i < g_repeat; i++) { + check_sparse_solving(solver, A, b, dA, b); + check_sparse_solving(solver, halfA, b, dA, b); + check_sparse_solving(solver, A, dB, dA, dB); + check_sparse_solving(solver, halfA, dB, dA, dB); + check_sparse_solving(solver, A, B, dA, dB); + check_sparse_solving(solver, halfA, B, dA, dB); + } + + // First, get the folder +#ifdef TEST_REAL_CASES + if (internal::is_same::value + || internal::is_same >::value) + return ; + + std::string mat_folder = get_matrixfolder(); + MatrixMarketIterator it(mat_folder); + for (; it; ++it) + { + if (it.sym() == SPD){ + Mat halfA; + PermutationMatrix pnull; + halfA.template selfadjointView() = it.matrix().template triangularView().twistedBy(pnull); + + std::cout<< " ==== SOLVING WITH MATRIX " << it.matname() << " ==== \n"; + check_sparse_solving_real_cases(solver, it.matrix(), it.rhs(), it.refX()); + check_sparse_solving_real_cases(solver, halfA, it.rhs(), it.refX()); + } + } +#endif +} + +template void check_sparse_spd_determinant(Solver& solver) +{ + typedef typename Solver::MatrixType Mat; + typedef typename Mat::Scalar Scalar; + typedef Matrix DenseMatrix; + + // generate the problem + Mat A, halfA; + DenseMatrix dA; + generate_sparse_spd_problem(solver, A, halfA, dA, 30); + + for (int i = 0; i < g_repeat; i++) { + check_sparse_determinant(solver, A, dA); + check_sparse_determinant(solver, halfA, dA ); + } +} + +template +int generate_sparse_square_problem(Solver&, typename Solver::MatrixType& A, DenseMat& dA, int maxSize = 300) +{ + typedef typename Solver::MatrixType Mat; + typedef typename Mat::Scalar Scalar; + typedef Matrix DenseMatrix; + + int size = internal::random(1,maxSize); + double density = (std::max)(8./(size*size), 0.01); + + A.resize(size,size); + dA.resize(size,size); + + initSparse(density, dA, A, ForceNonZeroDiag); + + return size; +} + +template void check_sparse_square_solving(Solver& solver) +{ + typedef typename Solver::MatrixType Mat; + typedef typename Mat::Scalar Scalar; + typedef Matrix DenseMatrix; + typedef Matrix DenseVector; + + int rhsCols = internal::random(1,16); + + Mat A; + DenseMatrix dA; + int size = generate_sparse_square_problem(solver, A, dA); + + DenseVector b = DenseVector::Random(size); + 
DenseMatrix dB = DenseMatrix::Random(size,rhsCols); + A.makeCompressed(); + for (int i = 0; i < g_repeat; i++) { + check_sparse_solving(solver, A, b, dA, b); + check_sparse_solving(solver, A, dB, dA, dB); + } + + // First, get the folder +#ifdef TEST_REAL_CASES + if (internal::is_same::value + || internal::is_same >::value) + return ; + + std::string mat_folder = get_matrixfolder(); + MatrixMarketIterator it(mat_folder); + for (; it; ++it) + { + std::cout<< " ==== SOLVING WITH MATRIX " << it.matname() << " ==== \n"; + check_sparse_solving_real_cases(solver, it.matrix(), it.rhs(), it.refX()); + } +#endif + +} + +template void check_sparse_square_determinant(Solver& solver) +{ + typedef typename Solver::MatrixType Mat; + typedef typename Mat::Scalar Scalar; + typedef Matrix DenseMatrix; + + // generate the problem + Mat A; + DenseMatrix dA; + generate_sparse_square_problem(solver, A, dA, 30); + A.makeCompressed(); + for (int i = 0; i < g_repeat; i++) { + check_sparse_determinant(solver, A, dA); + } +} diff --git a/gtsam/3rdparty/Eigen/test/sparse_solvers.cpp b/gtsam/3rdparty/Eigen/test/sparse_solvers.cpp index 12a1cb9b6..a3d79b0ea 100644 --- a/gtsam/3rdparty/Eigen/test/sparse_solvers.cpp +++ b/gtsam/3rdparty/Eigen/test/sparse_solvers.cpp @@ -72,6 +72,15 @@ template void sparse_solvers(int rows, int cols) initSparse(density, refMat2, m2, ForceNonZeroDiag|MakeUpperTriangular, &zeroCoords, &nonzeroCoords); VERIFY_IS_APPROX(refMat2.template triangularView().solve(vec2), m2.template triangularView().solve(vec3)); + VERIFY_IS_APPROX(refMat2.conjugate().template triangularView().solve(vec2), + m2.conjugate().template triangularView().solve(vec3)); + { + SparseMatrix cm2(m2); + //Index rows, Index cols, Index nnz, Index* outerIndexPtr, Index* innerIndexPtr, Scalar* valuePtr + MappedSparseMatrix mm2(rows, cols, cm2.nonZeros(), cm2.outerIndexPtr(), cm2.innerIndexPtr(), cm2.valuePtr()); + VERIFY_IS_APPROX(refMat2.conjugate().template triangularView().solve(vec2), + mm2.conjugate().template triangularView().solve(vec3)); + } // lower - transpose initSparse(density, refMat2, m2, ForceNonZeroDiag|MakeLowerTriangular, &zeroCoords, &nonzeroCoords); diff --git a/gtsam/3rdparty/Eigen/test/sparse_vector.cpp b/gtsam/3rdparty/Eigen/test/sparse_vector.cpp index e0c281c83..09d36a51b 100644 --- a/gtsam/3rdparty/Eigen/test/sparse_vector.cpp +++ b/gtsam/3rdparty/Eigen/test/sparse_vector.cpp @@ -1,7 +1,7 @@ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. 
// -// Copyright (C) 2008 Daniel Gomez Ferro +// Copyright (C) 2008-2011 Gael Guennebaud // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public @@ -34,9 +34,9 @@ template void sparse_vector(int rows, int cols) typedef SparseMatrix SparseMatrixType; Scalar eps = 1e-6; - SparseMatrixType m1(rows,cols); + SparseMatrixType m1(rows,rows); SparseVectorType v1(rows), v2(rows), v3(rows); - DenseMatrix refM1 = DenseMatrix::Zero(rows, cols); + DenseMatrix refM1 = DenseMatrix::Zero(rows, rows); DenseVector refV1 = DenseVector::Random(rows), refV2 = DenseVector::Random(rows), refV3 = DenseVector::Random(rows); @@ -86,6 +86,11 @@ template void sparse_vector(int rows, int cols) VERIFY_IS_APPROX(v1.dot(v2), refV1.dot(refV2)); VERIFY_IS_APPROX(v1.dot(refV2), refV1.dot(refV2)); + VERIFY_IS_APPROX(v1.dot(m1*v2), refV1.dot(refM1*refV2)); + int i = internal::random(0,rows-1); + VERIFY_IS_APPROX(v1.dot(m1.col(i)), refV1.dot(refM1.col(i))); + + VERIFY_IS_APPROX(v1.squaredNorm(), refV1.squaredNorm()); } diff --git a/gtsam/3rdparty/Eigen/test/superlu_support.cpp b/gtsam/3rdparty/Eigen/test/superlu_support.cpp new file mode 100644 index 000000000..ad435943b --- /dev/null +++ b/gtsam/3rdparty/Eigen/test/superlu_support.cpp @@ -0,0 +1,37 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
+ +#include "sparse_solver.h" + +#include + +void test_superlu_support() +{ + SuperLU > superlu_double_colmajor; + SuperLU > > superlu_cplxdouble_colmajor; + CALL_SUBTEST_1( check_sparse_square_solving(superlu_double_colmajor) ); + CALL_SUBTEST_2( check_sparse_square_solving(superlu_cplxdouble_colmajor) ); + CALL_SUBTEST_1( check_sparse_square_determinant(superlu_double_colmajor) ); + CALL_SUBTEST_2( check_sparse_square_determinant(superlu_cplxdouble_colmajor) ); +} diff --git a/gtsam/3rdparty/Eigen/test/triangular.cpp b/gtsam/3rdparty/Eigen/test/triangular.cpp index 69decb793..4db8f8482 100644 --- a/gtsam/3rdparty/Eigen/test/triangular.cpp +++ b/gtsam/3rdparty/Eigen/test/triangular.cpp @@ -42,16 +42,8 @@ template void triangular_square(const MatrixType& m) m3(rows, cols), m4(rows, cols), r1(rows, cols), - r2(rows, cols), - mzero = MatrixType::Zero(rows, cols), - mones = MatrixType::Ones(rows, cols), - identity = Matrix - ::Identity(rows, rows), - square = Matrix - ::Random(rows, rows); - VectorType v1 = VectorType::Random(rows), - v2 = VectorType::Random(rows), - vzero = VectorType::Zero(rows); + r2(rows, cols); + VectorType v2 = VectorType::Random(rows); MatrixType m1up = m1.template triangularView(); MatrixType m2up = m2.template triangularView(); @@ -113,14 +105,14 @@ template void triangular_square(const MatrixType& m) // check M * inv(L) using in place API m4 = m3; - m3.transpose().template triangularView().solveInPlace(trm4); - VERIFY(m4.cwiseAbs().isIdentity(test_precision())); + m1.transpose().template triangularView().solveInPlace(trm4); + VERIFY_IS_APPROX(m4 * m1.template triangularView(), m3); // check M * inv(U) using in place API m3 = m1.template triangularView(); m4 = m3; m3.transpose().template triangularView().solveInPlace(trm4); - VERIFY(m4.cwiseAbs().isIdentity(test_precision())); + VERIFY_IS_APPROX(m4 * m1.template triangularView(), m3); // check solve with unit diagonal m3 = m1.template triangularView(); @@ -158,21 +150,12 @@ template void triangular_rect(const MatrixType& m) m3(rows, cols), m4(rows, cols), r1(rows, cols), - r2(rows, cols), - mzero = MatrixType::Zero(rows, cols), - mones = MatrixType::Ones(rows, cols); - RMatrixType identity = Matrix - ::Identity(rows, rows), - square = Matrix - ::Random(rows, rows); - VectorType v1 = VectorType::Random(rows), - v2 = VectorType::Random(rows), - vzero = VectorType::Zero(rows); + r2(rows, cols); MatrixType m1up = m1.template triangularView(); MatrixType m2up = m2.template triangularView(); - if (rows*cols>1) + if (rows>1 && cols>1) { VERIFY(m1up.isUpperTriangular()); VERIFY(m2up.transpose().isLowerTriangular()); @@ -237,15 +220,17 @@ template void triangular_rect(const MatrixType& m) void bug_159() { - Matrix3d m = Matrix3d::Random().triangularView(); + Matrix3d m = Matrix3d::Random().triangularView(); + EIGEN_UNUSED_VARIABLE(m) } void test_triangular() { + int maxsize = (std::min)(EIGEN_TEST_MAX_SIZE,20); for(int i = 0; i < g_repeat ; i++) { - int r = internal::random(2,20); EIGEN_UNUSED_VARIABLE(r); - int c = internal::random(2,20); EIGEN_UNUSED_VARIABLE(c); + int r = internal::random(2,maxsize); EIGEN_UNUSED_VARIABLE(r); + int c = internal::random(2,maxsize); EIGEN_UNUSED_VARIABLE(c); CALL_SUBTEST_1( triangular_square(Matrix()) ); CALL_SUBTEST_2( triangular_square(Matrix()) ); diff --git a/gtsam/3rdparty/Eigen/test/umfpack_support.cpp b/gtsam/3rdparty/Eigen/test/umfpack_support.cpp new file mode 100644 index 000000000..1922aa959 --- /dev/null +++ b/gtsam/3rdparty/Eigen/test/umfpack_support.cpp @@ -0,0 +1,46 @@ +// 
This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#include "sparse_solver.h" + +#include + +template void test_umfpack_support_T() +{ + UmfPackLU > umfpack_colmajor; + UmfPackLU > umfpack_rowmajor; + + check_sparse_square_solving(umfpack_colmajor); + check_sparse_square_solving(umfpack_rowmajor); + + check_sparse_square_determinant(umfpack_colmajor); + check_sparse_square_determinant(umfpack_rowmajor); +} + +void test_umfpack_support() +{ + CALL_SUBTEST_1(test_umfpack_support_T()); + CALL_SUBTEST_2(test_umfpack_support_T >()); +} + diff --git a/gtsam/3rdparty/Eigen/test/vectorwiseop.cpp b/gtsam/3rdparty/Eigen/test/vectorwiseop.cpp new file mode 100644 index 000000000..d3518b7ec --- /dev/null +++ b/gtsam/3rdparty/Eigen/test/vectorwiseop.cpp @@ -0,0 +1,187 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
+ +#define EIGEN_NO_STATIC_ASSERT + +#include "main.h" + +template void vectorwiseop_array(const ArrayType& m) +{ + typedef typename ArrayType::Index Index; + typedef typename ArrayType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Array ColVectorType; + typedef Array RowVectorType; + + Index rows = m.rows(); + Index cols = m.cols(); + Index r = internal::random(0, rows-1), + c = internal::random(0, cols-1); + + ArrayType m1 = ArrayType::Random(rows, cols), + m2(rows, cols), + m3(rows, cols); + + ColVectorType colvec = ColVectorType::Random(rows); + RowVectorType rowvec = RowVectorType::Random(cols); + + // test addition + + m2 = m1; + m2.colwise() += colvec; + VERIFY_IS_APPROX(m2, m1.colwise() + colvec); + VERIFY_IS_APPROX(m2.col(c), m1.col(c) + colvec); + + VERIFY_RAISES_ASSERT(m2.colwise() += colvec.transpose()); + VERIFY_RAISES_ASSERT(m1.colwise() + colvec.transpose()); + + m2 = m1; + m2.rowwise() += rowvec; + VERIFY_IS_APPROX(m2, m1.rowwise() + rowvec); + VERIFY_IS_APPROX(m2.row(r), m1.row(r) + rowvec); + + VERIFY_RAISES_ASSERT(m2.rowwise() += rowvec.transpose()); + VERIFY_RAISES_ASSERT(m1.rowwise() + rowvec.transpose()); + + // test substraction + + m2 = m1; + m2.colwise() -= colvec; + VERIFY_IS_APPROX(m2, m1.colwise() - colvec); + VERIFY_IS_APPROX(m2.col(c), m1.col(c) - colvec); + + VERIFY_RAISES_ASSERT(m2.colwise() -= colvec.transpose()); + VERIFY_RAISES_ASSERT(m1.colwise() - colvec.transpose()); + + m2 = m1; + m2.rowwise() -= rowvec; + VERIFY_IS_APPROX(m2, m1.rowwise() - rowvec); + VERIFY_IS_APPROX(m2.row(r), m1.row(r) - rowvec); + + VERIFY_RAISES_ASSERT(m2.rowwise() -= rowvec.transpose()); + VERIFY_RAISES_ASSERT(m1.rowwise() - rowvec.transpose()); + + // test multiplication + + m2 = m1; + m2.colwise() *= colvec; + VERIFY_IS_APPROX(m2, m1.colwise() * colvec); + VERIFY_IS_APPROX(m2.col(c), m1.col(c) * colvec); + + VERIFY_RAISES_ASSERT(m2.colwise() *= colvec.transpose()); + VERIFY_RAISES_ASSERT(m1.colwise() * colvec.transpose()); + + m2 = m1; + m2.rowwise() *= rowvec; + VERIFY_IS_APPROX(m2, m1.rowwise() * rowvec); + VERIFY_IS_APPROX(m2.row(r), m1.row(r) * rowvec); + + VERIFY_RAISES_ASSERT(m2.rowwise() *= rowvec.transpose()); + VERIFY_RAISES_ASSERT(m1.rowwise() * rowvec.transpose()); + + // test quotient + + m2 = m1; + m2.colwise() /= colvec; + VERIFY_IS_APPROX(m2, m1.colwise() / colvec); + VERIFY_IS_APPROX(m2.col(c), m1.col(c) / colvec); + + VERIFY_RAISES_ASSERT(m2.colwise() /= colvec.transpose()); + VERIFY_RAISES_ASSERT(m1.colwise() / colvec.transpose()); + + m2 = m1; + m2.rowwise() /= rowvec; + VERIFY_IS_APPROX(m2, m1.rowwise() / rowvec); + VERIFY_IS_APPROX(m2.row(r), m1.row(r) / rowvec); + + VERIFY_RAISES_ASSERT(m2.rowwise() /= rowvec.transpose()); + VERIFY_RAISES_ASSERT(m1.rowwise() / rowvec.transpose()); +} + +template void vectorwiseop_matrix(const MatrixType& m) +{ + typedef typename MatrixType::Index Index; + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Matrix ColVectorType; + typedef Matrix RowVectorType; + + Index rows = m.rows(); + Index cols = m.cols(); + Index r = internal::random(0, rows-1), + c = internal::random(0, cols-1); + + MatrixType m1 = MatrixType::Random(rows, cols), + m2(rows, cols), + m3(rows, cols); + + ColVectorType colvec = ColVectorType::Random(rows); + RowVectorType rowvec = RowVectorType::Random(cols); + + // test addition + + m2 = m1; + m2.colwise() += colvec; + VERIFY_IS_APPROX(m2, m1.colwise() + colvec); + VERIFY_IS_APPROX(m2.col(c), m1.col(c) + colvec); + + 
VERIFY_RAISES_ASSERT(m2.colwise() += colvec.transpose());
+ VERIFY_RAISES_ASSERT(m1.colwise() + colvec.transpose());
+
+ m2 = m1;
+ m2.rowwise() += rowvec;
+ VERIFY_IS_APPROX(m2, m1.rowwise() + rowvec);
+ VERIFY_IS_APPROX(m2.row(r), m1.row(r) + rowvec);
+
+ VERIFY_RAISES_ASSERT(m2.rowwise() += rowvec.transpose());
+ VERIFY_RAISES_ASSERT(m1.rowwise() + rowvec.transpose());
+
+ // test subtraction
+
+ m2 = m1;
+ m2.colwise() -= colvec;
+ VERIFY_IS_APPROX(m2, m1.colwise() - colvec);
+ VERIFY_IS_APPROX(m2.col(c), m1.col(c) - colvec);
+
+ VERIFY_RAISES_ASSERT(m2.colwise() -= colvec.transpose());
+ VERIFY_RAISES_ASSERT(m1.colwise() - colvec.transpose());
+
+ m2 = m1;
+ m2.rowwise() -= rowvec;
+ VERIFY_IS_APPROX(m2, m1.rowwise() - rowvec);
+ VERIFY_IS_APPROX(m2.row(r), m1.row(r) - rowvec);
+
+ VERIFY_RAISES_ASSERT(m2.rowwise() -= rowvec.transpose());
+ VERIFY_RAISES_ASSERT(m1.rowwise() - rowvec.transpose());
+}
+
+void test_vectorwiseop()
+{
+ CALL_SUBTEST_1(vectorwiseop_array(Array22cd()));
+ CALL_SUBTEST_2(vectorwiseop_array(Array()));
+ CALL_SUBTEST_3(vectorwiseop_array(ArrayXXf(3, 4)));
+ CALL_SUBTEST_4(vectorwiseop_matrix(Matrix4cf()));
+ CALL_SUBTEST_5(vectorwiseop_matrix(Matrix()));
+ CALL_SUBTEST_6(vectorwiseop_matrix(MatrixXd(7,2)));
+}
diff --git a/gtsam/3rdparty/Eigen/test/zerosized.cpp b/gtsam/3rdparty/Eigen/test/zerosized.cpp
index 06e31cc09..c5d2cc6ed 100644
--- a/gtsam/3rdparty/Eigen/test/zerosized.cpp
+++ b/gtsam/3rdparty/Eigen/test/zerosized.cpp
@@ -62,8 +62,13 @@ void test_zerosized()
{
zeroSizedMatrix();
zeroSizedMatrix >();
zeroSizedMatrix();
+ zeroSizedMatrix >();
+ zeroSizedMatrix >();
+ zeroSizedMatrix >();
+ zeroSizedMatrix >();
zeroSizedVector();
zeroSizedVector();
zeroSizedVector();
+ zeroSizedVector >();
}
diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/AdolcForward b/gtsam/3rdparty/Eigen/unsupported/Eigen/AdolcForward
index 6c0a68d67..477c75378 100644
--- a/gtsam/3rdparty/Eigen/unsupported/Eigen/AdolcForward
+++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/AdolcForward
@@ -79,33 +79,22 @@ namespace Eigen {
} // namespace Eigen
-// the Adolc's type adouble is defined in the adtl namespace
-// therefore, the following internal::* functions *must* be defined
-// in the same namespace
-namespace Eigen {
+// Eigen requires a few additional functions which must be defined in the same namespace
+// as the custom scalar type's own namespace
+namespace adtl {
- namespace internal {
-
- inline const adtl::adouble& conj(const adtl::adouble& x) { return x; }
- inline const adtl::adouble& real(const adtl::adouble& x) { return x; }
- inline adtl::adouble imag(const adtl::adouble&) { return 0.; }
- inline adtl::adouble abs(const adtl::adouble& x) { return adtl::fabs(x); }
- inline adtl::adouble abs2(const adtl::adouble& x) { return x*x; }
-
- using adtl::sqrt;
- using adtl::exp;
- using adtl::log;
- using adtl::sin;
- using adtl::cos;
- using adtl::pow;
-
- }
+inline const adouble& conj(const adouble& x) { return x; }
+inline const adouble& real(const adouble& x) { return x; }
+inline adouble imag(const adouble&) { return 0.; }
+inline adouble abs(const adouble& x) { return fabs(x); }
+inline adouble abs2(const adouble& x) { return x*x; }
}
namespace Eigen {
template<> struct NumTraits
+ : NumTraits
{
typedef adtl::adouble Real;
typedef adtl::adouble NonInteger;
diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/BVH
index f307da2f7..d50b2bdab 100644
--- a/gtsam/3rdparty/Eigen/unsupported/Eigen/BVH
+++
b/gtsam/3rdparty/Eigen/unsupported/Eigen/BVH @@ -97,8 +97,9 @@ namespace Eigen { * The following is a simple but complete example for how to use the BVH to accelerate the search for a closest red-blue point pair: * \include BVH_Example.cpp * Output: \verbinclude BVH_Example.out - */ +} + //@{ #include "src/BVH/BVAlgorithms.h" @@ -106,6 +107,4 @@ namespace Eigen { //@} -} - #endif // EIGEN_BVH_MODULE_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/CMakeLists.txt b/gtsam/3rdparty/Eigen/unsupported/Eigen/CMakeLists.txt index 219ec8ead..e961e72c5 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/CMakeLists.txt +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/CMakeLists.txt @@ -1,6 +1,6 @@ set(Eigen_HEADERS AdolcForward BVH IterativeSolvers MatrixFunctions MoreVectorization AutoDiff AlignedVector3 Polynomials - CholmodSupport FFT NonLinearOptimization SparseExtra SuperLUSupport UmfPackSupport IterativeSolvers - NumericalDiff Skyline MPRealSupport OpenGLSupport + FFT NonLinearOptimization SparseExtra IterativeSolvers + NumericalDiff Skyline MPRealSupport OpenGLSupport KroneckerProduct Splines ) install(FILES diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/CholmodSupport b/gtsam/3rdparty/Eigen/unsupported/Eigen/CholmodSupport deleted file mode 100644 index 8a4a130c3..000000000 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/CholmodSupport +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef EIGEN_CHOLMODSUPPORT_MODULE_H -#define EIGEN_CHOLMODSUPPORT_MODULE_H - -#include "SparseExtra" - -#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" - -extern "C" { - #include -} - -namespace Eigen { - -/** \ingroup Unsupported_modules - * \defgroup CholmodSupport_Module Cholmod Support module - * - * - * \code - * #include - * \endcode - */ - -struct Cholmod {}; -#include "src/SparseExtra/CholmodSupportLegacy.h" -#include "src/SparseExtra/CholmodSupport.h" - - -} // namespace Eigen - -#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" - -#endif // EIGEN_CHOLMODSUPPORT_MODULE_H - diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/FFT b/gtsam/3rdparty/Eigen/unsupported/Eigen/FFT index c56bd63d6..e2ec71307 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/FFT +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/FFT @@ -86,23 +86,23 @@ #ifdef EIGEN_FFTW_DEFAULT // FFTW: faster, GPL -- incompatible with Eigen in LGPL form, bigger code size # include +# include "src/FFT/ei_fftw_impl.h" namespace Eigen { -# include "src/FFT/ei_fftw_impl.h" //template typedef struct internal::fftw_impl default_fft_impl; this does not work template struct default_fft_impl : public internal::fftw_impl {}; } #elif defined EIGEN_MKL_DEFAULT // TODO // intel Math Kernel Library: fastest, commercial -- may be incompatible with Eigen in GPL form +# include "src/FFT/ei_imklfft_impl.h" namespace Eigen { -# include "src/FFT/ei_imklfft_impl.h" template struct default_fft_impl : public internal::imklfft_impl {}; } #else // internal::kissfft_impl: small, free, reasonably efficient default, derived from kissfft // +# include "src/FFT/ei_kissfft_impl.h" namespace Eigen { -# include "src/FFT/ei_kissfft_impl.h" template struct default_fft_impl : public internal::kissfft_impl {}; } diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/IterativeSolvers b/gtsam/3rdparty/Eigen/unsupported/Eigen/IterativeSolvers index bf1a9460b..4645153aa 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/IterativeSolvers +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/IterativeSolvers @@ -25,27 +25,31 @@ #ifndef EIGEN_ITERATIVE_SOLVERS_MODULE_H #define 
EIGEN_ITERATIVE_SOLVERS_MODULE_H -#include - -namespace Eigen { +#include /** \ingroup Unsupported_modules * \defgroup IterativeSolvers_Module Iterative solvers module * This module aims to provide various iterative linear and non linear solver algorithms. * It currently provides: * - a constrained conjugate gradient - * + * - a Householder GMRES implementation * \code * #include * \endcode */ //@{ +#include "../../Eigen/src/misc/Solve.h" +#include "../../Eigen/src/misc/SparseSolve.h" + #include "src/IterativeSolvers/IterationController.h" #include "src/IterativeSolvers/ConstrainedConjGrad.h" +#include "src/IterativeSolvers/IncompleteLU.h" +#include "../../Eigen/Jacobi" +#include "../../Eigen/Householder" +#include "src/IterativeSolvers/GMRES.h" +//#include "src/IterativeSolvers/SSORPreconditioner.h" //@} -} - #endif // EIGEN_ITERATIVE_SOLVERS_MODULE_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/KroneckerProduct b/gtsam/3rdparty/Eigen/unsupported/Eigen/KroneckerProduct new file mode 100644 index 000000000..796e386ad --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/KroneckerProduct @@ -0,0 +1,26 @@ +#ifndef EIGEN_KRONECKER_PRODUCT_MODULE_H +#define EIGEN_KRONECKER_PRODUCT_MODULE_H + +#include "../../Eigen/Core" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +namespace Eigen { + +/** \ingroup Unsupported_modules + * \defgroup KroneckerProduct_Module KroneckerProduct module + * + * This module contains an experimental Kronecker product implementation. + * + * \code + * #include + * \endcode + */ + +} // namespace Eigen + +#include "src/KroneckerProduct/KroneckerTensorProduct.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_KRONECKER_PRODUCT_MODULE_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/MPRealSupport b/gtsam/3rdparty/Eigen/unsupported/Eigen/MPRealSupport index 8f2396353..b5fe5e404 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/MPRealSupport +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/MPRealSupport @@ -30,11 +30,12 @@ #ifndef EIGEN_MPREALSUPPORT_MODULE_H #define EIGEN_MPREALSUPPORT_MODULE_H +#include #include #include namespace Eigen { - + /** \ingroup Unsupported_modules * \defgroup MPRealSupport_Module MPFRC++ Support module * @@ -43,7 +44,7 @@ namespace Eigen { * \endcode * * This module provides support for multi precision floating point numbers - * via the MPFR C++ + * via the MPFR C++ * library which itself is built upon MPFR/GMP. * * You can find a copy of MPFR C++ that is known to be compatible in the unsupported/test/mpreal folder. 
@@ -52,7 +53,7 @@ namespace Eigen { * \code #include -#include +#include #include using namespace mpfr; using namespace Eigen; @@ -106,7 +107,7 @@ int main() } }; - namespace internal { +namespace internal { template<> mpfr::mpreal random() { @@ -146,8 +147,17 @@ int main() { return a <= b || isApprox(a, b, prec); } + + template<> inline long double cast(const mpfr::mpreal& x) + { return x.toLDouble(); } + template<> inline double cast(const mpfr::mpreal& x) + { return x.toDouble(); } + template<> inline long cast(const mpfr::mpreal& x) + { return x.toLong(); } + template<> inline int cast(const mpfr::mpreal& x) + { return int(x.toLong()); } - } // end namespace internal +} // end namespace internal } #endif // EIGEN_MPREALSUPPORT_MODULE_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/MatrixFunctions b/gtsam/3rdparty/Eigen/unsupported/Eigen/MatrixFunctions index d39c49e53..13eda8fc8 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/MatrixFunctions +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/MatrixFunctions @@ -25,6 +25,7 @@ #ifndef EIGEN_MATRIX_FUNCTIONS #define EIGEN_MATRIX_FUNCTIONS +#include #include #include #include @@ -33,8 +34,6 @@ #include #include -namespace Eigen { - /** \ingroup Unsupported_modules * \defgroup MatrixFunctions_Module Matrix functions module * \brief This module aims to provide various methods for the computation of @@ -50,9 +49,11 @@ namespace Eigen { * - \ref matrixbase_cos "MatrixBase::cos()", for computing the matrix cosine * - \ref matrixbase_cosh "MatrixBase::cosh()", for computing the matrix hyperbolic cosine * - \ref matrixbase_exp "MatrixBase::exp()", for computing the matrix exponential + * - \ref matrixbase_log "MatrixBase::log()", for computing the matrix logarithm * - \ref matrixbase_matrixfunction "MatrixBase::matrixFunction()", for computing general matrix functions * - \ref matrixbase_sin "MatrixBase::sin()", for computing the matrix sine * - \ref matrixbase_sinh "MatrixBase::sinh()", for computing the matrix hyperbolic sine + * - \ref matrixbase_sqrt "MatrixBase::sqrt()", for computing the matrix square root * * These methods are the main entry points to this module. * @@ -69,6 +70,8 @@ namespace Eigen { #include "src/MatrixFunctions/MatrixExponential.h" #include "src/MatrixFunctions/MatrixFunction.h" +#include "src/MatrixFunctions/MatrixSquareRoot.h" +#include "src/MatrixFunctions/MatrixLogarithm.h" @@ -166,10 +169,67 @@ the z-axis. \include MatrixExponential.cpp Output: \verbinclude MatrixExponential.out -\note \p M has to be a matrix of \c float, \c double, -\c complex or \c complex . +\note \p M has to be a matrix of \c float, \c double, \c long double +\c complex, \c complex, or \c complex . +\section matrixbase_log MatrixBase::log() + +Compute the matrix logarithm. + +\code +const MatrixLogarithmReturnValue MatrixBase::log() const +\endcode + +\param[in] M invertible matrix whose logarithm is to be computed. +\returns expression representing the matrix logarithm root of \p M. + +The matrix logarithm of \f$ M \f$ is a matrix \f$ X \f$ such that +\f$ \exp(X) = M \f$ where exp denotes the matrix exponential. As for +the scalar logarithm, the equation \f$ \exp(X) = M \f$ may have +multiple solutions; this function returns a matrix whose eigenvalues +have imaginary part in the interval \f$ (-\pi,\pi] \f$. + +In the real case, the matrix \f$ M \f$ should be invertible and +it should have no eigenvalues which are real and negative (pairs of +complex conjugate eigenvalues are allowed). In the complex case, it +only needs to be invertible. 
+ +This function computes the matrix logarithm using the Schur-Parlett +algorithm as implemented by MatrixBase::matrixFunction(). The +logarithm of an atomic block is computed by MatrixLogarithmAtomic, +which uses direct computation for 1-by-1 and 2-by-2 blocks and an +inverse scaling-and-squaring algorithm for bigger blocks, with the +square roots computed by MatrixBase::sqrt(). + +Details of the algorithm can be found in Section 11.6.2 of: +Nicholas J. Higham, +Functions of Matrices: Theory and Computation, +SIAM 2008. ISBN 978-0-898716-46-7. + +Example: The following program checks that +\f[ \log \left[ \begin{array}{ccc} + \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\ + \frac12\sqrt2 & \frac12\sqrt2 & 0 \\ + 0 & 0 & 1 + \end{array} \right] = \left[ \begin{array}{ccc} + 0 & \frac14\pi & 0 \\ + -\frac14\pi & 0 & 0 \\ + 0 & 0 & 0 + \end{array} \right]. \f] +This corresponds to a rotation of \f$ \frac14\pi \f$ radians around +the z-axis. This is the inverse of the example used in the +documentation of \ref matrixbase_exp "exp()". + +\include MatrixLogarithm.cpp +Output: \verbinclude MatrixLogarithm.out + +\note \p M has to be a matrix of \c float, \c double, \c long double +\c complex, \c complex, or \c complex . + +\sa MatrixBase::exp(), MatrixBase::matrixFunction(), + class MatrixLogarithmAtomic, MatrixBase::sqrt(). + \section matrixbase_matrixfunction MatrixBase::matrixFunction() @@ -245,7 +305,7 @@ Output: \verbinclude MatrixSine.out -\section matrixbase_sinh const MatrixBase::sinh() +\section matrixbase_sinh MatrixBase::sinh() Compute the matrix hyperbolic sine. @@ -261,9 +321,75 @@ This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdSt Example: \include MatrixSinh.cpp Output: \verbinclude MatrixSinh.out -*/ -} +\section matrixbase_sqrt MatrixBase::sqrt() + +Compute the matrix square root. + +\code +const MatrixSquareRootReturnValue MatrixBase::sqrt() const +\endcode + +\param[in] M invertible matrix whose square root is to be computed. +\returns expression representing the matrix square root of \p M. + +The matrix square root of \f$ M \f$ is the matrix \f$ M^{1/2} \f$ +whose square is the original matrix; so if \f$ S = M^{1/2} \f$ then +\f$ S^2 = M \f$. + +In the real case, the matrix \f$ M \f$ should be invertible and +it should have no eigenvalues which are real and negative (pairs of +complex conjugate eigenvalues are allowed). In that case, the matrix +has a square root which is also real, and this is the square root +computed by this function. + +The matrix square root is computed by first reducing the matrix to +quasi-triangular form with the real Schur decomposition. The square +root of the quasi-triangular matrix can then be computed directly. The +cost is approximately \f$ 25 n^3 \f$ real flops for the real Schur +decomposition and \f$ 3\frac13 n^3 \f$ real flops for the remainder +(though the computation time in practice is likely more than this +indicates). + +Details of the algorithm can be found in: Nicholas J. Highan, +"Computing real square roots of a real matrix", Linear Algebra +Appl., 88/89:405–430, 1987. + +If the matrix is positive-definite symmetric, then the square +root is also positive-definite symmetric. In this case, it is best to +use SelfAdjointEigenSolver::operatorSqrt() to compute it. + +In the complex case, the matrix \f$ M \f$ should be invertible; +this is a restriction of the algorithm. The square root computed by +this algorithm is the one whose eigenvalues have an argument in the +interval \f$ (-\frac12\pi, \frac12\pi] \f$. 
This is the usual branch +cut. + +The computation is the same as in the real case, except that the +complex Schur decomposition is used to reduce the matrix to a +triangular matrix. The theoretical cost is the same. Details are in: +Åke Björck and Sven Hammarling, "A Schur method for the +square root of a matrix", Linear Algebra Appl., +52/53:127–140, 1983. + +Example: The following program checks that the square root of +\f[ \left[ \begin{array}{cc} + \cos(\frac13\pi) & -\sin(\frac13\pi) \\ + \sin(\frac13\pi) & \cos(\frac13\pi) + \end{array} \right], \f] +corresponding to a rotation over 60 degrees, is a rotation over 30 degrees: +\f[ \left[ \begin{array}{cc} + \cos(\frac16\pi) & -\sin(\frac16\pi) \\ + \sin(\frac16\pi) & \cos(\frac16\pi) + \end{array} \right]. \f] + +\include MatrixSquareRoot.cpp +Output: \verbinclude MatrixSquareRoot.out + +\sa class RealSchur, class ComplexSchur, class MatrixSquareRoot, + SelfAdjointEigenSolver::operatorSqrt(). + +*/ #endif // EIGEN_MATRIX_FUNCTIONS diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/MoreVectorization b/gtsam/3rdparty/Eigen/unsupported/Eigen/MoreVectorization index 26a01cd29..9f0a39f75 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/MoreVectorization +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/MoreVectorization @@ -9,8 +9,8 @@ namespace Eigen { * \defgroup MoreVectorization More vectorization module */ -#include "src/MoreVectorization/MathFunctions.h" - } +#include "src/MoreVectorization/MathFunctions.h" + #endif // EIGEN_MOREVECTORIZATION_MODULE_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/NonLinearOptimization b/gtsam/3rdparty/Eigen/unsupported/Eigen/NonLinearOptimization index e19db33cc..0a384d649 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/NonLinearOptimization +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/NonLinearOptimization @@ -32,8 +32,6 @@ #include #include -namespace Eigen { - /** \ingroup Unsupported_modules * \defgroup NonLinearOptimization_Module Non linear optimization module * @@ -147,8 +145,5 @@ namespace Eigen { #include "src/NonLinearOptimization/HybridNonLinearSolver.h" #include "src/NonLinearOptimization/LevenbergMarquardt.h" -} - - #endif // EIGEN_NONLINEAROPTIMIZATION_MODULE diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/NumericalDiff b/gtsam/3rdparty/Eigen/unsupported/Eigen/NumericalDiff index 2a59c14d5..eebee076d 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/NumericalDiff +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/NumericalDiff @@ -59,12 +59,13 @@ namespace Eigen { * package. 
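A minimal sketch of the log() and sqrt() entry points documented above (illustrative; the rotation matrix is chosen so that both functions are well defined, i.e. invertible with no negative real eigenvalues):

\code
#include <unsupported/Eigen/MatrixFunctions>
#include <iostream>

int main()
{
  // 90-degree rotation about the z-axis.
  Eigen::Matrix3d R;
  R << 0, -1, 0,
       1,  0, 0,
       0,  0, 1;

  Eigen::Matrix3d logR  = R.log();   // matrix logarithm (a skew-symmetric generator)
  Eigen::Matrix3d sqrtR = R.sqrt();  // principal square root (a 45-degree rotation)

  Eigen::Matrix3d back = logR.exp(); // exp(log(R)) should reproduce R
  std::cout << "log(R) =\n"  << logR  << "\n\n"
            << "sqrt(R) =\n" << sqrtR << "\n\n"
            << "|exp(log(R)) - R| = " << (back - R).norm() << std::endl;
  return 0;
}
\endcode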
* */ +} + //@{ #include "src/NumericalDiff/NumericalDiff.h" //@} -} #endif // EIGEN_NUMERICALDIFF_MODULE diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/Polynomials b/gtsam/3rdparty/Eigen/unsupported/Eigen/Polynomials index 2c2f3e100..fa58b006d 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/Polynomials +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/Polynomials @@ -16,8 +16,6 @@ #undef EIGEN_HIDE_HEAVY_CODE #endif -namespace Eigen { - /** \ingroup Unsupported_modules * \defgroup Polynomials_Module Polynomials module * @@ -129,8 +127,6 @@ namespace Eigen { Output: \verbinclude PolynomialSolver1.out */ -} // namespace Eigen - #include #endif // EIGEN_POLYNOMIALS_MODULE_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/Skyline b/gtsam/3rdparty/Eigen/unsupported/Eigen/Skyline index 5247b2eab..c9823f358 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/Skyline +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/Skyline @@ -11,15 +11,13 @@ #include #include -namespace Eigen { - - /** \ingroup Unsupported_modules - * \defgroup Skyline_Module Skyline module - * - * - * - * - */ +/** \ingroup Unsupported_modules + * \defgroup Skyline_Module Skyline module + * + * + * + * + */ #include "src/Skyline/SkylineUtil.h" #include "src/Skyline/SkylineMatrixBase.h" @@ -28,8 +26,6 @@ namespace Eigen { #include "src/Skyline/SkylineInplaceLU.h" #include "src/Skyline/SkylineProduct.h" -} // namespace Eigen - #include "Eigen/src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SKYLINE_MODULE_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/SparseExtra b/gtsam/3rdparty/Eigen/unsupported/Eigen/SparseExtra index 7bab7c72b..340c34736 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/SparseExtra +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/SparseExtra @@ -10,13 +10,13 @@ #include #include #include +#include +#include #ifdef EIGEN_GOOGLEHASH_SUPPORT #include #endif -namespace Eigen { - /** \ingroup Unsupported_modules * \defgroup SparseExtra_Module SparseExtra module * @@ -27,42 +27,20 @@ namespace Eigen { * \endcode */ -struct DefaultBackend {}; - - -// solver flags -enum { - CompleteFactorization = 0x0000, // the default - IncompleteFactorization = 0x0001, - MemoryEfficient = 0x0002, - - // For LLT Cholesky: - SupernodalMultifrontal = 0x0010, - SupernodalLeftLooking = 0x0020, - - // Ordering methods: - NaturalOrdering = 0x0100, // the default - MinimumDegree_AT_PLUS_A = 0x0200, - MinimumDegree_ATA = 0x0300, - ColApproxMinimumDegree = 0x0400, - Metis = 0x0500, - Scotch = 0x0600, - Chaco = 0x0700, - OrderingMask = 0x0f00 -}; #include "../../Eigen/src/misc/Solve.h" +#include "../../Eigen/src/misc/SparseSolve.h" +#include "src/SparseExtra/DynamicSparseMatrix.h" +#include "src/SparseExtra/BlockOfDynamicSparseMatrix.h" #include "src/SparseExtra/RandomSetter.h" -#include "src/SparseExtra/Solve.h" -#include "src/SparseExtra/Amd.h" -#include "src/SparseExtra/SimplicialCholesky.h" -#include "src/SparseExtra/SparseLLT.h" -#include "src/SparseExtra/SparseLDLTLegacy.h" -#include "src/SparseExtra/SparseLU.h" +#include "src/SparseExtra/MarketIO.h" -} // namespace Eigen +#if !defined(_WIN32) +#include +#include "src/SparseExtra/MatrixMarketIterator.h" +#endif #include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/EigenvaluesCommon.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/Splines similarity index 63% rename from gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/EigenvaluesCommon.h rename to gtsam/3rdparty/Eigen/unsupported/Eigen/Splines index 749bea795..33769b0e4 
100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/EigenvaluesCommon.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/Splines @@ -1,7 +1,7 @@ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // -// Copyright (C) 2010 Jitse Niesen +// Copyright (C) 20010-2011 Hauke Heibel // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public @@ -22,10 +22,25 @@ // License and a copy of the GNU General Public License along with // Eigen. If not, see . -#ifndef EIGEN_EIGENVALUES_COMMON_H -#define EIGEN_EIGENVALUES_COMMON_H +#ifndef EIGEN_SPLINES_MODULE_H +#define EIGEN_SPLINES_MODULE_H +namespace Eigen +{ +/** \ingroup Unsupported_modules + * \defgroup Splines_Module Spline and spline fitting module + * + * This module provides a simple multi-dimensional spline class while + * offering most basic functionality to fit a spline to point sets. + * + * \code + * #include + * \endcode + */ +} +#include "src/Splines/SplineFwd.h" +#include "src/Splines/Spline.h" +#include "src/Splines/SplineFitting.h" -#endif // EIGEN_EIGENVALUES_COMMON_H - +#endif // EIGEN_SPLINES_MODULE_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/SuperLUSupport b/gtsam/3rdparty/Eigen/unsupported/Eigen/SuperLUSupport deleted file mode 100644 index 89cb649b2..000000000 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/SuperLUSupport +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef EIGEN_SUPERLUSUPPORT_MODULE_H -#define EIGEN_SUPERLUSUPPORT_MODULE_H - -#include "SparseExtra" - -#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" - -typedef int int_t; -#include -#include -#include - -namespace Eigen { struct SluMatrix; } - -namespace Eigen { - -/** \ingroup Unsupported_modules - * \defgroup SuperLUSupport_Module Super LU support - * - * - * - * \code - * #include - * \endcode - */ - -struct SuperLU {}; - -#include "src/SparseExtra/SuperLUSupport.h" - -} // namespace Eigen - -#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" - -#endif // EIGEN_SUPERLUSUPPORT_MODULE_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/UmfPackSupport b/gtsam/3rdparty/Eigen/unsupported/Eigen/UmfPackSupport deleted file mode 100644 index c8b1e7c1f..000000000 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/UmfPackSupport +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef EIGEN_UMFPACKSUPPORT_MODULE_H -#define EIGEN_UMFPACKSUPPORT_MODULE_H - -#include "SparseExtra" - -#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" - -extern "C" { -#include -} - -namespace Eigen { - -/** \ingroup Unsupported_modules - * \defgroup UmfPackSupport_Module UmfPack support module - * - * - * - * - * \code - * #include - * \endcode - */ - -struct UmfPack {}; - -#include "src/SparseExtra/UmfPackSupport.h" - -} // namespace Eigen - -#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" - -#endif // EIGEN_UMFPACKSUPPORT_MODULE_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h index 4fe168a88..e5442a5ef 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h @@ -40,7 +40,7 @@ public: template AutoDiffJacobian(const T0& a0, const T1& a1) : Functor(a0, a1) {} template - AutoDiffJacobian(const T0& a0, const T1& a1, const T1& a2) : Functor(a0, a1, a2) {} + AutoDiffJacobian(const T0& a0, const T1& a1, const T2& a2) : Functor(a0, a1, a2) {} enum { 
InputsAtCompileTime = Functor::InputsAtCompileTime, diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h index 7517035d9..70d3222f5 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h @@ -101,7 +101,7 @@ class AutoDiffScalar /** Conversion from a scalar constant to an active scalar. * The derivatives are set to zero. */ - explicit AutoDiffScalar(const Real& value) + /*explicit*/ AutoDiffScalar(const Real& value) : m_value(value) { if(m_derivatives.size()>0) @@ -151,6 +151,27 @@ class AutoDiffScalar inline const DerType& derivatives() const { return m_derivatives; } inline DerType& derivatives() { return m_derivatives; } + inline bool operator< (const Scalar& other) const { return m_value < other; } + inline bool operator<=(const Scalar& other) const { return m_value <= other; } + inline bool operator> (const Scalar& other) const { return m_value > other; } + inline bool operator>=(const Scalar& other) const { return m_value >= other; } + inline bool operator==(const Scalar& other) const { return m_value == other; } + inline bool operator!=(const Scalar& other) const { return m_value != other; } + + friend inline bool operator< (const Scalar& a, const AutoDiffScalar& b) { return a < b.value(); } + friend inline bool operator<=(const Scalar& a, const AutoDiffScalar& b) { return a <= b.value(); } + friend inline bool operator> (const Scalar& a, const AutoDiffScalar& b) { return a > b.value(); } + friend inline bool operator>=(const Scalar& a, const AutoDiffScalar& b) { return a >= b.value(); } + friend inline bool operator==(const Scalar& a, const AutoDiffScalar& b) { return a == b.value(); } + friend inline bool operator!=(const Scalar& a, const AutoDiffScalar& b) { return a != b.value(); } + + template inline bool operator< (const AutoDiffScalar& b) const { return m_value < b.value(); } + template inline bool operator<=(const AutoDiffScalar& b) const { return m_value <= b.value(); } + template inline bool operator> (const AutoDiffScalar& b) const { return m_value > b.value(); } + template inline bool operator>=(const AutoDiffScalar& b) const { return m_value >= b.value(); } + template inline bool operator==(const AutoDiffScalar& b) const { return m_value == b.value(); } + template inline bool operator!=(const AutoDiffScalar& b) const { return m_value != b.value(); } + inline const AutoDiffScalar operator+(const Scalar& other) const { return AutoDiffScalar(m_value + other, m_derivatives); @@ -195,6 +216,24 @@ class AutoDiffScalar return *this; } + inline const AutoDiffScalar operator-(const Scalar& b) const + { + return AutoDiffScalar(m_value - b, m_derivatives); + } + + friend inline const AutoDiffScalar, const DerType> > + operator-(const Scalar& a, const AutoDiffScalar& b) + { + return AutoDiffScalar, const DerType> > + (a - b.value(), -b.derivatives()); + } + + inline AutoDiffScalar& operator-=(const Scalar& other) + { + value() -= other; + return *this; + } + template inline const AutoDiffScalar, const DerType,const typename internal::remove_all::type> > operator-(const AutoDiffScalar& other) const @@ -213,7 +252,6 @@ class AutoDiffScalar return *this; } - template inline const AutoDiffScalar, const DerType> > operator-() const { @@ -267,7 +305,7 @@ class AutoDiffScalar { return AutoDiffScalar, const DerType> >( other / a.value(), - a.derivatives() * (-Scalar(1)/other)); + 
a.derivatives() * (Scalar(-other) / (a.value()*a.value()))); } // inline const AutoDiffScalar, DerType>::Type > @@ -330,6 +368,19 @@ class AutoDiffScalar return *this; } + inline AutoDiffScalar& operator/=(const Scalar& other) + { + *this = *this / other; + return *this; + } + + template + inline AutoDiffScalar& operator/=(const AutoDiffScalar& other) + { + *this = *this / other; + return *this; + } + protected: Scalar m_value; DerType m_derivatives; @@ -463,16 +514,14 @@ template ReturnType; }; -template -struct scalar_product_traits,T> +template +struct scalar_product_traits,typename DerType::Scalar> { typedef AutoDiffScalar ReturnType; }; } // end namespace internal -} // end namespace Eigen - #define EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(FUNC,CODE) \ template \ inline const Eigen::AutoDiffScalar::type>::Scalar>, const typename Eigen::internal::remove_all::type> > \ @@ -483,87 +532,109 @@ struct scalar_product_traits,T> CODE; \ } -namespace std -{ - EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(abs, - return ReturnType(std::abs(x.value()), x.derivatives() * (sign(x.value())));) - - EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(sqrt, - Scalar sqrtx = std::sqrt(x.value()); - return ReturnType(sqrtx,x.derivatives() * (Scalar(0.5) / sqrtx));) - - EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(cos, - return ReturnType(std::cos(x.value()), x.derivatives() * (-std::sin(x.value())));) - - EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(sin, - return ReturnType(std::sin(x.value()),x.derivatives() * std::cos(x.value()));) - - EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(exp, - Scalar expx = std::exp(x.value()); - return ReturnType(expx,x.derivatives() * expx);) - - EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(log, - return ReturnType(std::log(x.value()),x.derivatives() * (Scalar(1)/x.value()));) - - template - inline const Eigen::AutoDiffScalar::Scalar>, const DerType> > - pow(const Eigen::AutoDiffScalar& x, typename Eigen::internal::traits::Scalar y) - { - using namespace Eigen; - typedef typename Eigen::internal::traits::Scalar Scalar; - return AutoDiffScalar, const DerType> >( - std::pow(x.value(),y), - x.derivatives() * (y * std::pow(x.value(),y-1))); - } - -} - -namespace Eigen { - -namespace internal { - template inline const AutoDiffScalar& conj(const AutoDiffScalar& x) { return x; } template inline const AutoDiffScalar& real(const AutoDiffScalar& x) { return x; } template inline typename DerType::Scalar imag(const AutoDiffScalar&) { return 0.; } +template +inline AutoDiffScalar (min)(const AutoDiffScalar& x, const T& y) { return (x <= y ? x : y); } +template +inline AutoDiffScalar (max)(const AutoDiffScalar& x, const T& y) { return (x >= y ? x : y); } +template +inline AutoDiffScalar (min)(const T& x, const AutoDiffScalar& y) { return (x < y ? x : y); } +template +inline AutoDiffScalar (max)(const T& x, const AutoDiffScalar& y) { return (x > y ? x : y); } +#define sign(x) x >= 0 ? 
1 : -1 // required for abs function below + EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(abs, + using std::abs; return ReturnType(abs(x.value()), x.derivatives() * (sign(x.value())));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(abs2, + using internal::abs2; return ReturnType(abs2(x.value()), x.derivatives() * (Scalar(2)*x.value()));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(sqrt, + using std::sqrt; Scalar sqrtx = sqrt(x.value()); return ReturnType(sqrtx,x.derivatives() * (Scalar(0.5) / sqrtx));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(cos, + using std::cos; + using std::sin; return ReturnType(cos(x.value()), x.derivatives() * (-sin(x.value())));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(sin, + using std::sin; + using std::cos; return ReturnType(sin(x.value()),x.derivatives() * cos(x.value()));) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(exp, + using std::exp; Scalar expx = exp(x.value()); return ReturnType(expx,x.derivatives() * expx);) EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(log, + using std::log; return ReturnType(log(x.value()),x.derivatives() * (Scalar(1)/x.value()));) template -inline const AutoDiffScalar::Scalar>, DerType> > -pow(const AutoDiffScalar& x, typename traits::Scalar y) -{ return std::pow(x,y);} +inline const Eigen::AutoDiffScalar::Scalar>, const DerType> > +pow(const Eigen::AutoDiffScalar& x, typename Eigen::internal::traits::Scalar y) +{ + using namespace Eigen; + typedef typename Eigen::internal::traits::Scalar Scalar; + return AutoDiffScalar, const DerType> >( + std::pow(x.value(),y), + x.derivatives() * (y * std::pow(x.value(),y-1))); +} -} // end namespace internal + +template +inline const AutoDiffScalar::Scalar,Dynamic,1> > +atan2(const AutoDiffScalar& a, const AutoDiffScalar& b) +{ + using std::atan2; + using std::max; + typedef typename internal::traits::Scalar Scalar; + typedef AutoDiffScalar > PlainADS; + PlainADS ret; + ret.value() = atan2(a.value(), b.value()); + + Scalar tmp2 = a.value() * a.value(); + Scalar tmp3 = b.value() * b.value(); + Scalar tmp4 = tmp3/(tmp2+tmp3); + + if (tmp4!=0) + ret.derivatives() = (a.derivatives() * b.value() - a.value() * b.derivatives()) * (tmp2+tmp3); + + return ret; +} + +EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(tan, + using std::tan; + using std::cos; + return ReturnType(tan(x.value()),x.derivatives() * (Scalar(1)/internal::abs2(cos(x.value()))));) + +EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(asin, + using std::sqrt; + using std::asin; + return ReturnType(asin(x.value()),x.derivatives() * (Scalar(1)/sqrt(1-internal::abs2(x.value()))));) + +EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(acos, + using std::sqrt; + using std::acos; + return ReturnType(acos(x.value()),x.derivatives() * (Scalar(-1)/sqrt(1-internal::abs2(x.value()))));) #undef EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY template struct NumTraits > : NumTraits< typename NumTraits::Real > { + typedef AutoDiffScalar::Real,DerType::RowsAtCompileTime,DerType::ColsAtCompileTime> > Real; typedef AutoDiffScalar NonInteger; typedef AutoDiffScalar& Nested; enum{ diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/BVH/BVAlgorithms.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/BVH/BVAlgorithms.h index d65a97740..6cba656ff 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/BVH/BVAlgorithms.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/BVH/BVAlgorithms.h @@ -25,6 +25,8 @@ #ifndef EIGEN_BVALGORITHMS_H #define EIGEN_BVALGORITHMS_H +namespace Eigen { + namespace internal { #ifndef EIGEN_PARSED_BY_DOXYGEN @@ -301,4 +303,6 @@ typename Minimizer::Scalar BVMinimize(const BVH1 &tree1, const BVH2 &tree2, Mini return minimum; } +} // end 
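A minimal sketch of AutoDiffScalar with the comparison operators and math functions added above (illustrative; two independent variables are tracked in a fixed-size derivative vector):

\code
#include <unsupported/Eigen/AutoDiff>
#include <iostream>

int main()
{
  typedef Eigen::AutoDiffScalar<Eigen::Vector2d> ADScalar; // value plus 2 partials

  ADScalar x(1.0, 2, 0);  // value 1.0, occupies derivative slot 0 of 2
  ADScalar y(2.0, 2, 1);  // value 2.0, occupies derivative slot 1 of 2

  ADScalar f = x * y + sin(x);  // f(x,y) = x*y + sin(x)

  std::cout << "f     = " << f.value()          << "\n"
            << "df/dx = " << f.derivatives()[0] << "\n"       // y + cos(x)
            << "df/dy = " << f.derivatives()[1] << std::endl; // x
  return 0;
}
\endcode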
namespace Eigen + #endif // EIGEN_BVALGORITHMS_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/BVH/KdBVH.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/BVH/KdBVH.h index 028b4811e..13a154d5c 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/BVH/KdBVH.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/BVH/KdBVH.h @@ -25,6 +25,8 @@ #ifndef KDBVH_H_INCLUDED #define KDBVH_H_INCLUDED +namespace Eigen { + namespace internal { //internal pair class for the BVH--used instead of std::pair because of alignment @@ -69,7 +71,7 @@ struct get_boxes_helper { * * \param _Scalar The underlying scalar type of the bounding boxes * \param _Dim The dimension of the space in which the hierarchy lives - * \param _Object The object type that lives in the hierarchy. It must have value semantics. Either internal::bounding_box(_Object) must + * \param _Object The object type that lives in the hierarchy. It must have value semantics. Either bounding_box(_Object) must * be defined and return an AlignedBox<_Scalar, _Dim> or bounding boxes must be provided to the tree initializer. * * This class provides a simple (as opposed to optimized) implementation of a bounding volume hierarchy analogous to a Kd-tree. @@ -92,14 +94,14 @@ public: KdBVH() {} - /** Given an iterator range over \a Object references, constructs the BVH. Requires that internal::bounding_box(Object) return a Volume. */ + /** Given an iterator range over \a Object references, constructs the BVH. Requires that bounding_box(Object) return a Volume. */ template KdBVH(Iter begin, Iter end) { init(begin, end, 0, 0); } //int is recognized by init as not being an iterator type /** Given an iterator range over \a Object references and an iterator range over their bounding boxes, constructs the BVH */ template KdBVH(OIter begin, OIter end, BIter boxBegin, BIter boxEnd) { init(begin, end, boxBegin, boxEnd); } /** Given an iterator range over \a Object references, constructs the BVH, overwriting whatever is in there currently. - * Requires that internal::bounding_box(Object) return a Volume. */ + * Requires that bounding_box(Object) return a Volume. */ template void init(Iter begin, Iter end) { init(begin, end, 0, 0); } /** Given an iterator range over \a Object references and an iterator range over their bounding boxes, @@ -230,4 +232,6 @@ private: ObjectList objects; }; +} // end namespace Eigen + #endif //KDBVH_H_INCLUDED diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/CMakeLists.txt b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/CMakeLists.txt index cd442cefa..f3180b52b 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/CMakeLists.txt +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/CMakeLists.txt @@ -9,3 +9,5 @@ ADD_SUBDIRECTORY(NumericalDiff) ADD_SUBDIRECTORY(Polynomials) ADD_SUBDIRECTORY(Skyline) ADD_SUBDIRECTORY(SparseExtra) +ADD_SUBDIRECTORY(KroneckerProduct) +ADD_SUBDIRECTORY(Splines) diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/FFT/ei_fftw_impl.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/FFT/ei_fftw_impl.h index a06f6739e..5c36db70c 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/FFT/ei_fftw_impl.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/FFT/ei_fftw_impl.h @@ -22,6 +22,8 @@ // License and a copy of the GNU General Public License along with // Eigen. If not, see . 
+namespace Eigen { + namespace internal { // FFTW uses non-const arguments @@ -269,4 +271,6 @@ namespace internal { } // end namespace internal +} // end namespace Eigen + /* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/FFT/ei_kissfft_impl.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/FFT/ei_kissfft_impl.h index 04b98b083..c3cbb7f01 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/FFT/ei_kissfft_impl.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/FFT/ei_kissfft_impl.h @@ -22,6 +22,8 @@ // License and a copy of the GNU General Public License along with // Eigen. If not, see . +namespace Eigen { + namespace internal { // This FFT implementation was derived from kissfft http:sourceforge.net/projects/kissfft @@ -426,5 +428,6 @@ struct kissfft_impl } // end namespace internal -/* vim: set filetype=cpp et sw=2 ts=2 ai: */ +} // end namespace Eigen +/* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h index 4d8e183ee..f8a550553 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h @@ -50,6 +50,8 @@ #include +namespace Eigen { + namespace internal { /** \ingroup IterativeSolvers_Module @@ -195,4 +197,6 @@ void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x, } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_CONSTRAINEDCG_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/GMRES.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/GMRES.h new file mode 100644 index 000000000..e100617d1 --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/GMRES.h @@ -0,0 +1,394 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// Copyright (C) 2012 Kolja Brix +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_GMRES_H +#define EIGEN_GMRES_H + +namespace Eigen { + +namespace internal { + +/** + * Generalized Minimal Residual Algorithm based on the + * Arnoldi algorithm implemented with Householder reflections. 
+ * + * Parameters: + * \param mat matrix of linear system of equations + * \param Rhs right hand side vector of linear system of equations + * \param x on input: initial guess, on output: solution + * \param precond preconditioner used + * \param iters on input: maximum number of iterations to perform + * on output: number of iterations performed + * \param restart number of iterations for a restart + * \param tol_error on input: residual tolerance + * on output: residuum achieved + * + * \sa IterativeMethods::bicgstab() + * + * + * For references, please see: + * + * Saad, Y. and Schultz, M. H. + * GMRES: A Generalized Minimal Residual Algorithm for Solving Nonsymmetric Linear Systems. + * SIAM J.Sci.Stat.Comp. 7, 1986, pp. 856 - 869. + * + * Saad, Y. + * Iterative Methods for Sparse Linear Systems. + * Society for Industrial and Applied Mathematics, Philadelphia, 2003. + * + * Walker, H. F. + * Implementations of the GMRES method. + * Comput.Phys.Comm. 53, 1989, pp. 311 - 320. + * + * Walker, H. F. + * Implementation of the GMRES Method using Householder Transformations. + * SIAM J.Sci.Stat.Comp. 9, 1988, pp. 152 - 163. + * + */ +template +bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Preconditioner & precond, + int &iters, const int &restart, typename Dest::RealScalar & tol_error) { + + using std::sqrt; + using std::abs; + + typedef typename Dest::RealScalar RealScalar; + typedef typename Dest::Scalar Scalar; + typedef Matrix < RealScalar, Dynamic, 1 > RealVectorType; + typedef Matrix < Scalar, Dynamic, 1 > VectorType; + typedef Matrix < Scalar, Dynamic, Dynamic > FMatrixType; + + RealScalar tol = tol_error; + const int maxIters = iters; + iters = 0; + + const int m = mat.rows(); + + VectorType p0 = rhs - mat*x; + VectorType r0 = precond.solve(p0); +// RealScalar r0_sqnorm = r0.squaredNorm(); + + VectorType w = VectorType::Zero(restart + 1); + + FMatrixType H = FMatrixType::Zero(m, restart + 1); + VectorType tau = VectorType::Zero(restart + 1); + std::vector < JacobiRotation < Scalar > > G(restart); + + // generate first Householder vector + VectorType e; + RealScalar beta; + r0.makeHouseholder(e, tau.coeffRef(0), beta); + w(0)=(Scalar) beta; + H.bottomLeftCorner(m - 1, 1) = e; + + for (int k = 1; k <= restart; ++k) { + + ++iters; + + VectorType v = VectorType::Unit(m, k - 1), workspace(m); + + // apply Householder reflections H_{1} ... H_{k-1} to v + for (int i = k - 1; i >= 0; --i) { + v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data()); + } + + // apply matrix M to v: v = mat * v; + VectorType t=mat*v; + v=precond.solve(t); + + // apply Householder reflections H_{k-1} ... 
H_{1} to v + for (int i = 0; i < k; ++i) { + v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data()); + } + + if (v.tail(m - k).norm() != 0.0) { + + if (k <= restart) { + + // generate new Householder vector + VectorType e(m - k - 1); + RealScalar beta; + v.tail(m - k).makeHouseholder(e, tau.coeffRef(k), beta); + H.col(k).tail(m - k - 1) = e; + + // apply Householder reflection H_{k} to v + v.tail(m - k).applyHouseholderOnTheLeft(H.col(k).tail(m - k - 1), tau.coeffRef(k), workspace.data()); + + } + } + + if (k > 1) { + for (int i = 0; i < k - 1; ++i) { + // apply old Givens rotations to v + v.applyOnTheLeft(i, i + 1, G[i].adjoint()); + } + } + + if (k ().solveInPlace(y); + + // use Horner-like scheme to calculate solution vector + VectorType x_new = y(k - 1) * VectorType::Unit(m, k - 1); + + // apply Householder reflection H_{k} to x_new + x_new.tail(m - k + 1).applyHouseholderOnTheLeft(H.col(k - 1).tail(m - k), tau.coeffRef(k - 1), workspace.data()); + + for (int i = k - 2; i >= 0; --i) { + x_new += y(i) * VectorType::Unit(m, i); + // apply Householder reflection H_{i} to x_new + x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data()); + } + + x += x_new; + + if (stop) { + return true; + } else { + k=0; + + // reset data for a restart r0 = rhs - mat * x; + VectorType p0=mat*x; + VectorType p1=precond.solve(p0); + r0 = rhs - p1; +// r0_sqnorm = r0.squaredNorm(); + w = VectorType::Zero(restart + 1); + H = FMatrixType::Zero(m, restart + 1); + tau = VectorType::Zero(restart + 1); + + // generate first Householder vector + RealScalar beta; + r0.makeHouseholder(e, tau.coeffRef(0), beta); + w(0)=(Scalar) beta; + H.bottomLeftCorner(m - 1, 1) = e; + + } + + } + + + + } + + return false; + +} + +} + +template< typename _MatrixType, + typename _Preconditioner = DiagonalPreconditioner > +class GMRES; + +namespace internal { + +template< typename _MatrixType, typename _Preconditioner> +struct traits > +{ + typedef _MatrixType MatrixType; + typedef _Preconditioner Preconditioner; +}; + +} + +/** \ingroup IterativeLinearSolvers_Module + * \brief A GMRES solver for sparse square problems + * + * This class allows to solve for A.x = b sparse linear problems using a generalized minimal + * residual method. The vectors x and b can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. + * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner + * + * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() + * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations + * and NumTraits::epsilon() for the tolerance. + * + * This class can be used as the direct solver classes. Here is a typical usage example: + * \code + * int n = 10000; + * VectorXd x(n), b(n); + * SparseMatrix A(n,n); + * // fill A and b + * GMRES > solver(A); + * x = solver.solve(b); + * std::cout << "#iterations: " << solver.iterations() << std::endl; + * std::cout << "estimated error: " << solver.error() << std::endl; + * // update b, and solve again + * x = solver.solve(b); + * \endcode + * + * By default the iterations start with x=0 as an initial guess of the solution. + * One can control the start using the solveWithGuess() method. 
Here is a step by + * step execution example starting with a random guess and printing the evolution + * of the estimated error: + * * \code + * x = VectorXd::Random(n); + * solver.setMaxIterations(1); + * int i = 0; + * do { + * x = solver.solveWithGuess(b,x); + * std::cout << i << " : " << solver.error() << std::endl; + * ++i; + * } while (solver.info()!=Success && i<100); + * \endcode + * Note that such a step by step excution is slightly slower. + * + * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner + */ +template< typename _MatrixType, typename _Preconditioner> +class GMRES : public IterativeSolverBase > +{ + typedef IterativeSolverBase Base; + using Base::mp_matrix; + using Base::m_error; + using Base::m_iterations; + using Base::m_info; + using Base::m_isInitialized; + +private: + int m_restart; + +public: + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; + typedef typename MatrixType::RealScalar RealScalar; + typedef _Preconditioner Preconditioner; + +public: + + /** Default constructor. */ + GMRES() : Base(), m_restart(30) {} + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + GMRES(const MatrixType& A) : Base(A), m_restart(30) {} + + ~GMRES() {} + + /** Get the number of iterations after that a restart is performed. + */ + int get_restart() { return m_restart; } + + /** Set the number of iterations after that a restart is performed. + * \param restart number of iterations for a restarti, default is 30. + */ + void set_restart(const int restart) { m_restart=restart; } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A + * \a x0 as an initial solution. + * + * \sa compute() + */ + template + inline const internal::solve_retval_with_guess + solveWithGuess(const MatrixBase& b, const Guess& x0) const + { + eigen_assert(m_isInitialized && "GMRES is not initialized."); + eigen_assert(Base::rows()==b.rows() + && "GMRES::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval_with_guess + (*this, b.derived(), x0); + } + + /** \internal */ + template + void _solveWithGuess(const Rhs& b, Dest& x) const + { + bool failed = false; + for(int j=0; j + void _solve(const Rhs& b, Dest& x) const + { + x.setZero(); + _solveWithGuess(b,x); + } + +protected: + +}; + + +namespace internal { + + template +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef GMRES<_MatrixType, _Preconditioner> Dec; + EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec()._solve(rhs(),dst); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_GMRES_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h new file mode 100644 index 000000000..e02f1f0c8 --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h @@ -0,0 +1,128 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
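A minimal sketch of the GMRES solver introduced here, expanded from the usage outline in the class documentation (illustrative; the tridiagonal test matrix and tolerance are arbitrary, and the default DiagonalPreconditioner is used):

\code
#include <unsupported/Eigen/IterativeSolvers>
#include <Eigen/Sparse>
#include <vector>
#include <iostream>

int main()
{
  const int n = 1000;

  // Assemble a diagonally dominant tridiagonal matrix from triplets.
  std::vector<Eigen::Triplet<double> > coeffs;
  for (int i = 0; i < n; ++i) {
    coeffs.push_back(Eigen::Triplet<double>(i, i, 4.0));
    if (i > 0)     coeffs.push_back(Eigen::Triplet<double>(i, i - 1, -1.0));
    if (i + 1 < n) coeffs.push_back(Eigen::Triplet<double>(i, i + 1, -1.0));
  }
  Eigen::SparseMatrix<double> A(n, n);
  A.setFromTriplets(coeffs.begin(), coeffs.end());
  Eigen::VectorXd b = Eigen::VectorXd::Random(n);

  Eigen::GMRES<Eigen::SparseMatrix<double> > solver(A); // DiagonalPreconditioner by default
  solver.set_restart(30);                               // restart length exposed by this class
  solver.setTolerance(1e-10);
  Eigen::VectorXd x = solver.solve(b);

  std::cout << "#iterations: "       << solver.iterations()
            << "  estimated error: " << solver.error() << std::endl;
  return 0;
}
\endcode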
+// +// Copyright (C) 2011 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_INCOMPLETE_LU_H +#define EIGEN_INCOMPLETE_LU_H + +namespace Eigen { + +template +class IncompleteLU +{ + typedef _Scalar Scalar; + typedef Matrix Vector; + typedef typename Vector::Index Index; + typedef SparseMatrix FactorType; + + public: + typedef Matrix MatrixType; + + IncompleteLU() : m_isInitialized(false) {} + + template + IncompleteLU(const MatrixType& mat) : m_isInitialized(false) + { + compute(mat); + } + + Index rows() const { return m_lu.rows(); } + Index cols() const { return m_lu.cols(); } + + template + IncompleteLU& compute(const MatrixType& mat) + { + m_lu = mat; + int size = mat.cols(); + Vector diag(size); + for(int i=0; i + void _solve(const Rhs& b, Dest& x) const + { + x = m_lu.template triangularView().solve(b); + x = m_lu.template triangularView().solve(x); + } + + template inline const internal::solve_retval + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "IncompleteLU is not initialized."); + eigen_assert(cols()==b.rows() + && "IncompleteLU::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval(*this, b.derived()); + } + + protected: + FactorType m_lu; + bool m_isInitialized; +}; + +namespace internal { + +template +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef IncompleteLU<_MatrixType> Dec; + EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec()._solve(rhs(),dst); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_INCOMPLETE_LU_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/IterationController.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/IterationController.h index a65793cd5..dfb97e239 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/IterationController.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/IterationController.h @@ -75,6 +75,8 @@ #ifndef EIGEN_ITERATION_CONTROLLER_H #define EIGEN_ITERATION_CONTROLLER_H +namespace Eigen { + /** \ingroup IterativeSolvers_Module * \class IterationController * @@ -163,4 +165,6 @@ class IterationController }; +} // end namespace Eigen + #endif // EIGEN_ITERATION_CONTROLLER_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/Scaling.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/Scaling.h new file mode 100644 index 000000000..4aad69d0e --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/Scaling.h @@ -0,0 +1,200 @@ 
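The IncompleteLU factorization added alongside GMRES can also be used on its own as a cheap approximate solve; a minimal sketch (illustrative only; the helper function name is made up here, and using it as the GMRES preconditioner template argument is a separate question that depends on the preconditioner interface expected by IterativeSolverBase):

\code
#include <unsupported/Eigen/IterativeSolvers>
#include <Eigen/Sparse>

// Hypothetical helper: one incomplete-LU sweep as an approximate solve of A x = b.
Eigen::VectorXd approxSolve(const Eigen::SparseMatrix<double>& A,
                            const Eigen::VectorXd& b)
{
  Eigen::IncompleteLU<double> ilu;
  ilu.compute(A);       // incomplete factorization stored on A's sparsity pattern
  return ilu.solve(b);  // triangular solves with the approximate L and U factors
}
\endcode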
+// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Desire NUENTSA WAKAM . + +#ifndef EIGEN_SCALING_H +#define EIGEN_SCALING_H +/** + * \ingroup IterativeSolvers_Module + * \brief iterative scaling algorithm to equilibrate rows and column norms in matrices + * + * This class can be used as a preprocessing tool to accelerate the convergence of iterative methods + * + * This feature is useful to limit the pivoting amount during LU/ILU factorization + * The scaling strategy as presented here preserves the symmetry of the problem + * NOTE It is assumed that the matrix does not have empty row or column, + * + * Example with key steps + * \code + * VectorXd x(n), b(n); + * SparseMatrix A; + * // fill A and b; + * Scaling > scal; + * // Compute the left and right scaling vectors. The matrix is equilibrated at output + * scal.computeRef(A); + * // Scale the right hand side + * b = scal.LeftScaling().cwiseProduct(b); + * // Now, solve the equilibrated linear system with any available solver + * + * // Scale back the computed solution + * x = scal.RightScaling().cwiseProduct(x); + * \endcode + * + * \tparam _MatrixType the type of the matrix. It should be a real square sparsematrix + * + * References : D. Ruiz and B. Ucar, A Symmetry Preserving Algorithm for Matrix Scaling, INRIA Research report RR-7552 + * + * \sa \ref IncompleteLUT + */ +using std::abs; +using namespace Eigen; +template +class Scaling +{ + public: + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; + + public: + Scaling() { init(); } + + Scaling(const MatrixType& matrix) + { + init(); + compute(matrix); + } + + ~Scaling() { } + + /** + * Compute the left and right diagonal matrices to scale the input matrix @p mat + * + * FIXME This algorithm will be modified such that the diagonal elements are permuted on the diagonal. 
+ * + * \sa LeftScaling() RightScaling() + */ + void compute (const MatrixType& mat) + { + int m = mat.rows(); + int n = mat.cols(); + assert((m>0 && m == n) && "Please give a non - empty matrix"); + m_left.resize(m); + m_right.resize(n); + m_left.setOnes(); + m_right.setOnes(); + m_matrix = mat; + VectorXd Dr, Dc, DrRes, DcRes; // Temporary Left and right scaling vectors + Dr.resize(m); Dc.resize(n); + DrRes.resize(m); DcRes.resize(n); + double EpsRow = 1.0, EpsCol = 1.0; + int its = 0; + do + { // Iterate until the infinite norm of each row and column is approximately 1 + // Get the maximum value in each row and column + Dr.setZero(); Dc.setZero(); + for (int k=0; km_tol || EpsCol > m_tol) && (its < m_maxits) ); + m_isInitialized = true; + } + /** Compute the left and right vectors to scale the vectors + * the input matrix is scaled with the computed vectors at output + * + * \sa compute() + */ + void computeRef (MatrixType& mat) + { + compute (mat); + mat = m_matrix; + } + /** Get the vector to scale the rows of the matrix + */ + VectorXd& LeftScaling() + { + return m_left; + } + + /** Get the vector to scale the columns of the matrix + */ + VectorXd& RightScaling() + { + return m_right; + } + + /** Set the tolerance for the convergence of the iterative scaling algorithm + */ + void setTolerance(double tol) + { + m_tol = tol; + } + + protected: + + void init() + { + m_tol = 1e-10; + m_maxits = 5; + m_isInitialized = false; + } + + MatrixType m_matrix; + mutable ComputationInfo m_info; + bool m_isInitialized; + VectorXd m_left; // Left scaling vector + VectorXd m_right; // m_right scaling vector + double m_tol; + int m_maxits; // Maximum number of iterations allowed +}; + +#endif \ No newline at end of file diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/KroneckerProduct/CMakeLists.txt b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/KroneckerProduct/CMakeLists.txt new file mode 100644 index 000000000..4daefebee --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/KroneckerProduct/CMakeLists.txt @@ -0,0 +1,6 @@ +FILE(GLOB Eigen_KroneckerProduct_SRCS "*.h") + +INSTALL(FILES + ${Eigen_KroneckerProduct_SRCS} + DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/KroneckerProduct COMPONENT Devel + ) diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h new file mode 100644 index 000000000..4627705ce --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h @@ -0,0 +1,172 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Kolja Brix +// Copyright (C) 2011 Andreas Platen +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + + +#ifndef KRONECKER_TENSOR_PRODUCT_H +#define KRONECKER_TENSOR_PRODUCT_H + + +namespace Eigen { + +namespace internal { + +/*! + * Kronecker tensor product helper function for dense matrices + * + * \param A Dense matrix A + * \param B Dense matrix B + * \param AB_ Kronecker tensor product of A and B + */ +template +void kroneckerProduct_full(const Derived_A& A, const Derived_B& B, Derived_AB & AB) +{ + const unsigned int Ar = A.rows(), + Ac = A.cols(), + Br = B.rows(), + Bc = B.cols(); + for (unsigned int i=0; i +void kroneckerProduct_sparse(const Derived_A &A, const Derived_B &B, Derived_AB &AB) +{ + const unsigned int Ar = A.rows(), + Ac = A.cols(), + Br = B.rows(), + Bc = B.cols(); + AB.resize(Ar*Br,Ac*Bc); + AB.resizeNonZeros(0); + AB.reserve(A.nonZeros()*B.nonZeros()); + + for (int kA=0; kA +void kroneckerProduct(const MatrixBase& a, const MatrixBase& b, Matrix& c) +{ + c.resize(a.rows()*b.rows(),a.cols()*b.cols()); + internal::kroneckerProduct_full(a.derived(), b.derived(), c); +} + +/*! + * Computes Kronecker tensor product of two dense matrices + * + * Remark: this function uses the const cast hack and has been + * implemented to make the function call possible, where the + * output matrix is a submatrix, e.g. + * kroneckerProduct(A,B,AB.block(2,5,6,6)); + * + * \param a Dense matrix a + * \param b Dense matrix b + * \param c Kronecker tensor product of a and b + */ +template +void kroneckerProduct(const MatrixBase& a, const MatrixBase& b, MatrixBase const & c_) +{ + MatrixBase& c = const_cast& >(c_); + internal::kroneckerProduct_full(a.derived(), b.derived(), c.derived()); +} + +/*! + * Computes Kronecker tensor product of a dense and a sparse matrix + * + * \param a Dense matrix a + * \param b Sparse matrix b + * \param c Kronecker tensor product of a and b + */ +template +void kroneckerProduct(const MatrixBase& a, const SparseMatrixBase& b, SparseMatrixBase& c) +{ + internal::kroneckerProduct_sparse(a.derived(), b.derived(), c.derived()); +} + +/*! + * Computes Kronecker tensor product of a sparse and a dense matrix + * + * \param a Sparse matrix a + * \param b Dense matrix b + * \param c Kronecker tensor product of a and b + */ +template +void kroneckerProduct(const SparseMatrixBase& a, const MatrixBase& b, SparseMatrixBase& c) +{ + internal::kroneckerProduct_sparse(a.derived(), b.derived(), c.derived()); +} + +/*! + * Computes Kronecker tensor product of two sparse matrices + * + * \param a Sparse matrix a + * \param b Sparse matrix b + * \param c Kronecker tensor product of a and b + */ +template +void kroneckerProduct(const SparseMatrixBase& a, const SparseMatrixBase& b, SparseMatrixBase& c) +{ + internal::kroneckerProduct_sparse(a.derived(), b.derived(), c.derived()); +} + +} // end namespace Eigen + +#endif // KRONECKER_TENSOR_PRODUCT_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h index 50c0ca84e..6cdd65748 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h @@ -2,6 +2,7 @@ // for linear algebra. 
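A minimal usage sketch for the kroneckerProduct() overloads defined above (the dense/dense variant that resizes its output). Sizes and values are illustrative only, and it assumes the accompanying module header unsupported/Eigen/KroneckerProduct is included:

#include <Eigen/Dense>
#include <unsupported/Eigen/KroneckerProduct>
using namespace Eigen;

int main()
{
  MatrixXd A(2,2), B(2,2), C;      // C is resized to 4x4 by the call below
  A << 1, 2,
       3, 4;
  B << 0, 1,
       1, 0;
  kroneckerProduct(A, B, C);       // C.block(i*2, j*2, 2, 2) == A(i,j) * B
  return 0;
}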
// // Copyright (C) 2009, 2010 Jitse Niesen +// Copyright (C) 2011 Chen-Pang He // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public @@ -25,7 +26,11 @@ #ifndef EIGEN_MATRIX_EXPONENTIAL #define EIGEN_MATRIX_EXPONENTIAL -#ifdef _MSC_VER +#include "StemFunction.h" + +namespace Eigen { + +#if defined(_MSC_VER) || defined(__FreeBSD__) template Scalar log2(Scalar v) { using std::log; return log(v)/log(Scalar(2)); } #endif @@ -107,6 +112,17 @@ class MatrixExponential { */ void pade13(const MatrixType &A); + /** \brief Compute the (17,17)-Padé approximant to the exponential. + * + * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé + * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. + * + * This function activates only if your long double is double-double or quadruple. + * + * \param[in] A Argument of matrix exponential + */ + void pade17(const MatrixType &A); + /** \brief Compute Padé approximant to the exponential. * * Computes \c m_U, \c m_V and \c m_squarings such that @@ -127,17 +143,24 @@ class MatrixExponential { * \sa computeUV(double); */ void computeUV(float); + + /** \brief Compute Padé approximant to the exponential. + * + * \sa computeUV(double); + */ + void computeUV(long double); typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; + typedef typename std::complex ComplexScalar; /** \brief Reference to matrix whose exponential is to be computed. */ typename internal::nested::type m_M; - /** \brief Even-degree terms in numerator of Padé approximant. */ + /** \brief Odd-degree terms in numerator of Padé approximant. */ MatrixType m_U; - /** \brief Odd-degree terms in numerator of Padé approximant. */ + /** \brief Even-degree terms in numerator of Padé approximant. */ MatrixType m_V; /** \brief Used for temporary storage. */ @@ -153,7 +176,7 @@ class MatrixExponential { int m_squarings; /** \brief L1 norm of m_M. 
*/ - float m_l1norm; + RealScalar m_l1norm; }; template @@ -165,7 +188,7 @@ MatrixExponential::MatrixExponential(const MatrixType &M) : m_tmp2(M.rows(),M.cols()), m_Id(MatrixType::Identity(M.rows(), M.cols())), m_squarings(0), - m_l1norm(static_cast(M.cwiseAbs().colwise().sum().maxCoeff())) + m_l1norm(M.cwiseAbs().colwise().sum().maxCoeff()) { /* empty body */ } @@ -174,18 +197,24 @@ template template void MatrixExponential::compute(ResultType &result) { +#if LDBL_MANT_DIG > 112 // rarely happens + if(sizeof(RealScalar) > 14) { + result = m_M.matrixFunction(StdStemFunctions::exp); + return; + } +#endif computeUV(RealScalar()); - m_tmp1 = m_U + m_V; // numerator of Pade approximant - m_tmp2 = -m_U + m_V; // denominator of Pade approximant + m_tmp1 = m_U + m_V; // numerator of Pade approximant + m_tmp2 = -m_U + m_V; // denominator of Pade approximant result = m_tmp2.partialPivLu().solve(m_tmp1); for (int i=0; i EIGEN_STRONG_INLINE void MatrixExponential::pade3(const MatrixType &A) { - const Scalar b[] = {120., 60., 12., 1.}; + const RealScalar b[] = {120., 60., 12., 1.}; m_tmp1.noalias() = A * A; m_tmp2 = b[3]*m_tmp1 + b[1]*m_Id; m_U.noalias() = A * m_tmp2; @@ -195,7 +224,7 @@ EIGEN_STRONG_INLINE void MatrixExponential::pade3(const MatrixType & template EIGEN_STRONG_INLINE void MatrixExponential::pade5(const MatrixType &A) { - const Scalar b[] = {30240., 15120., 3360., 420., 30., 1.}; + const RealScalar b[] = {30240., 15120., 3360., 420., 30., 1.}; MatrixType A2 = A * A; m_tmp1.noalias() = A2 * A2; m_tmp2 = b[5]*m_tmp1 + b[3]*A2 + b[1]*m_Id; @@ -206,7 +235,7 @@ EIGEN_STRONG_INLINE void MatrixExponential::pade5(const MatrixType & template EIGEN_STRONG_INLINE void MatrixExponential::pade7(const MatrixType &A) { - const Scalar b[] = {17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.}; + const RealScalar b[] = {17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.}; MatrixType A2 = A * A; MatrixType A4 = A2 * A2; m_tmp1.noalias() = A4 * A2; @@ -218,7 +247,7 @@ EIGEN_STRONG_INLINE void MatrixExponential::pade7(const MatrixType & template EIGEN_STRONG_INLINE void MatrixExponential::pade9(const MatrixType &A) { - const Scalar b[] = {17643225600., 8821612800., 2075673600., 302702400., 30270240., + const RealScalar b[] = {17643225600., 8821612800., 2075673600., 302702400., 30270240., 2162160., 110880., 3960., 90., 1.}; MatrixType A2 = A * A; MatrixType A4 = A2 * A2; @@ -232,7 +261,7 @@ EIGEN_STRONG_INLINE void MatrixExponential::pade9(const MatrixType & template EIGEN_STRONG_INLINE void MatrixExponential::pade13(const MatrixType &A) { - const Scalar b[] = {64764752532480000., 32382376266240000., 7771770303897600., + const RealScalar b[] = {64764752532480000., 32382376266240000., 7771770303897600., 1187353796428800., 129060195264000., 10559470521600., 670442572800., 33522128640., 1323241920., 40840800., 960960., 16380., 182., 1.}; MatrixType A2 = A * A; @@ -247,6 +276,30 @@ EIGEN_STRONG_INLINE void MatrixExponential::pade13(const MatrixType m_V += b[6]*m_tmp1 + b[4]*A4 + b[2]*A2 + b[0]*m_Id; } +#if LDBL_MANT_DIG > 64 +template +EIGEN_STRONG_INLINE void MatrixExponential::pade17(const MatrixType &A) +{ + const RealScalar b[] = {830034394580628357120000.L, 415017197290314178560000.L, + 100610229646136770560000.L, 15720348382208870400000.L, + 1774878043152614400000.L, 153822763739893248000.L, 10608466464820224000.L, + 595373117923584000.L, 27563570274240000.L, 1060137318240000.L, + 33924394183680.L, 899510451840.L, 19554575040.L, 341863200.L, 4651200.L, + 46512.L, 306.L, 1.L}; + 
MatrixType A2 = A * A; + MatrixType A4 = A2 * A2; + MatrixType A6 = A4 * A2; + m_tmp1.noalias() = A4 * A4; + m_V = b[17]*m_tmp1 + b[15]*A6 + b[13]*A4 + b[11]*A2; // used for temporary storage + m_tmp2.noalias() = m_tmp1 * m_V; + m_tmp2 += b[9]*m_tmp1 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*m_Id; + m_U.noalias() = A * m_tmp2; + m_tmp2 = b[16]*m_tmp1 + b[14]*A6 + b[12]*A4 + b[10]*A2; + m_V.noalias() = m_tmp1 * m_tmp2; + m_V += b[8]*m_tmp1 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*m_Id; +} +#endif + template void MatrixExponential::computeUV(float) { @@ -260,7 +313,7 @@ void MatrixExponential::computeUV(float) } else { const float maxnorm = 3.925724783138660f; m_squarings = (max)(0, (int)ceil(log2(m_l1norm / maxnorm))); - MatrixType A = m_M / pow(Scalar(2), Scalar(static_cast(m_squarings))); + MatrixType A = m_M / pow(Scalar(2), m_squarings); pade7(A); } } @@ -282,11 +335,74 @@ void MatrixExponential::computeUV(double) } else { const double maxnorm = 5.371920351148152; m_squarings = (max)(0, (int)ceil(log2(m_l1norm / maxnorm))); - MatrixType A = m_M / pow(Scalar(2), Scalar(m_squarings)); + MatrixType A = m_M / pow(Scalar(2), m_squarings); pade13(A); } } +template +void MatrixExponential::computeUV(long double) +{ + using std::max; + using std::pow; + using std::ceil; +#if LDBL_MANT_DIG == 53 // double precision + computeUV(double()); +#elif LDBL_MANT_DIG <= 64 // extended precision + if (m_l1norm < 4.1968497232266989671e-003L) { + pade3(m_M); + } else if (m_l1norm < 1.1848116734693823091e-001L) { + pade5(m_M); + } else if (m_l1norm < 5.5170388480686700274e-001L) { + pade7(m_M); + } else if (m_l1norm < 1.3759868875587845383e+000L) { + pade9(m_M); + } else { + const long double maxnorm = 4.0246098906697353063L; + m_squarings = (max)(0, (int)ceil(log2(m_l1norm / maxnorm))); + MatrixType A = m_M / pow(Scalar(2), m_squarings); + pade13(A); + } +#elif LDBL_MANT_DIG <= 106 // double-double + if (m_l1norm < 3.2787892205607026992947488108213e-005L) { + pade3(m_M); + } else if (m_l1norm < 6.4467025060072760084130906076332e-003L) { + pade5(m_M); + } else if (m_l1norm < 6.8988028496595374751374122881143e-002L) { + pade7(m_M); + } else if (m_l1norm < 2.7339737518502231741495857201670e-001L) { + pade9(m_M); + } else if (m_l1norm < 1.3203382096514474905666448850278e+000L) { + pade13(m_M); + } else { + const long double maxnorm = 3.2579440895405400856599663723517L; + m_squarings = (max)(0, (int)ceil(log2(m_l1norm / maxnorm))); + MatrixType A = m_M / pow(Scalar(2), m_squarings); + pade17(A); + } +#elif LDBL_MANT_DIG <= 112 // quadruple precison + if (m_l1norm < 1.639394610288918690547467954466970e-005L) { + pade3(m_M); + } else if (m_l1norm < 4.253237712165275566025884344433009e-003L) { + pade5(m_M); + } else if (m_l1norm < 5.125804063165764409885122032933142e-002L) { + pade7(m_M); + } else if (m_l1norm < 2.170000765161155195453205651889853e-001L) { + pade9(m_M); + } else if (m_l1norm < 1.125358383453143065081397882891878e+000L) { + pade13(m_M); + } else { + const long double maxnorm = 2.884233277829519311757165057717815L; + m_squarings = (max)(0, (int)ceil(log2(m_l1norm / maxnorm))); + MatrixType A = m_M / pow(Scalar(2), m_squarings); + pade17(A); + } +#else + // this case should be handled in compute() + eigen_assert(false && "Bug in MatrixExponential"); +#endif // LDBL_MANT_DIG +} + /** \ingroup MatrixFunctions_Module * * \brief Proxy for the matrix exponential of some matrix (expression). 
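 * A small usage sketch (values are illustrative only): the proxy is normally obtained from
 * MatrixBase::exp(), declared in the hunk below, and evaluated on assignment.
 * \code
 * #include <unsupported/Eigen/MatrixFunctions>
 * Eigen::Matrix2d A;
 * A << 0, -1,
 *      1,  0;                      // exp of a skew-symmetric matrix is a rotation
 * Eigen::Matrix2d E = A.exp();     // E == [cos 1, -sin 1; sin 1, cos 1] up to rounding
 * \endcode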
@@ -348,4 +464,6 @@ const MatrixExponentialReturnValue MatrixBase::exp() const return MatrixExponentialReturnValue(derived()); } +} // end namespace Eigen + #endif // EIGEN_MATRIX_EXPONENTIAL diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h index 4b9d8a102..859de7288 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h @@ -1,7 +1,7 @@ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // -// Copyright (C) 2009, 2010 Jitse Niesen +// Copyright (C) 2009-2011 Jitse Niesen // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public @@ -29,31 +29,39 @@ #include "MatrixFunctionAtomic.h" +namespace Eigen { + /** \ingroup MatrixFunctions_Module - * \brief Class for computing matrix exponentials. - * \tparam MatrixType type of the argument of the matrix function, - * expected to be an instantiation of the Matrix class template. + * \brief Class for computing matrix functions. + * \tparam MatrixType type of the argument of the matrix function, + * expected to be an instantiation of the Matrix class template. + * \tparam AtomicType type for computing matrix function of atomic blocks. + * \tparam IsComplex used internally to select correct specialization. + * + * This class implements the Schur-Parlett algorithm for computing matrix functions. The spectrum of the + * matrix is divided in clustered of eigenvalues that lies close together. This class delegates the + * computation of the matrix function on every block corresponding to these clusters to an object of type + * \p AtomicType and uses these results to compute the matrix function of the whole matrix. The class + * \p AtomicType should have a \p compute() member function for computing the matrix function of a block. + * + * \sa class MatrixFunctionAtomic, class MatrixLogarithmAtomic */ -template ::Scalar>::IsComplex> +template ::Scalar>::IsComplex> class MatrixFunction { - private: - - typedef typename internal::traits::Index Index; - typedef typename internal::traits::Scalar Scalar; - typedef typename internal::stem_function::type StemFunction; - public: /** \brief Constructor. * - * \param[in] A argument of matrix function, should be a square matrix. - * \param[in] f an entire function; \c f(x,n) should compute the n-th derivative of f at x. + * \param[in] A argument of matrix function, should be a square matrix. + * \param[in] atomic class for computing matrix function of atomic blocks. * - * The class stores a reference to \p A, so it should not be + * The class stores references to \p A and \p atomic, so they should not be * changed (or destroyed) before compute() is called. */ - MatrixFunction(const MatrixType& A, StemFunction f); + MatrixFunction(const MatrixType& A, AtomicType& atomic); /** \brief Compute the matrix function. 
* @@ -68,11 +76,11 @@ class MatrixFunction }; -/** \ingroup MatrixFunctions_Module - * \brief Partial specialization of MatrixFunction for real matrices \internal +/** \internal \ingroup MatrixFunctions_Module + * \brief Partial specialization of MatrixFunction for real matrices */ -template -class MatrixFunction +template +class MatrixFunction { private: @@ -86,16 +94,15 @@ class MatrixFunction typedef std::complex ComplexScalar; typedef Matrix ComplexMatrix; - typedef typename internal::stem_function::type StemFunction; public: /** \brief Constructor. * - * \param[in] A argument of matrix function, should be a square matrix. - * \param[in] f an entire function; \c f(x,n) should compute the n-th derivative of f at x. + * \param[in] A argument of matrix function, should be a square matrix. + * \param[in] atomic class for computing matrix function of atomic blocks. */ - MatrixFunction(const MatrixType& A, StemFunction f) : m_A(A), m_f(f) { } + MatrixFunction(const MatrixType& A, AtomicType& atomic) : m_A(A), m_atomic(atomic) { } /** \brief Compute the matrix function. * @@ -111,24 +118,24 @@ class MatrixFunction { ComplexMatrix CA = m_A.template cast(); ComplexMatrix Cresult; - MatrixFunction mf(CA, m_f); + MatrixFunction mf(CA, m_atomic); mf.compute(Cresult); result = Cresult.real(); } private: typename internal::nested::type m_A; /**< \brief Reference to argument of matrix function. */ - StemFunction *m_f; /**< \brief Stem function for matrix function under consideration */ + AtomicType& m_atomic; /**< \brief Class for computing matrix function of atomic blocks. */ MatrixFunction& operator=(const MatrixFunction&); }; -/** \ingroup MatrixFunctions_Module - * \brief Partial specialization of MatrixFunction for complex matrices \internal +/** \internal \ingroup MatrixFunctions_Module + * \brief Partial specialization of MatrixFunction for complex matrices */ -template -class MatrixFunction +template +class MatrixFunction { private: @@ -139,7 +146,6 @@ class MatrixFunction static const int ColsAtCompileTime = Traits::ColsAtCompileTime; static const int Options = MatrixType::Options; typedef typename NumTraits::Real RealScalar; - typedef typename internal::stem_function::type StemFunction; typedef Matrix VectorType; typedef Matrix IntVectorType; typedef Matrix DynamicIntVectorType; @@ -149,7 +155,7 @@ class MatrixFunction public: - MatrixFunction(const MatrixType& A, StemFunction f); + MatrixFunction(const MatrixType& A, AtomicType& atomic); template void compute(ResultType& result); private: @@ -168,7 +174,7 @@ class MatrixFunction DynMatrixType solveTriangularSylvester(const DynMatrixType& A, const DynMatrixType& B, const DynMatrixType& C); typename internal::nested::type m_A; /**< \brief Reference to argument of matrix function. */ - StemFunction *m_f; /**< \brief Stem function for matrix function under consideration */ + AtomicType& m_atomic; /**< \brief Class for computing matrix function of atomic blocks. */ MatrixType m_T; /**< \brief Triangular part of Schur decomposition */ MatrixType m_U; /**< \brief Unitary part of Schur decomposition */ MatrixType m_fT; /**< \brief %Matrix function applied to #m_T */ @@ -191,12 +197,12 @@ class MatrixFunction /** \brief Constructor. * - * \param[in] A argument of matrix function, should be a square matrix. - * \param[in] f an entire function; \c f(x,n) should compute the n-th derivative of f at x. + * \param[in] A argument of matrix function, should be a square matrix. + * \param[in] atomic class for computing matrix function of atomic blocks. 
*/ -template -MatrixFunction::MatrixFunction(const MatrixType& A, StemFunction f) : - m_A(A), m_f(f) +template +MatrixFunction::MatrixFunction(const MatrixType& A, AtomicType& atomic) + : m_A(A), m_atomic(atomic) { /* empty body */ } @@ -206,9 +212,9 @@ MatrixFunction::MatrixFunction(const MatrixType& A, StemFunction f * \param[out] result the function \p f applied to \p A, as * specified in the constructor. */ -template +template template -void MatrixFunction::compute(ResultType& result) +void MatrixFunction::compute(ResultType& result) { computeSchurDecomposition(); partitionEigenvalues(); @@ -222,8 +228,8 @@ void MatrixFunction::compute(ResultType& result) } /** \brief Store the Schur decomposition of #m_A in #m_T and #m_U */ -template -void MatrixFunction::computeSchurDecomposition() +template +void MatrixFunction::computeSchurDecomposition() { const ComplexSchur schurOfA(m_A); m_T = schurOfA.matrixT(); @@ -241,8 +247,8 @@ void MatrixFunction::computeSchurDecomposition() * The implementation follows Algorithm 4.1 in the paper of Davies * and Higham. */ -template -void MatrixFunction::partitionEigenvalues() +template +void MatrixFunction::partitionEigenvalues() { const Index rows = m_T.rows(); VectorType diag = m_T.diagonal(); // contains eigenvalues of A @@ -278,8 +284,8 @@ void MatrixFunction::partitionEigenvalues() * \returns Iterator to cluster containing \c key, or * \c m_clusters.end() if no cluster in m_clusters contains \c key. */ -template -typename MatrixFunction::ListOfClusters::iterator MatrixFunction::findCluster(Scalar key) +template +typename MatrixFunction::ListOfClusters::iterator MatrixFunction::findCluster(Scalar key) { typename Cluster::iterator j; for (typename ListOfClusters::iterator i = m_clusters.begin(); i != m_clusters.end(); ++i) { @@ -291,8 +297,8 @@ typename MatrixFunction::ListOfClusters::iterator MatrixFunction -void MatrixFunction::computeClusterSize() +template +void MatrixFunction::computeClusterSize() { const Index rows = m_T.rows(); VectorType diag = m_T.diagonal(); @@ -313,8 +319,8 @@ void MatrixFunction::computeClusterSize() } /** \brief Compute #m_blockStart using #m_clusterSize */ -template -void MatrixFunction::computeBlockStart() +template +void MatrixFunction::computeBlockStart() { m_blockStart.resize(m_clusterSize.rows()); m_blockStart(0) = 0; @@ -324,8 +330,8 @@ void MatrixFunction::computeBlockStart() } /** \brief Compute #m_permutation using #m_eivalToCluster and #m_blockStart */ -template -void MatrixFunction::constructPermutation() +template +void MatrixFunction::constructPermutation() { DynamicIntVectorType indexNextEntry = m_blockStart; m_permutation.resize(m_T.rows()); @@ -337,8 +343,8 @@ void MatrixFunction::constructPermutation() } /** \brief Permute Schur decomposition in #m_U and #m_T according to #m_permutation */ -template -void MatrixFunction::permuteSchur() +template +void MatrixFunction::permuteSchur() { IntVectorType p = m_permutation; for (Index i = 0; i < p.rows() - 1; i++) { @@ -355,8 +361,8 @@ void MatrixFunction::permuteSchur() } /** \brief Swap rows \a index and \a index+1 in Schur decomposition in #m_U and #m_T */ -template -void MatrixFunction::swapEntriesInSchur(Index index) +template +void MatrixFunction::swapEntriesInSchur(Index index) { JacobiRotation rotation; rotation.makeGivens(m_T(index, index+1), m_T(index+1, index+1) - m_T(index, index)); @@ -367,25 +373,23 @@ void MatrixFunction::swapEntriesInSchur(Index index) /** \brief Compute block diagonal part of #m_fT. 
* - * This routine computes the matrix function #m_f applied to the block - * diagonal part of #m_T, with the blocking given by #m_blockStart. The - * result is stored in #m_fT. The off-diagonal parts of #m_fT are set - * to zero. + * This routine computes the matrix function applied to the block diagonal part of #m_T, with the blocking + * given by #m_blockStart. The matrix function of each diagonal block is computed by #m_atomic. The + * off-diagonal parts of #m_fT are set to zero. */ -template -void MatrixFunction::computeBlockAtomic() +template +void MatrixFunction::computeBlockAtomic() { m_fT.resize(m_T.rows(), m_T.cols()); m_fT.setZero(); - MatrixFunctionAtomic mfa(m_f); for (Index i = 0; i < m_clusterSize.rows(); ++i) { - block(m_fT, i, i) = mfa.compute(block(m_T, i, i)); + block(m_fT, i, i) = m_atomic.compute(block(m_T, i, i)); } } /** \brief Return block of matrix according to blocking given by #m_blockStart */ -template -Block MatrixFunction::block(MatrixType& A, Index i, Index j) +template +Block MatrixFunction::block(MatrixType& A, Index i, Index j) { return A.block(m_blockStart(i), m_blockStart(j), m_clusterSize(i), m_clusterSize(j)); } @@ -393,12 +397,12 @@ Block MatrixFunction::block(MatrixType& A, Index i, In /** \brief Compute part of #m_fT above block diagonal. * * This routine assumes that the block diagonal part of #m_fT (which - * equals #m_f applied to #m_T) has already been computed and computes + * equals the matrix function applied to #m_T) has already been computed and computes * the part above the block diagonal. The part below the diagonal is * zero, because #m_T is upper triangular. */ -template -void MatrixFunction::computeOffDiagonal() +template +void MatrixFunction::computeOffDiagonal() { for (Index diagIndex = 1; diagIndex < m_clusterSize.rows(); diagIndex++) { for (Index blockIndex = 0; blockIndex < m_clusterSize.rows() - diagIndex; blockIndex++) { @@ -439,8 +443,8 @@ void MatrixFunction::computeOffDiagonal() * solution). In that case, these equations can be evaluated in the * order \f$ i=m,\ldots,1 \f$ and \f$ j=1,\ldots,n \f$. 
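 * Written out with the notation above, the implementation below computes, for \f$ i = m, \ldots, 1 \f$ and
 * \f$ j = 1, \ldots, n \f$,
 * \f[ X_{ij} = \frac{C_{ij} - \sum_{k=i+1}^m A_{ik} X_{kj} - \sum_{k=1}^{j-1} X_{ik} B_{kj}}{A_{ii} + B_{jj}}, \f]
 * every term on the right-hand side being available by the time \f$ X_{ij} \f$ is needed.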
*/ -template -typename MatrixFunction::DynMatrixType MatrixFunction::solveTriangularSylvester( +template +typename MatrixFunction::DynMatrixType MatrixFunction::solveTriangularSylvester( const DynMatrixType& A, const DynMatrixType& B, const DynMatrixType& C) @@ -520,8 +524,18 @@ template class MatrixFunctionReturnValue template inline void evalTo(ResultType& result) const { - const typename Derived::PlainObject Aevaluated = m_A.eval(); - MatrixFunction mf(Aevaluated, m_f); + typedef typename Derived::PlainObject PlainObject; + typedef internal::traits Traits; + static const int RowsAtCompileTime = Traits::RowsAtCompileTime; + static const int ColsAtCompileTime = Traits::ColsAtCompileTime; + static const int Options = PlainObject::Options; + typedef std::complex::Real> ComplexScalar; + typedef Matrix DynMatrixType; + typedef MatrixFunctionAtomic AtomicType; + AtomicType atomic(m_f); + + const PlainObject Aevaluated = m_A.eval(); + MatrixFunction mf(Aevaluated, atomic); mf.compute(result); } @@ -586,4 +600,6 @@ const MatrixFunctionReturnValue MatrixBase::cosh() const return MatrixFunctionReturnValue(derived(), StdStemFunctions::cosh); } +} // end namespace Eigen + #endif // EIGEN_MATRIX_FUNCTION diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h index d08766921..97ab662fe 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h @@ -25,6 +25,8 @@ #ifndef EIGEN_MATRIX_FUNCTION_ATOMIC #define EIGEN_MATRIX_FUNCTION_ATOMIC +namespace Eigen { + /** \ingroup MatrixFunctions_Module * \class MatrixFunctionAtomic * \brief Helper class for computing matrix functions of atomic matrices. @@ -139,4 +141,6 @@ bool MatrixFunctionAtomic::taylorConverged(Index s, const MatrixType return false; } +} // end namespace Eigen + #endif // EIGEN_MATRIX_FUNCTION_ATOMIC diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h new file mode 100644 index 000000000..e6f25b73c --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h @@ -0,0 +1,510 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Jitse Niesen +// Copyright (C) 2011 Chen-Pang He +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
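A short usage sketch of the functionality this file provides (values are illustrative only; the public entry point is the MatrixBase::log() member defined at the end of the file):

#include <Eigen/Dense>
#include <unsupported/Eigen/MatrixFunctions>

int main()
{
  Eigen::Matrix2d A;
  A << 2, 1,
       0, 2;                        // triangular with positive eigenvalues
  Eigen::Matrix2d L = A.log();      // L.exp() reproduces A up to rounding
  return 0;
}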
+ +#ifndef EIGEN_MATRIX_LOGARITHM +#define EIGEN_MATRIX_LOGARITHM + +#ifndef M_PI +#define M_PI 3.141592653589793238462643383279503L +#endif + +namespace Eigen { + +/** \ingroup MatrixFunctions_Module + * \class MatrixLogarithmAtomic + * \brief Helper class for computing matrix logarithm of atomic matrices. + * + * \internal + * Here, an atomic matrix is a triangular matrix whose diagonal + * entries are close to each other. + * + * \sa class MatrixFunctionAtomic, MatrixBase::log() + */ +template +class MatrixLogarithmAtomic +{ +public: + + typedef typename MatrixType::Scalar Scalar; + // typedef typename MatrixType::Index Index; + typedef typename NumTraits::Real RealScalar; + // typedef typename internal::stem_function::type StemFunction; + // typedef Matrix VectorType; + + /** \brief Constructor. */ + MatrixLogarithmAtomic() { } + + /** \brief Compute matrix logarithm of atomic matrix + * \param[in] A argument of matrix logarithm, should be upper triangular and atomic + * \returns The logarithm of \p A. + */ + MatrixType compute(const MatrixType& A); + +private: + + void compute2x2(const MatrixType& A, MatrixType& result); + void computeBig(const MatrixType& A, MatrixType& result); + static Scalar atanh(Scalar x); + int getPadeDegree(float normTminusI); + int getPadeDegree(double normTminusI); + int getPadeDegree(long double normTminusI); + void computePade(MatrixType& result, const MatrixType& T, int degree); + void computePade3(MatrixType& result, const MatrixType& T); + void computePade4(MatrixType& result, const MatrixType& T); + void computePade5(MatrixType& result, const MatrixType& T); + void computePade6(MatrixType& result, const MatrixType& T); + void computePade7(MatrixType& result, const MatrixType& T); + void computePade8(MatrixType& result, const MatrixType& T); + void computePade9(MatrixType& result, const MatrixType& T); + void computePade10(MatrixType& result, const MatrixType& T); + void computePade11(MatrixType& result, const MatrixType& T); + + static const int minPadeDegree = 3; + static const int maxPadeDegree = std::numeric_limits::digits<= 24? 5: // single precision + std::numeric_limits::digits<= 53? 7: // double precision + std::numeric_limits::digits<= 64? 8: // extended precision + std::numeric_limits::digits<=106? 10: 11; // double-double or quadruple precision + + // Prevent copying + MatrixLogarithmAtomic(const MatrixLogarithmAtomic&); + MatrixLogarithmAtomic& operator=(const MatrixLogarithmAtomic&); +}; + +/** \brief Compute logarithm of triangular matrix with clustered eigenvalues. */ +template +MatrixType MatrixLogarithmAtomic::compute(const MatrixType& A) +{ + using std::log; + MatrixType result(A.rows(), A.rows()); + if (A.rows() == 1) + result(0,0) = log(A(0,0)); + else if (A.rows() == 2) + compute2x2(A, result); + else + computeBig(A, result); + return result; +} + +/** \brief Compute atanh (inverse hyperbolic tangent). */ +template +typename MatrixType::Scalar MatrixLogarithmAtomic::atanh(typename MatrixType::Scalar x) +{ + using std::abs; + using std::sqrt; + if (abs(x) > sqrt(NumTraits::epsilon())) + return Scalar(0.5) * log((Scalar(1) + x) / (Scalar(1) - x)); + else + return x + x*x*x / Scalar(3); +} + +/** \brief Compute logarithm of 2x2 triangular matrix. 
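 * Writing \f$ a = A(0,0) \f$, \f$ d = A(1,1) \f$ and \f$ b = A(0,1) \f$, the result has \f$ \log a \f$ and
 * \f$ \log d \f$ on its diagonal and \f$ b (\log d - \log a) / (d - a) \f$ in the (0,1) entry (simply
 * \f$ b / a \f$ when \f$ a = d \f$); when \f$ a \approx d \f$ the implementation below switches to an
 * atanh-based formula with an unwinding number to avoid cancellation.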
*/ +template +void MatrixLogarithmAtomic::compute2x2(const MatrixType& A, MatrixType& result) +{ + using std::abs; + using std::ceil; + using std::imag; + using std::log; + + Scalar logA00 = log(A(0,0)); + Scalar logA11 = log(A(1,1)); + + result(0,0) = logA00; + result(1,0) = Scalar(0); + result(1,1) = logA11; + + if (A(0,0) == A(1,1)) { + result(0,1) = A(0,1) / A(0,0); + } else if ((abs(A(0,0)) < 0.5*abs(A(1,1))) || (abs(A(0,0)) > 2*abs(A(1,1)))) { + result(0,1) = A(0,1) * (logA11 - logA00) / (A(1,1) - A(0,0)); + } else { + // computation in previous branch is inaccurate if A(1,1) \approx A(0,0) + int unwindingNumber = static_cast(ceil((imag(logA11 - logA00) - M_PI) / (2*M_PI))); + Scalar z = (A(1,1) - A(0,0)) / (A(1,1) + A(0,0)); + result(0,1) = A(0,1) * (Scalar(2) * atanh(z) + Scalar(0,2*M_PI*unwindingNumber)) / (A(1,1) - A(0,0)); + } +} + +/** \brief Compute logarithm of triangular matrices with size > 2. + * \details This uses a inverse scale-and-square algorithm. */ +template +void MatrixLogarithmAtomic::computeBig(const MatrixType& A, MatrixType& result) +{ + int numberOfSquareRoots = 0; + int numberOfExtraSquareRoots = 0; + int degree; + MatrixType T = A; + const RealScalar maxNormForPade = maxPadeDegree<= 5? 5.3149729967117310e-1: // single precision + maxPadeDegree<= 7? 2.6429608311114350e-1: // double precision + maxPadeDegree<= 8? 2.32777776523703892094e-1L: // extended precision + maxPadeDegree<=10? 1.05026503471351080481093652651105e-1L: // double-double + 1.1880960220216759245467951592883642e-1L; // quadruple precision + + while (true) { + RealScalar normTminusI = (T - MatrixType::Identity(T.rows(), T.rows())).cwiseAbs().colwise().sum().maxCoeff(); + if (normTminusI < maxNormForPade) { + degree = getPadeDegree(normTminusI); + int degree2 = getPadeDegree(normTminusI / RealScalar(2)); + if ((degree - degree2 <= 1) || (numberOfExtraSquareRoots == 1)) + break; + ++numberOfExtraSquareRoots; + } + MatrixType sqrtT; + MatrixSquareRootTriangular(T).compute(sqrtT); + T = sqrtT; + ++numberOfSquareRoots; + } + + computePade(result, T, degree); + result *= pow(RealScalar(2), numberOfSquareRoots); +} + +/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = float) */ +template +int MatrixLogarithmAtomic::getPadeDegree(float normTminusI) +{ + const float maxNormForPade[] = { 2.5111573934555054e-1 /* degree = 3 */ , 4.0535837411880493e-1, + 5.3149729967117310e-1 }; + for (int degree = 3; degree <= maxPadeDegree; ++degree) + if (normTminusI <= maxNormForPade[degree - minPadeDegree]) + return degree; + assert(false); // this line should never be reached +} + +/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = double) */ +template +int MatrixLogarithmAtomic::getPadeDegree(double normTminusI) +{ + const double maxNormForPade[] = { 1.6206284795015624e-2 /* degree = 3 */ , 5.3873532631381171e-2, + 1.1352802267628681e-1, 1.8662860613541288e-1, 2.642960831111435e-1 }; + for (int degree = 3; degree <= maxPadeDegree; ++degree) + if (normTminusI <= maxNormForPade[degree - minPadeDegree]) + return degree; + assert(false); // this line should never be reached +} + +/* \brief Get suitable degree for Pade approximation. 
(specialized for RealScalar = long double) */ +template +int MatrixLogarithmAtomic::getPadeDegree(long double normTminusI) +{ +#if LDBL_MANT_DIG == 53 // double precision + const double maxNormForPade[] = { 1.6206284795015624e-2 /* degree = 3 */ , 5.3873532631381171e-2, + 1.1352802267628681e-1, 1.8662860613541288e-1, 2.642960831111435e-1 }; +#elif LDBL_MANT_DIG <= 64 // extended precision + const double maxNormForPade[] = { 5.48256690357782863103e-3 /* degree = 3 */, 2.34559162387971167321e-2, + 5.84603923897347449857e-2, 1.08486423756725170223e-1, 1.68385767881294446649e-1, + 2.32777776523703892094e-1 }; +#elif LDBL_MANT_DIG <= 106 // double-double + const double maxNormForPade[] = { 8.58970550342939562202529664318890e-5 /* degree = 3 */, + 9.34074328446359654039446552677759e-4, 4.26117194647672175773064114582860e-3, + 1.21546224740281848743149666560464e-2, 2.61100544998339436713088248557444e-2, + 4.66170074627052749243018566390567e-2, 7.32585144444135027565872014932387e-2, + 1.05026503471351080481093652651105e-1 }; +#else // quadruple precision + const double maxNormForPade[] = { 4.7419931187193005048501568167858103e-5 /* degree = 3 */, + 5.8853168473544560470387769480192666e-4, 2.9216120366601315391789493628113520e-3, + 8.8415758124319434347116734705174308e-3, 1.9850836029449446668518049562565291e-2, + 3.6688019729653446926585242192447447e-2, 5.9290962294020186998954055264528393e-2, + 8.6998436081634343903250580992127677e-2, 1.1880960220216759245467951592883642e-1 }; +#endif + for (int degree = 3; degree <= maxPadeDegree; ++degree) + if (normTminusI <= maxNormForPade[degree - minPadeDegree]) + return degree; + assert(false); // this line should never be reached +} + +/* \brief Compute Pade approximation to matrix logarithm */ +template +void MatrixLogarithmAtomic::computePade(MatrixType& result, const MatrixType& T, int degree) +{ + switch (degree) { + case 3: computePade3(result, T); break; + case 4: computePade4(result, T); break; + case 5: computePade5(result, T); break; + case 6: computePade6(result, T); break; + case 7: computePade7(result, T); break; + case 8: computePade8(result, T); break; + case 9: computePade9(result, T); break; + case 10: computePade10(result, T); break; + case 11: computePade11(result, T); break; + default: assert(false); // should never happen + } +} + +template +void MatrixLogarithmAtomic::computePade3(MatrixType& result, const MatrixType& T) +{ + const int degree = 3; + const RealScalar nodes[] = { 0.1127016653792583114820734600217600L, 0.5000000000000000000000000000000000L, + 0.8872983346207416885179265399782400L }; + const RealScalar weights[] = { 0.2777777777777777777777777777777778L, 0.4444444444444444444444444444444444L, + 0.2777777777777777777777777777777778L }; + assert(degree <= maxPadeDegree); + MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); + result.setZero(T.rows(), T.rows()); + for (int k = 0; k < degree; ++k) + result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI) + .template triangularView().solve(TminusI); +} + +template +void MatrixLogarithmAtomic::computePade4(MatrixType& result, const MatrixType& T) +{ + const int degree = 4; + const RealScalar nodes[] = { 0.0694318442029737123880267555535953L, 0.3300094782075718675986671204483777L, + 0.6699905217924281324013328795516223L, 0.9305681557970262876119732444464048L }; + const RealScalar weights[] = { 0.1739274225687269286865319746109997L, 0.3260725774312730713134680253890003L, + 0.3260725774312730713134680253890003L, 
0.1739274225687269286865319746109997L }; + assert(degree <= maxPadeDegree); + MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); + result.setZero(T.rows(), T.rows()); + for (int k = 0; k < degree; ++k) + result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI) + .template triangularView().solve(TminusI); +} + +template +void MatrixLogarithmAtomic::computePade5(MatrixType& result, const MatrixType& T) +{ + const int degree = 5; + const RealScalar nodes[] = { 0.0469100770306680036011865608503035L, 0.2307653449471584544818427896498956L, + 0.5000000000000000000000000000000000L, 0.7692346550528415455181572103501044L, + 0.9530899229693319963988134391496965L }; + const RealScalar weights[] = { 0.1184634425280945437571320203599587L, 0.2393143352496832340206457574178191L, + 0.2844444444444444444444444444444444L, 0.2393143352496832340206457574178191L, + 0.1184634425280945437571320203599587L }; + assert(degree <= maxPadeDegree); + MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); + result.setZero(T.rows(), T.rows()); + for (int k = 0; k < degree; ++k) + result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI) + .template triangularView().solve(TminusI); +} + +template +void MatrixLogarithmAtomic::computePade6(MatrixType& result, const MatrixType& T) +{ + const int degree = 6; + const RealScalar nodes[] = { 0.0337652428984239860938492227530027L, 0.1693953067668677431693002024900473L, + 0.3806904069584015456847491391596440L, 0.6193095930415984543152508608403560L, + 0.8306046932331322568306997975099527L, 0.9662347571015760139061507772469973L }; + const RealScalar weights[] = { 0.0856622461895851725201480710863665L, 0.1803807865240693037849167569188581L, + 0.2339569672863455236949351719947755L, 0.2339569672863455236949351719947755L, + 0.1803807865240693037849167569188581L, 0.0856622461895851725201480710863665L }; + assert(degree <= maxPadeDegree); + MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); + result.setZero(T.rows(), T.rows()); + for (int k = 0; k < degree; ++k) + result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI) + .template triangularView().solve(TminusI); +} + +template +void MatrixLogarithmAtomic::computePade7(MatrixType& result, const MatrixType& T) +{ + const int degree = 7; + const RealScalar nodes[] = { 0.0254460438286207377369051579760744L, 0.1292344072003027800680676133596058L, + 0.2970774243113014165466967939615193L, 0.5000000000000000000000000000000000L, + 0.7029225756886985834533032060384807L, 0.8707655927996972199319323866403942L, + 0.9745539561713792622630948420239256L }; + const RealScalar weights[] = { 0.0647424830844348466353057163395410L, 0.1398526957446383339507338857118898L, + 0.1909150252525594724751848877444876L, 0.2089795918367346938775510204081633L, + 0.1909150252525594724751848877444876L, 0.1398526957446383339507338857118898L, + 0.0647424830844348466353057163395410L }; + assert(degree <= maxPadeDegree); + MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); + result.setZero(T.rows(), T.rows()); + for (int k = 0; k < degree; ++k) + result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI) + .template triangularView().solve(TminusI); +} + +template +void MatrixLogarithmAtomic::computePade8(MatrixType& result, const MatrixType& T) +{ + const int degree = 8; + const RealScalar nodes[] = { 0.0198550717512318841582195657152635L, 0.1016667612931866302042230317620848L, + 
0.2372337950418355070911304754053768L, 0.4082826787521750975302619288199080L, + 0.5917173212478249024697380711800920L, 0.7627662049581644929088695245946232L, + 0.8983332387068133697957769682379152L, 0.9801449282487681158417804342847365L }; + const RealScalar weights[] = { 0.0506142681451881295762656771549811L, 0.1111905172266872352721779972131204L, + 0.1568533229389436436689811009933007L, 0.1813418916891809914825752246385978L, + 0.1813418916891809914825752246385978L, 0.1568533229389436436689811009933007L, + 0.1111905172266872352721779972131204L, 0.0506142681451881295762656771549811L }; + assert(degree <= maxPadeDegree); + MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); + result.setZero(T.rows(), T.rows()); + for (int k = 0; k < degree; ++k) + result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI) + .template triangularView().solve(TminusI); +} + +template +void MatrixLogarithmAtomic::computePade9(MatrixType& result, const MatrixType& T) +{ + const int degree = 9; + const RealScalar nodes[] = { 0.0159198802461869550822118985481636L, 0.0819844463366821028502851059651326L, + 0.1933142836497048013456489803292629L, 0.3378732882980955354807309926783317L, + 0.5000000000000000000000000000000000L, 0.6621267117019044645192690073216683L, + 0.8066857163502951986543510196707371L, 0.9180155536633178971497148940348674L, + 0.9840801197538130449177881014518364L }; + const RealScalar weights[] = { 0.0406371941807872059859460790552618L, 0.0903240803474287020292360156214564L, + 0.1303053482014677311593714347093164L, 0.1561735385200014200343152032922218L, + 0.1651196775006298815822625346434870L, 0.1561735385200014200343152032922218L, + 0.1303053482014677311593714347093164L, 0.0903240803474287020292360156214564L, + 0.0406371941807872059859460790552618L }; + assert(degree <= maxPadeDegree); + MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); + result.setZero(T.rows(), T.rows()); + for (int k = 0; k < degree; ++k) + result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI) + .template triangularView().solve(TminusI); +} + +template +void MatrixLogarithmAtomic::computePade10(MatrixType& result, const MatrixType& T) +{ + const int degree = 10; + const RealScalar nodes[] = { 0.0130467357414141399610179939577740L, 0.0674683166555077446339516557882535L, + 0.1602952158504877968828363174425632L, 0.2833023029353764046003670284171079L, + 0.4255628305091843945575869994351400L, 0.5744371694908156054424130005648600L, + 0.7166976970646235953996329715828921L, 0.8397047841495122031171636825574368L, + 0.9325316833444922553660483442117465L, 0.9869532642585858600389820060422260L }; + const RealScalar weights[] = { 0.0333356721543440687967844049466659L, 0.0747256745752902965728881698288487L, + 0.1095431812579910219977674671140816L, 0.1346333596549981775456134607847347L, + 0.1477621123573764350869464973256692L, 0.1477621123573764350869464973256692L, + 0.1346333596549981775456134607847347L, 0.1095431812579910219977674671140816L, + 0.0747256745752902965728881698288487L, 0.0333356721543440687967844049466659L }; + assert(degree <= maxPadeDegree); + MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); + result.setZero(T.rows(), T.rows()); + for (int k = 0; k < degree; ++k) + result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI) + .template triangularView().solve(TminusI); +} + +template +void MatrixLogarithmAtomic::computePade11(MatrixType& result, const MatrixType& T) +{ + const int degree = 11; 
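  // As in the lower-degree variants above, nodes[] and weights[] are the Gauss-Legendre quadrature
  // rule on [0,1] (here with 11 points); the loop below evaluates the Pade approximant of log(T)
  // through the integral representation
  //   log(T) = int_0^1 (T - I) * (I + s*(T - I))^{-1} ds,
  // i.e. result = sum_k weights[k] * (I + nodes[k]*(T - I))^{-1} * (T - I).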
+ const RealScalar nodes[] = { 0.0108856709269715035980309994385713L, 0.0564687001159523504624211153480364L, + 0.1349239972129753379532918739844233L, 0.2404519353965940920371371652706952L, + 0.3652284220238275138342340072995692L, 0.5000000000000000000000000000000000L, + 0.6347715779761724861657659927004308L, 0.7595480646034059079628628347293048L, + 0.8650760027870246620467081260155767L, 0.9435312998840476495375788846519636L, + 0.9891143290730284964019690005614287L }; + const RealScalar weights[] = { 0.0278342835580868332413768602212743L, 0.0627901847324523123173471496119701L, + 0.0931451054638671257130488207158280L, 0.1165968822959952399592618524215876L, + 0.1314022722551233310903444349452546L, 0.1364625433889503153572417641681711L, + 0.1314022722551233310903444349452546L, 0.1165968822959952399592618524215876L, + 0.0931451054638671257130488207158280L, 0.0627901847324523123173471496119701L, + 0.0278342835580868332413768602212743L }; + assert(degree <= maxPadeDegree); + MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); + result.setZero(T.rows(), T.rows()); + for (int k = 0; k < degree; ++k) + result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI) + .template triangularView().solve(TminusI); +} + +/** \ingroup MatrixFunctions_Module + * + * \brief Proxy for the matrix logarithm of some matrix (expression). + * + * \tparam Derived Type of the argument to the matrix function. + * + * This class holds the argument to the matrix function until it is + * assigned or evaluated for some other reason (so the argument + * should not be changed in the meantime). It is the return type of + * matrixBase::matrixLogarithm() and most of the time this is the + * only way it is used. + */ +template class MatrixLogarithmReturnValue +: public ReturnByValue > +{ +public: + + typedef typename Derived::Scalar Scalar; + typedef typename Derived::Index Index; + + /** \brief Constructor. + * + * \param[in] A %Matrix (expression) forming the argument of the matrix logarithm. + */ + MatrixLogarithmReturnValue(const Derived& A) : m_A(A) { } + + /** \brief Compute the matrix logarithm. + * + * \param[out] result Logarithm of \p A, where \A is as specified in the constructor. 
+ */ + template + inline void evalTo(ResultType& result) const + { + typedef typename Derived::PlainObject PlainObject; + typedef internal::traits Traits; + static const int RowsAtCompileTime = Traits::RowsAtCompileTime; + static const int ColsAtCompileTime = Traits::ColsAtCompileTime; + static const int Options = PlainObject::Options; + typedef std::complex::Real> ComplexScalar; + typedef Matrix DynMatrixType; + typedef MatrixLogarithmAtomic AtomicType; + AtomicType atomic; + + const PlainObject Aevaluated = m_A.eval(); + MatrixFunction mf(Aevaluated, atomic); + mf.compute(result); + } + + Index rows() const { return m_A.rows(); } + Index cols() const { return m_A.cols(); } + +private: + typename internal::nested::type m_A; + + MatrixLogarithmReturnValue& operator=(const MatrixLogarithmReturnValue&); +}; + +namespace internal { + template + struct traits > + { + typedef typename Derived::PlainObject ReturnType; + }; +} + + +/********** MatrixBase method **********/ + + +template +const MatrixLogarithmReturnValue MatrixBase::log() const +{ + eigen_assert(rows() == cols()); + return MatrixLogarithmReturnValue(derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_MATRIX_LOGARITHM diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h new file mode 100644 index 000000000..658cd334c --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h @@ -0,0 +1,499 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Jitse Niesen +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_MATRIX_SQUARE_ROOT +#define EIGEN_MATRIX_SQUARE_ROOT + +namespace Eigen { + +/** \ingroup MatrixFunctions_Module + * \brief Class for computing matrix square roots of upper quasi-triangular matrices. + * \tparam MatrixType type of the argument of the matrix square root, + * expected to be an instantiation of the Matrix class template. + * + * This class computes the square root of the upper quasi-triangular + * matrix stored in the upper Hessenberg part of the matrix passed to + * the constructor. + * + * \sa MatrixSquareRoot, MatrixSquareRootTriangular + */ +template +class MatrixSquareRootQuasiTriangular +{ + public: + + /** \brief Constructor. + * + * \param[in] A upper quasi-triangular matrix whose square root + * is to be computed. + * + * The class stores a reference to \p A, so it should not be + * changed (or destroyed) before compute() is called. 
+ */ + MatrixSquareRootQuasiTriangular(const MatrixType& A) + : m_A(A) + { + eigen_assert(A.rows() == A.cols()); + } + + /** \brief Compute the matrix square root + * + * \param[out] result square root of \p A, as specified in the constructor. + * + * Only the upper Hessenberg part of \p result is updated, the + * rest is not touched. See MatrixBase::sqrt() for details on + * how this computation is implemented. + */ + template void compute(ResultType &result); + + private: + typedef typename MatrixType::Index Index; + typedef typename MatrixType::Scalar Scalar; + + void computeDiagonalPartOfSqrt(MatrixType& sqrtT, const MatrixType& T); + void computeOffDiagonalPartOfSqrt(MatrixType& sqrtT, const MatrixType& T); + void compute2x2diagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i); + void compute1x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, + typename MatrixType::Index i, typename MatrixType::Index j); + void compute1x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, + typename MatrixType::Index i, typename MatrixType::Index j); + void compute2x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, + typename MatrixType::Index i, typename MatrixType::Index j); + void compute2x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, + typename MatrixType::Index i, typename MatrixType::Index j); + + template + static void solveAuxiliaryEquation(SmallMatrixType& X, const SmallMatrixType& A, + const SmallMatrixType& B, const SmallMatrixType& C); + + const MatrixType& m_A; +}; + +template +template +void MatrixSquareRootQuasiTriangular::compute(ResultType &result) +{ + // Compute Schur decomposition of m_A + const RealSchur schurOfA(m_A); + const MatrixType& T = schurOfA.matrixT(); + const MatrixType& U = schurOfA.matrixU(); + + // Compute square root of T + MatrixType sqrtT = MatrixType::Zero(m_A.rows(), m_A.rows()); + computeDiagonalPartOfSqrt(sqrtT, T); + computeOffDiagonalPartOfSqrt(sqrtT, T); + + // Compute square root of m_A + result = U * sqrtT * U.adjoint(); +} + +// pre: T is quasi-upper-triangular and sqrtT is a zero matrix of the same size +// post: the diagonal blocks of sqrtT are the square roots of the diagonal blocks of T +template +void MatrixSquareRootQuasiTriangular::computeDiagonalPartOfSqrt(MatrixType& sqrtT, + const MatrixType& T) +{ + const Index size = m_A.rows(); + for (Index i = 0; i < size; i++) { + if (i == size - 1 || T.coeff(i+1, i) == 0) { + eigen_assert(T(i,i) > 0); + sqrtT.coeffRef(i,i) = internal::sqrt(T.coeff(i,i)); + } + else { + compute2x2diagonalBlock(sqrtT, T, i); + ++i; + } + } +} + +// pre: T is quasi-upper-triangular and diagonal blocks of sqrtT are square root of diagonal blocks of T. +// post: sqrtT is the square root of T. 
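// Since T = S*S with S = sqrtT block upper triangular, each off-diagonal block satisfies the small
// Sylvester equation
//   S(i,i)*S(i,j) + S(i,j)*S(j,j) = T(i,j) - sum_{i<k<j} S(i,k)*S(k,j),
// so the blocks can be filled in column by column, moving upwards from the diagonal; the 1x1/2x2
// block combinations are dispatched to the compute*offDiagonalBlock() helpers below.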
+template +void MatrixSquareRootQuasiTriangular::computeOffDiagonalPartOfSqrt(MatrixType& sqrtT, + const MatrixType& T) +{ + const Index size = m_A.rows(); + for (Index j = 1; j < size; j++) { + if (T.coeff(j, j-1) != 0) // if T(j-1:j, j-1:j) is a 2-by-2 block + continue; + for (Index i = j-1; i >= 0; i--) { + if (i > 0 && T.coeff(i, i-1) != 0) // if T(i-1:i, i-1:i) is a 2-by-2 block + continue; + bool iBlockIs2x2 = (i < size - 1) && (T.coeff(i+1, i) != 0); + bool jBlockIs2x2 = (j < size - 1) && (T.coeff(j+1, j) != 0); + if (iBlockIs2x2 && jBlockIs2x2) + compute2x2offDiagonalBlock(sqrtT, T, i, j); + else if (iBlockIs2x2 && !jBlockIs2x2) + compute2x1offDiagonalBlock(sqrtT, T, i, j); + else if (!iBlockIs2x2 && jBlockIs2x2) + compute1x2offDiagonalBlock(sqrtT, T, i, j); + else if (!iBlockIs2x2 && !jBlockIs2x2) + compute1x1offDiagonalBlock(sqrtT, T, i, j); + } + } +} + +// pre: T.block(i,i,2,2) has complex conjugate eigenvalues +// post: sqrtT.block(i,i,2,2) is square root of T.block(i,i,2,2) +template +void MatrixSquareRootQuasiTriangular + ::compute2x2diagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i) +{ + // TODO: This case (2-by-2 blocks with complex conjugate eigenvalues) is probably hidden somewhere + // in EigenSolver. If we expose it, we could call it directly from here. + Matrix block = T.template block<2,2>(i,i); + EigenSolver > es(block); + sqrtT.template block<2,2>(i,i) + = (es.eigenvectors() * es.eigenvalues().cwiseSqrt().asDiagonal() * es.eigenvectors().inverse()).real(); +} + +// pre: block structure of T is such that (i,j) is a 1x1 block, +// all blocks of sqrtT to left of and below (i,j) are correct +// post: sqrtT(i,j) has the correct value +template +void MatrixSquareRootQuasiTriangular + ::compute1x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, + typename MatrixType::Index i, typename MatrixType::Index j) +{ + Scalar tmp = (sqrtT.row(i).segment(i+1,j-i-1) * sqrtT.col(j).segment(i+1,j-i-1)).value(); + sqrtT.coeffRef(i,j) = (T.coeff(i,j) - tmp) / (sqrtT.coeff(i,i) + sqrtT.coeff(j,j)); +} + +// similar to compute1x1offDiagonalBlock() +template +void MatrixSquareRootQuasiTriangular + ::compute1x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, + typename MatrixType::Index i, typename MatrixType::Index j) +{ + Matrix rhs = T.template block<1,2>(i,j); + if (j-i > 1) + rhs -= sqrtT.block(i, i+1, 1, j-i-1) * sqrtT.block(i+1, j, j-i-1, 2); + Matrix A = sqrtT.coeff(i,i) * Matrix::Identity(); + A += sqrtT.template block<2,2>(j,j).transpose(); + sqrtT.template block<1,2>(i,j).transpose() = A.fullPivLu().solve(rhs.transpose()); +} + +// similar to compute1x1offDiagonalBlock() +template +void MatrixSquareRootQuasiTriangular + ::compute2x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, + typename MatrixType::Index i, typename MatrixType::Index j) +{ + Matrix rhs = T.template block<2,1>(i,j); + if (j-i > 2) + rhs -= sqrtT.block(i, i+2, 2, j-i-2) * sqrtT.block(i+2, j, j-i-2, 1); + Matrix A = sqrtT.coeff(j,j) * Matrix::Identity(); + A += sqrtT.template block<2,2>(i,i); + sqrtT.template block<2,1>(i,j) = A.fullPivLu().solve(rhs); +} + +// similar to compute1x1offDiagonalBlock() +template +void MatrixSquareRootQuasiTriangular + ::compute2x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T, + typename MatrixType::Index i, typename MatrixType::Index j) +{ + Matrix A = sqrtT.template block<2,2>(i,i); + Matrix B = sqrtT.template block<2,2>(j,j); + Matrix C = T.template block<2,2>(i,j); + if (j-i > 2) + C -= sqrtT.block(i, i+2, 2, 
j-i-2) * sqrtT.block(i+2, j, j-i-2, 2); + Matrix X; + solveAuxiliaryEquation(X, A, B, C); + sqrtT.template block<2,2>(i,j) = X; +} + +// solves the equation A X + X B = C where all matrices are 2-by-2 +template +template +void MatrixSquareRootQuasiTriangular + ::solveAuxiliaryEquation(SmallMatrixType& X, const SmallMatrixType& A, + const SmallMatrixType& B, const SmallMatrixType& C) +{ + EIGEN_STATIC_ASSERT((internal::is_same >::value), + EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT); + + Matrix coeffMatrix = Matrix::Zero(); + coeffMatrix.coeffRef(0,0) = A.coeff(0,0) + B.coeff(0,0); + coeffMatrix.coeffRef(1,1) = A.coeff(0,0) + B.coeff(1,1); + coeffMatrix.coeffRef(2,2) = A.coeff(1,1) + B.coeff(0,0); + coeffMatrix.coeffRef(3,3) = A.coeff(1,1) + B.coeff(1,1); + coeffMatrix.coeffRef(0,1) = B.coeff(1,0); + coeffMatrix.coeffRef(0,2) = A.coeff(0,1); + coeffMatrix.coeffRef(1,0) = B.coeff(0,1); + coeffMatrix.coeffRef(1,3) = A.coeff(0,1); + coeffMatrix.coeffRef(2,0) = A.coeff(1,0); + coeffMatrix.coeffRef(2,3) = B.coeff(1,0); + coeffMatrix.coeffRef(3,1) = A.coeff(1,0); + coeffMatrix.coeffRef(3,2) = B.coeff(0,1); + + Matrix rhs; + rhs.coeffRef(0) = C.coeff(0,0); + rhs.coeffRef(1) = C.coeff(0,1); + rhs.coeffRef(2) = C.coeff(1,0); + rhs.coeffRef(3) = C.coeff(1,1); + + Matrix result; + result = coeffMatrix.fullPivLu().solve(rhs); + + X.coeffRef(0,0) = result.coeff(0); + X.coeffRef(0,1) = result.coeff(1); + X.coeffRef(1,0) = result.coeff(2); + X.coeffRef(1,1) = result.coeff(3); +} + + +/** \ingroup MatrixFunctions_Module + * \brief Class for computing matrix square roots of upper triangular matrices. + * \tparam MatrixType type of the argument of the matrix square root, + * expected to be an instantiation of the Matrix class template. + * + * This class computes the square root of the upper triangular matrix + * stored in the upper triangular part (including the diagonal) of + * the matrix passed to the constructor. + * + * \sa MatrixSquareRoot, MatrixSquareRootQuasiTriangular + */ +template +class MatrixSquareRootTriangular +{ + public: + MatrixSquareRootTriangular(const MatrixType& A) + : m_A(A) + { + eigen_assert(A.rows() == A.cols()); + } + + /** \brief Compute the matrix square root + * + * \param[out] result square root of \p A, as specified in the constructor. + * + * Only the upper triangular part (including the diagonal) of + * \p result is updated, the rest is not touched. See + * MatrixBase::sqrt() for details on how this computation is + * implemented. + */ + template void compute(ResultType &result); + + private: + const MatrixType& m_A; +}; + +template +template +void MatrixSquareRootTriangular::compute(ResultType &result) +{ + // Compute Schur decomposition of m_A + const ComplexSchur schurOfA(m_A); + const MatrixType& T = schurOfA.matrixT(); + const MatrixType& U = schurOfA.matrixU(); + + // Compute square root of T and store it in upper triangular part of result + // This uses that the square root of triangular matrices can be computed directly. 
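+  // Descriptive note (not in the upstream patch): the diagonal is set first,
+  // result(i,i) = sqrt(T(i,i)); every entry above the diagonal then follows from the recurrence
+  //   result(i,j) = (T(i,j) - sum_{i<k<j} result(i,k)*result(k,j)) / (result(i,i) + result(j,j)).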
+ result.resize(m_A.rows(), m_A.cols()); + typedef typename MatrixType::Index Index; + for (Index i = 0; i < m_A.rows(); i++) { + result.coeffRef(i,i) = internal::sqrt(T.coeff(i,i)); + } + for (Index j = 1; j < m_A.cols(); j++) { + for (Index i = j-1; i >= 0; i--) { + typedef typename MatrixType::Scalar Scalar; + // if i = j-1, then segment has length 0 so tmp = 0 + Scalar tmp = (result.row(i).segment(i+1,j-i-1) * result.col(j).segment(i+1,j-i-1)).value(); + // denominator may be zero if original matrix is singular + result.coeffRef(i,j) = (T.coeff(i,j) - tmp) / (result.coeff(i,i) + result.coeff(j,j)); + } + } + + // Compute square root of m_A as U * result * U.adjoint() + MatrixType tmp; + tmp.noalias() = U * result.template triangularView(); + result.noalias() = tmp * U.adjoint(); +} + + +/** \ingroup MatrixFunctions_Module + * \brief Class for computing matrix square roots of general matrices. + * \tparam MatrixType type of the argument of the matrix square root, + * expected to be an instantiation of the Matrix class template. + * + * \sa MatrixSquareRootTriangular, MatrixSquareRootQuasiTriangular, MatrixBase::sqrt() + */ +template ::Scalar>::IsComplex> +class MatrixSquareRoot +{ + public: + + /** \brief Constructor. + * + * \param[in] A matrix whose square root is to be computed. + * + * The class stores a reference to \p A, so it should not be + * changed (or destroyed) before compute() is called. + */ + MatrixSquareRoot(const MatrixType& A); + + /** \brief Compute the matrix square root + * + * \param[out] result square root of \p A, as specified in the constructor. + * + * See MatrixBase::sqrt() for details on how this computation is + * implemented. + */ + template void compute(ResultType &result); +}; + + +// ********** Partial specialization for real matrices ********** + +template +class MatrixSquareRoot +{ + public: + + MatrixSquareRoot(const MatrixType& A) + : m_A(A) + { + eigen_assert(A.rows() == A.cols()); + } + + template void compute(ResultType &result) + { + // Compute Schur decomposition of m_A + const RealSchur schurOfA(m_A); + const MatrixType& T = schurOfA.matrixT(); + const MatrixType& U = schurOfA.matrixU(); + + // Compute square root of T + MatrixSquareRootQuasiTriangular tmp(T); + MatrixType sqrtT = MatrixType::Zero(m_A.rows(), m_A.rows()); + tmp.compute(sqrtT); + + // Compute square root of m_A + result = U * sqrtT * U.adjoint(); + } + + private: + const MatrixType& m_A; +}; + + +// ********** Partial specialization for complex matrices ********** + +template +class MatrixSquareRoot +{ + public: + + MatrixSquareRoot(const MatrixType& A) + : m_A(A) + { + eigen_assert(A.rows() == A.cols()); + } + + template void compute(ResultType &result) + { + // Compute Schur decomposition of m_A + const ComplexSchur schurOfA(m_A); + const MatrixType& T = schurOfA.matrixT(); + const MatrixType& U = schurOfA.matrixU(); + + // Compute square root of T + MatrixSquareRootTriangular tmp(T); + MatrixType sqrtT = MatrixType::Zero(m_A.rows(), m_A.rows()); + tmp.compute(sqrtT); + + // Compute square root of m_A + result = U * sqrtT * U.adjoint(); + } + + private: + const MatrixType& m_A; +}; + + +/** \ingroup MatrixFunctions_Module + * + * \brief Proxy for the matrix square root of some matrix (expression). + * + * \tparam Derived Type of the argument to the matrix square root. + * + * This class holds the argument to the matrix square root until it + * is assigned or evaluated for some other reason (so the argument + * should not be changed in the meantime). 
It is the return type of + * MatrixBase::sqrt() and most of the time this is the only way it is + * used. + */ +template class MatrixSquareRootReturnValue +: public ReturnByValue > +{ + typedef typename Derived::Index Index; + public: + /** \brief Constructor. + * + * \param[in] src %Matrix (expression) forming the argument of the + * matrix square root. + */ + MatrixSquareRootReturnValue(const Derived& src) : m_src(src) { } + + /** \brief Compute the matrix square root. + * + * \param[out] result the matrix square root of \p src in the + * constructor. + */ + template + inline void evalTo(ResultType& result) const + { + const typename Derived::PlainObject srcEvaluated = m_src.eval(); + MatrixSquareRoot me(srcEvaluated); + me.compute(result); + } + + Index rows() const { return m_src.rows(); } + Index cols() const { return m_src.cols(); } + + protected: + const Derived& m_src; + private: + MatrixSquareRootReturnValue& operator=(const MatrixSquareRootReturnValue&); +}; + +namespace internal { +template +struct traits > +{ + typedef typename Derived::PlainObject ReturnType; +}; +} + +template +const MatrixSquareRootReturnValue MatrixBase::sqrt() const +{ + eigen_assert(rows() == cols()); + return MatrixSquareRootReturnValue(derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_MATRIX_FUNCTION diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/StemFunction.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/StemFunction.h index 260690b63..3de68ec3a 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/StemFunction.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/StemFunction.h @@ -25,6 +25,8 @@ #ifndef EIGEN_STEM_FUNCTION #define EIGEN_STEM_FUNCTION +namespace Eigen { + /** \ingroup MatrixFunctions_Module * \brief Stem functions corresponding to standard mathematical functions. 
*/ @@ -113,4 +115,6 @@ class StdStemFunctions }; // end of class StdStemFunctions +} // end namespace Eigen + #endif // EIGEN_STEM_FUNCTION diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MoreVectorization/MathFunctions.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MoreVectorization/MathFunctions.h index bc948d0bd..123f4016e 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MoreVectorization/MathFunctions.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MoreVectorization/MathFunctions.h @@ -26,6 +26,8 @@ #ifndef EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H #define EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H +namespace Eigen { + namespace internal { /** \internal \returns the arcsin of \a a (coeff-wise) */ @@ -99,8 +101,10 @@ template<> EIGEN_DONT_INLINE Packet4f pasin(Packet4f x) return _mm_xor_ps(z, sign_bit); } +#endif // EIGEN_VECTORIZE_SSE + } // end namespace internal -#endif +} // end namespace Eigen #endif // EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h index 37abb6117..aa9430359 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h @@ -28,6 +28,8 @@ #ifndef EIGEN_HYBRIDNONLINEARSOLVER_H #define EIGEN_HYBRIDNONLINEARSOLVER_H +namespace Eigen { + namespace HybridNonLinearSolverSpace { enum Status { Running = -1, @@ -602,6 +604,8 @@ HybridNonLinearSolver::solveNumericalDiff(FVectorType &x) return status; } -//vim: ai ts=4 sts=4 et sw=4 +} // end namespace Eigen + #endif // EIGEN_HYBRIDNONLINEARSOLVER_H +//vim: ai ts=4 sts=4 et sw=4 diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h index 0ae681b1c..1cb501a66 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h @@ -28,6 +28,7 @@ #ifndef EIGEN_LEVENBERGMARQUARDT__H #define EIGEN_LEVENBERGMARQUARDT__H +namespace Eigen { namespace LevenbergMarquardtSpace { enum Status { @@ -640,7 +641,7 @@ LevenbergMarquardt::lmdif1( NumericalDiff numDiff(functor); // embedded LevenbergMarquardt - LevenbergMarquardt > lm(numDiff); + LevenbergMarquardt, Scalar > lm(numDiff); lm.parameters.ftol = tol; lm.parameters.xtol = tol; lm.parameters.maxfev = 200*(n+1); @@ -651,6 +652,8 @@ LevenbergMarquardt::lmdif1( return info; } -//vim: ai ts=4 sts=4 et sw=4 +} // end namespace Eigen + #endif // EIGEN_LEVENBERGMARQUARDT__H +//vim: ai ts=4 sts=4 et sw=4 diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/chkder.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/chkder.h index bc0cb1880..fd3e0bc4a 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/chkder.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/chkder.h @@ -2,6 +2,8 @@ #define chkder_log10e 0.43429448190325182765 #define chkder_factor 100. 
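+// Descriptive note (not in the upstream patch): chkder_log10e is log10(e); both constants come
+// from the MINPACK-derived chkder() routine below, which checks a user-supplied Jacobian against
+// finite-difference estimates.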
+namespace Eigen { + namespace internal { template @@ -58,3 +60,4 @@ void chkder( } // end namespace internal +} // end namespace Eigen diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/covar.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/covar.h index 6c77916f5..c73a09645 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/covar.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/covar.h @@ -1,3 +1,5 @@ +namespace Eigen { + namespace internal { template @@ -63,3 +65,5 @@ void covar( } } // end namespace internal + +} // end namespace Eigen diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/dogleg.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/dogleg.h index cbdcf4b71..4fbc98bfc 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/dogleg.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/dogleg.h @@ -1,3 +1,5 @@ +namespace Eigen { + namespace internal { template @@ -98,3 +100,5 @@ algo_end: } } // end namespace internal + +} // end namespace Eigen diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h index 0a26c2061..1cabe69ae 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h @@ -1,3 +1,5 @@ +namespace Eigen { + namespace internal { template @@ -70,3 +72,5 @@ DenseIndex fdjac1( } } // end namespace internal + +} // end namespace Eigen diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/lmpar.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/lmpar.h index 62f4aabc9..cc1ca530f 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/lmpar.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/lmpar.h @@ -1,3 +1,5 @@ +namespace Eigen { + namespace internal { template @@ -288,3 +290,5 @@ void lmpar2( } } // end namespace internal + +} // end namespace Eigen diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h index cb1764a41..feafd62a8 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h @@ -1,3 +1,5 @@ +namespace Eigen { + namespace internal { // TODO : once qrsolv2 is removed, use ColPivHouseholderQR or PermutationMatrix instead of ipvt @@ -85,3 +87,5 @@ void qrsolv( } } // end namespace internal + +} // end namespace Eigen diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h index ffe505cd5..36ff700e9 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h @@ -1,3 +1,5 @@ +namespace Eigen { + namespace internal { // TODO : move this to GivensQR once there's such a thing in Eigen @@ -24,3 +26,5 @@ void r1mpyq(DenseIndex m, DenseIndex n, Scalar *a, const std::vector @@ -93,3 +95,5 @@ void r1updt( } } // end namespace internal + +} // end namespace Eigen diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h 
b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h index ab83f9b25..9ce079e22 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h @@ -1,3 +1,5 @@ +namespace Eigen { + namespace internal { template @@ -44,3 +46,4 @@ void rwupdt( } // end namespace internal +} // end namespace Eigen diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h index 52dc0ec01..8651585f2 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h @@ -28,6 +28,8 @@ #ifndef EIGEN_NUMERICAL_DIFF_H #define EIGEN_NUMERICAL_DIFF_H +namespace Eigen { + enum NumericalDiffMode { Forward, Central @@ -64,7 +66,7 @@ public: template NumericalDiff(const T0& a0, const T1& a1) : Functor(a0, a1), epsfcn(0) {} template - NumericalDiff(const T0& a0, const T1& a1, const T1& a2) : Functor(a0, a1, a2), epsfcn(0) {} + NumericalDiff(const T0& a0, const T1& a1, const T2& a2) : Functor(a0, a1, a2), epsfcn(0) {} enum { InputsAtCompileTime = Functor::InputsAtCompileTime, @@ -134,6 +136,8 @@ private: NumericalDiff& operator=(const NumericalDiff&); }; +} // end namespace Eigen + //vim: ai ts=4 sts=4 et sw=4 #endif // EIGEN_NUMERICAL_DIFF_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Polynomials/Companion.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Polynomials/Companion.h index 608951d3c..8936b8fad 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Polynomials/Companion.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Polynomials/Companion.h @@ -29,10 +29,12 @@ // * Eigen/Core // * Eigen/src/PolynomialSolver.h -#ifndef EIGEN_PARSED_BY_DOXYGEN +namespace Eigen { namespace internal { +#ifndef EIGEN_PARSED_BY_DOXYGEN + template T radix(){ return 2; } @@ -283,4 +285,6 @@ void companion<_Scalar,_Deg>::balance() } // end namespace internal +} // end namespace Eigen + #endif // EIGEN_COMPANION_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Polynomials/PolynomialSolver.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Polynomials/PolynomialSolver.h index 417b93df2..71295a105 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Polynomials/PolynomialSolver.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Polynomials/PolynomialSolver.h @@ -25,6 +25,8 @@ #ifndef EIGEN_POLYNOMIAL_SOLVER_H #define EIGEN_POLYNOMIAL_SOLVER_H +namespace Eigen { + /** \ingroup Polynomials_Module * \class PolynomialSolverBase. * @@ -394,4 +396,6 @@ class PolynomialSolver<_Scalar,1> : public PolynomialSolverBase<_Scalar,1> using PS_Base::m_roots; }; +} // end namespace Eigen + #endif // EIGEN_POLYNOMIAL_SOLVER_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Polynomials/PolynomialUtils.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Polynomials/PolynomialUtils.h index 65942c52a..1fb1ed139 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Polynomials/PolynomialUtils.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Polynomials/PolynomialUtils.h @@ -25,6 +25,8 @@ #ifndef EIGEN_POLYNOMIAL_UTILS_H #define EIGEN_POLYNOMIAL_UTILS_H +namespace Eigen { + /** \ingroup Polynomials_Module * \returns the evaluation of the polynomial at x using Horner algorithm. 
* @@ -149,5 +151,6 @@ void roots_to_monicPolynomial( const RootVector& rv, Polynomial& poly ) } } +} // end namespace Eigen #endif // EIGEN_POLYNOMIAL_UTILS_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h index 51537402e..ef36ac9b6 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SKYLINEINPLACELU_H #define EIGEN_SKYLINEINPLACELU_H +namespace Eigen { + /** \ingroup Skyline_Module * * \class SkylineInplaceLU @@ -360,4 +362,6 @@ bool SkylineInplaceLU::solve(const MatrixBase &b, MatrixBa return true; } +} // end namespace Eigen + #endif // EIGEN_SKYLINELU_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineMatrix.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineMatrix.h index 31810df08..98a19ce53 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineMatrix.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineMatrix.h @@ -28,6 +28,8 @@ #include "SkylineStorage.h" #include "SkylineMatrixBase.h" +namespace Eigen { + /** \ingroup Skyline_Module * * \class SkylineMatrix @@ -870,4 +872,6 @@ protected: const Index m_end; }; +} // end namespace Eigen + #endif // EIGEN_SkylineMatrix_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h index 4d0c2397c..72131eb3f 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h @@ -27,6 +27,8 @@ #include "SkylineUtil.h" +namespace Eigen { + /** \ingroup Skyline_Module * * \class SkylineMatrixBase @@ -220,4 +222,6 @@ protected: bool m_isRValue; }; +} // end namespace Eigen + #endif // EIGEN_SkylineMatrixBase_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineProduct.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineProduct.h index aeedc47ec..fb653b446 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineProduct.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineProduct.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SKYLINEPRODUCT_H #define EIGEN_SKYLINEPRODUCT_H +namespace Eigen { + template struct SkylineProductReturnType { typedef const typename internal::nested::type LhsNested; @@ -303,4 +305,6 @@ SkylineMatrixBase::operator*(const MatrixBase &other) con return typename SkylineProductReturnType::Type(derived(), other.derived()); } +} // end namespace Eigen + #endif // EIGEN_SKYLINEPRODUCT_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineStorage.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineStorage.h index 62806bfb6..5721dee90 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineStorage.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineStorage.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SKYLINE_STORAGE_H #define EIGEN_SKYLINE_STORAGE_H +namespace Eigen { + /** Stores a skyline set of values in three structures : * The diagonal elements * The upper elements @@ -267,4 +269,6 @@ public: }; +} // end namespace Eigen + #endif // EIGEN_COMPRESSED_STORAGE_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineUtil.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineUtil.h index e0512476f..5c5bd8bda 100644 --- 
a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineUtil.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineUtil.h @@ -25,6 +25,8 @@ #ifndef EIGEN_SKYLINEUTIL_H #define EIGEN_SKYLINEUTIL_H +namespace Eigen { + #ifdef NDEBUG #define EIGEN_DBG_SKYLINE(X) #else @@ -97,5 +99,6 @@ template class eval } // end namespace internal +} // end namespace Eigen #endif // EIGEN_SKYLINEUTIL_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h new file mode 100644 index 000000000..0cc6e3a06 --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h @@ -0,0 +1,129 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H +#define EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H + +namespace Eigen { + +/*************************************************************************** +* specialisation for DynamicSparseMatrix +***************************************************************************/ + +template +class SparseInnerVectorSet, Size> + : public SparseMatrixBase, Size> > +{ + typedef DynamicSparseMatrix<_Scalar, _Options, _Index> MatrixType; + public: + + enum { IsRowMajor = internal::traits::IsRowMajor }; + + EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet) + class InnerIterator: public MatrixType::InnerIterator + { + public: + inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer) + : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) + {} + inline Index row() const { return IsRowMajor ? m_outer : this->index(); } + inline Index col() const { return IsRowMajor ? 
this->index() : m_outer; } + protected: + Index m_outer; + }; + + inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize) + : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize) + { + eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) ); + } + + inline SparseInnerVectorSet(const MatrixType& matrix, Index outer) + : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size) + { + eigen_assert(Size!=Dynamic); + eigen_assert( (outer>=0) && (outer + inline SparseInnerVectorSet& operator=(const SparseMatrixBase& other) + { + if (IsRowMajor != ((OtherDerived::Flags&RowMajorBit)==RowMajorBit)) + { + // need to transpose => perform a block evaluation followed by a big swap + DynamicSparseMatrix aux(other); + *this = aux.markAsRValue(); + } + else + { + // evaluate/copy vector per vector + for (Index j=0; j aux(other.innerVector(j)); + m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data()); + } + } + return *this; + } + + inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other) + { + return operator=(other); + } + + Index nonZeros() const + { + Index count = 0; + for (Index j=0; j0); + return m_matrix.data()[m_outerStart].vale(m_matrix.data()[m_outerStart].size()-1); + } + +// template +// inline SparseInnerVectorSet& operator=(const SparseMatrixBase& other) +// { +// return *this; +// } + + EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + + protected: + + const typename MatrixType::Nested m_matrix; + Index m_outerStart; + const internal::variable_if_dynamic m_outerSize; + +}; + +} // end namespace Eigen + +#endif // EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/CholmodSupportLegacy.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/CholmodSupportLegacy.h deleted file mode 100644 index 676cd8574..000000000 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/CholmodSupportLegacy.h +++ /dev/null @@ -1,517 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2008-2009 Gael Guennebaud -// -// Eigen is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 3 of the License, or (at your option) any later version. -// -// Alternatively, you can redistribute it and/or -// modify it under the terms of the GNU General Public License as -// published by the Free Software Foundation; either version 2 of -// the License, or (at your option) any later version. -// -// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY -// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public -// License and a copy of the GNU General Public License along with -// Eigen. If not, see . 
- -#ifndef EIGEN_CHOLMODSUPPORT_LEGACY_H -#define EIGEN_CHOLMODSUPPORT_LEGACY_H - -namespace internal { - -template -void cholmod_configure_matrix_legacy(CholmodType& mat) -{ - if (internal::is_same::value) - { - mat.xtype = CHOLMOD_REAL; - mat.dtype = CHOLMOD_SINGLE; - } - else if (internal::is_same::value) - { - mat.xtype = CHOLMOD_REAL; - mat.dtype = CHOLMOD_DOUBLE; - } - else if (internal::is_same >::value) - { - mat.xtype = CHOLMOD_COMPLEX; - mat.dtype = CHOLMOD_SINGLE; - } - else if (internal::is_same >::value) - { - mat.xtype = CHOLMOD_COMPLEX; - mat.dtype = CHOLMOD_DOUBLE; - } - else - { - eigen_assert(false && "Scalar type not supported by CHOLMOD"); - } -} - -template -cholmod_sparse cholmod_map_eigen_to_sparse(_MatrixType& mat) -{ - typedef typename _MatrixType::Scalar Scalar; - cholmod_sparse res; - res.nzmax = mat.nonZeros(); - res.nrow = mat.rows();; - res.ncol = mat.cols(); - res.p = mat._outerIndexPtr(); - res.i = mat._innerIndexPtr(); - res.x = mat._valuePtr(); - res.xtype = CHOLMOD_REAL; - res.itype = CHOLMOD_INT; - res.sorted = 1; - res.packed = 1; - res.dtype = 0; - res.stype = -1; - - internal::cholmod_configure_matrix_legacy(res); - - - if (_MatrixType::Flags & SelfAdjoint) - { - if (_MatrixType::Flags & Upper) - res.stype = 1; - else if (_MatrixType::Flags & Lower) - res.stype = -1; - else - res.stype = 0; - } - else - res.stype = -1; // by default we consider the lower part - - return res; -} - -template -cholmod_dense cholmod_map_eigen_to_dense(MatrixBase& mat) -{ - EIGEN_STATIC_ASSERT((internal::traits::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); - typedef typename Derived::Scalar Scalar; - - cholmod_dense res; - res.nrow = mat.rows(); - res.ncol = mat.cols(); - res.nzmax = res.nrow * res.ncol; - res.d = Derived::IsVectorAtCompileTime ? 
mat.derived().size() : mat.derived().outerStride(); - res.x = mat.derived().data(); - res.z = 0; - - internal::cholmod_configure_matrix_legacy(res); - - return res; -} - -template -MappedSparseMatrix map_cholmod_sparse_to_eigen(cholmod_sparse& cm) -{ - return MappedSparseMatrix - (cm.nrow, cm.ncol, reinterpret_cast(cm.p)[cm.ncol], - reinterpret_cast(cm.p), reinterpret_cast(cm.i),reinterpret_cast(cm.x) ); -} - -} // namespace internal - -template -class SparseLLT<_MatrixType, Cholmod> : public SparseLLT<_MatrixType> -{ - protected: - typedef SparseLLT<_MatrixType> Base; - typedef typename Base::Scalar Scalar; - typedef typename Base::RealScalar RealScalar; - typedef typename Base::CholMatrixType CholMatrixType; - using Base::MatrixLIsDirty; - using Base::SupernodalFactorIsDirty; - using Base::m_flags; - using Base::m_matrix; - using Base::m_status; - - public: - typedef _MatrixType MatrixType; - typedef typename MatrixType::Index Index; - - SparseLLT(int flags = 0) - : Base(flags), m_cholmodFactor(0) - { - cholmod_start(&m_cholmod); - } - - SparseLLT(const MatrixType& matrix, int flags = 0) - : Base(flags), m_cholmodFactor(0) - { - cholmod_start(&m_cholmod); - compute(matrix); - } - - ~SparseLLT() - { - if (m_cholmodFactor) - cholmod_free_factor(&m_cholmodFactor, &m_cholmod); - cholmod_finish(&m_cholmod); - } - - inline const CholMatrixType& matrixL() const; - - template - bool solveInPlace(MatrixBase &b) const; - - template - inline const internal::solve_retval, Rhs> - solve(const MatrixBase& b) const - { - eigen_assert(true && "SparseLLT is not initialized."); - return internal::solve_retval, Rhs>(*this, b.derived()); - } - - void compute(const MatrixType& matrix); - - inline Index cols() const { return m_matrix.cols(); } - inline Index rows() const { return m_matrix.rows(); } - - inline const cholmod_factor* cholmodFactor() const - { return m_cholmodFactor; } - - inline cholmod_common* cholmodCommon() const - { return &m_cholmod; } - - bool succeeded() const; - - protected: - mutable cholmod_common m_cholmod; - cholmod_factor* m_cholmodFactor; -}; - - -namespace internal { - -template - struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef SparseLLT<_MatrixType, Cholmod> SpLLTDecType; - EIGEN_MAKE_SOLVE_HELPERS(SpLLTDecType,Rhs) - - template void evalTo(Dest& dst) const - { - //Index size = dec().cholmodFactor()->n; - eigen_assert((Index)dec().cholmodFactor()->n==rhs().rows()); - - cholmod_factor* cholmodFactor = const_cast(dec().cholmodFactor()); - cholmod_common* cholmodCommon = const_cast(dec().cholmodCommon()); - // this uses Eigen's triangular sparse solver - // if (m_status & MatrixLIsDirty) - // matrixL(); - // Base::solveInPlace(b); - // as long as our own triangular sparse solver is not fully optimal, - // let's use CHOLMOD's one: - cholmod_dense cdb = internal::cholmod_map_eigen_to_dense(rhs().const_cast_derived()); - cholmod_dense* x = cholmod_solve(CHOLMOD_A, cholmodFactor, &cdb, cholmodCommon); - - dst = Matrix::Map(reinterpret_cast(x->x), rhs().rows()); - - cholmod_free_dense(&x, cholmodCommon); - - } - -}; - -} // namespace internal - - - -template -void SparseLLT<_MatrixType,Cholmod>::compute(const _MatrixType& a) -{ - if (m_cholmodFactor) - { - cholmod_free_factor(&m_cholmodFactor, &m_cholmod); - m_cholmodFactor = 0; - } - - cholmod_sparse A = internal::cholmod_map_eigen_to_sparse(const_cast<_MatrixType&>(a)); -// m_cholmod.supernodal = CHOLMOD_AUTO; - // TODO -// if (m_flags&IncompleteFactorization) -// { -// m_cholmod.nmethods = 1; -// 
m_cholmod.method[0].ordering = CHOLMOD_NATURAL; -// m_cholmod.postorder = 0; -// } -// else -// { -// m_cholmod.nmethods = 1; -// m_cholmod.method[0].ordering = CHOLMOD_NATURAL; -// m_cholmod.postorder = 0; -// } -// m_cholmod.final_ll = 1; - m_cholmodFactor = cholmod_analyze(&A, &m_cholmod); - cholmod_factorize(&A, m_cholmodFactor, &m_cholmod); - - this->m_status = (this->m_status & ~Base::SupernodalFactorIsDirty) | Base::MatrixLIsDirty; -} - - -// TODO -template -bool SparseLLT<_MatrixType,Cholmod>::succeeded() const -{ return true; } - - - -template -inline const typename SparseLLT<_MatrixType,Cholmod>::CholMatrixType& -SparseLLT<_MatrixType,Cholmod>::matrixL() const -{ - if (this->m_status & Base::MatrixLIsDirty) - { - eigen_assert(!(this->m_status & Base::SupernodalFactorIsDirty)); - - cholmod_sparse* cmRes = cholmod_factor_to_sparse(m_cholmodFactor, &m_cholmod); - const_cast(this->m_matrix) = - internal::map_cholmod_sparse_to_eigen(*cmRes); - free(cmRes); - - this->m_status = (this->m_status & ~Base::MatrixLIsDirty); - } - return this->m_matrix; -} - - - - -template -template -bool SparseLLT<_MatrixType,Cholmod>::solveInPlace(MatrixBase &b) const -{ - //Index size = m_cholmodFactor->n; - eigen_assert((Index)m_cholmodFactor->n==b.rows()); - - // this uses Eigen's triangular sparse solver - // if (m_status & MatrixLIsDirty) - // matrixL(); - // Base::solveInPlace(b); - // as long as our own triangular sparse solver is not fully optimal, - // let's use CHOLMOD's one: - cholmod_dense cdb = internal::cholmod_map_eigen_to_dense(b); - - cholmod_dense* x = cholmod_solve(CHOLMOD_A, m_cholmodFactor, &cdb, &m_cholmod); - eigen_assert(x && "Eigen: cholmod_solve failed."); - - b = Matrix::Map(reinterpret_cast(x->x),b.rows()); - cholmod_free_dense(&x, &m_cholmod); - return true; -} - - - - - - - - - - - -template -class SparseLDLT<_MatrixType,Cholmod> : public SparseLDLT<_MatrixType> -{ - protected: - typedef SparseLDLT<_MatrixType> Base; - typedef typename Base::Scalar Scalar; - typedef typename Base::RealScalar RealScalar; - using Base::MatrixLIsDirty; - using Base::SupernodalFactorIsDirty; - using Base::m_flags; - using Base::m_matrix; - using Base::m_status; - - public: - typedef _MatrixType MatrixType; - typedef typename MatrixType::Index Index; - - SparseLDLT(int flags = 0) - : Base(flags), m_cholmodFactor(0) - { - cholmod_start(&m_cholmod); - } - - SparseLDLT(const _MatrixType& matrix, int flags = 0) - : Base(flags), m_cholmodFactor(0) - { - cholmod_start(&m_cholmod); - compute(matrix); - } - - ~SparseLDLT() - { - if (m_cholmodFactor) - cholmod_free_factor(&m_cholmodFactor, &m_cholmod); - cholmod_finish(&m_cholmod); - } - - inline const typename Base::CholMatrixType& matrixL(void) const; - - template - void solveInPlace(MatrixBase &b) const; - - template - inline const internal::solve_retval, Rhs> - solve(const MatrixBase& b) const - { - eigen_assert(true && "SparseLDLT is not initialized."); - return internal::solve_retval, Rhs>(*this, b.derived()); - } - - void compute(const _MatrixType& matrix); - - inline Index cols() const { return m_matrix.cols(); } - inline Index rows() const { return m_matrix.rows(); } - - inline const cholmod_factor* cholmodFactor() const - { return m_cholmodFactor; } - - inline cholmod_common* cholmodCommon() const - { return &m_cholmod; } - - bool succeeded() const; - - protected: - mutable cholmod_common m_cholmod; - cholmod_factor* m_cholmodFactor; -}; - - - -namespace internal { - -template - struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef 
SparseLDLT<_MatrixType, Cholmod> SpLDLTDecType; - EIGEN_MAKE_SOLVE_HELPERS(SpLDLTDecType,Rhs) - - template void evalTo(Dest& dst) const - { - //Index size = dec().cholmodFactor()->n; - eigen_assert((Index)dec().cholmodFactor()->n==rhs().rows()); - - cholmod_factor* cholmodFactor = const_cast(dec().cholmodFactor()); - cholmod_common* cholmodCommon = const_cast(dec().cholmodCommon()); - // this uses Eigen's triangular sparse solver - // if (m_status & MatrixLIsDirty) - // matrixL(); - // Base::solveInPlace(b); - // as long as our own triangular sparse solver is not fully optimal, - // let's use CHOLMOD's one: - cholmod_dense cdb = internal::cholmod_map_eigen_to_dense(rhs().const_cast_derived()); - cholmod_dense* x = cholmod_solve(CHOLMOD_LDLt, cholmodFactor, &cdb, cholmodCommon); - - dst = Matrix::Map(reinterpret_cast(x->x), rhs().rows()); - cholmod_free_dense(&x, cholmodCommon); - - } - -}; - - -} // namespace internal - -template -void SparseLDLT<_MatrixType,Cholmod>::compute(const _MatrixType& a) -{ - if (m_cholmodFactor) - { - cholmod_free_factor(&m_cholmodFactor, &m_cholmod); - m_cholmodFactor = 0; - } - - cholmod_sparse A = internal::cholmod_map_eigen_to_sparse(const_cast<_MatrixType&>(a)); - - //m_cholmod.supernodal = CHOLMOD_AUTO; - m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; - //m_cholmod.supernodal = CHOLMOD_SUPERNODAL; - // TODO - if (this->m_flags & IncompleteFactorization) - { - m_cholmod.nmethods = 1; - //m_cholmod.method[0].ordering = CHOLMOD_NATURAL; - m_cholmod.method[0].ordering = CHOLMOD_COLAMD; - m_cholmod.postorder = 1; - } - else - { - m_cholmod.nmethods = 1; - m_cholmod.method[0].ordering = CHOLMOD_NATURAL; - m_cholmod.postorder = 0; - } - m_cholmod.final_ll = 0; - m_cholmodFactor = cholmod_analyze(&A, &m_cholmod); - cholmod_factorize(&A, m_cholmodFactor, &m_cholmod); - - this->m_status = (this->m_status & ~Base::SupernodalFactorIsDirty) | Base::MatrixLIsDirty; -} - - -// TODO -template -bool SparseLDLT<_MatrixType,Cholmod>::succeeded() const -{ return true; } - - -template -inline const typename SparseLDLT<_MatrixType>::CholMatrixType& -SparseLDLT<_MatrixType,Cholmod>::matrixL() const -{ - if (this->m_status & Base::MatrixLIsDirty) - { - eigen_assert(!(this->m_status & Base::SupernodalFactorIsDirty)); - - cholmod_sparse* cmRes = cholmod_factor_to_sparse(m_cholmodFactor, &m_cholmod); - const_cast(this->m_matrix) = MappedSparseMatrix(*cmRes); - free(cmRes); - - this->m_status = (this->m_status & ~Base::MatrixLIsDirty); - } - return this->m_matrix; -} - - - - - - -template -template -void SparseLDLT<_MatrixType,Cholmod>::solveInPlace(MatrixBase &b) const -{ - //Index size = m_cholmodFactor->n; - eigen_assert((Index)m_cholmodFactor->n == b.rows()); - - // this uses Eigen's triangular sparse solver - // if (m_status & MatrixLIsDirty) - // matrixL(); - // Base::solveInPlace(b); - // as long as our own triangular sparse solver is not fully optimal, - // let's use CHOLMOD's one: - cholmod_dense cdb = internal::cholmod_map_eigen_to_dense(b); - cholmod_dense* x = cholmod_solve(CHOLMOD_A, m_cholmodFactor, &cdb, &m_cholmod); - b = Matrix::Map(reinterpret_cast(x->x),b.rows()); - cholmod_free_dense(&x, &m_cholmod); -} - - - - - - -#endif // EIGEN_CHOLMODSUPPORT_LEGACY_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/DynamicSparseMatrix.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h similarity index 87% rename from gtsam/3rdparty/Eigen/Eigen/src/Sparse/DynamicSparseMatrix.h rename to 
gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h index 93e75f4c6..151d46538 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/DynamicSparseMatrix.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h @@ -25,7 +25,11 @@ #ifndef EIGEN_DYNAMIC_SPARSEMATRIX_H #define EIGEN_DYNAMIC_SPARSEMATRIX_H -/** \class DynamicSparseMatrix +namespace Eigen { + +/** \deprecated use a SparseMatrix in an uncompressed mode + * + * \class DynamicSparseMatrix * * \brief A sparse matrix class designed for matrix assembly purpose * @@ -64,7 +68,7 @@ struct traits > } template -class DynamicSparseMatrix + class DynamicSparseMatrix : public SparseMatrixBase > { public: @@ -84,7 +88,7 @@ class DynamicSparseMatrix typedef DynamicSparseMatrix TransposedSparseMatrix; Index m_innerSize; - std::vector > m_data; + std::vector > m_data; public: @@ -94,8 +98,8 @@ class DynamicSparseMatrix inline Index outerSize() const { return static_cast(m_data.size()); } inline Index innerNonZeros(Index j) const { return m_data[j].size(); } - std::vector >& _data() { return m_data; } - const std::vector >& _data() const { return m_data; } + std::vector >& _data() { return m_data; } + const std::vector >& _data() const { return m_data; } /** \returns the coefficient value at given position \a row, \a col * This operation involes a log(rho*outer_size) binary search. @@ -119,6 +123,7 @@ class DynamicSparseMatrix } class InnerIterator; + class ReverseInnerIterator; void setZero() { @@ -232,20 +237,23 @@ class DynamicSparseMatrix } } - inline DynamicSparseMatrix() + /** The class DynamicSparseMatrix is deprectaed */ + EIGEN_DEPRECATED inline DynamicSparseMatrix() : m_innerSize(0), m_data(0) { eigen_assert(innerSize()==0 && outerSize()==0); } - inline DynamicSparseMatrix(Index rows, Index cols) + /** The class DynamicSparseMatrix is deprectaed */ + EIGEN_DEPRECATED inline DynamicSparseMatrix(Index rows, Index cols) : m_innerSize(0) { resize(rows, cols); } + /** The class DynamicSparseMatrix is deprectaed */ template - explicit inline DynamicSparseMatrix(const SparseMatrixBase& other) + EIGEN_DEPRECATED explicit inline DynamicSparseMatrix(const SparseMatrixBase& other) : m_innerSize(0) { Base::operator=(other.derived()); @@ -325,12 +333,12 @@ class DynamicSparseMatrix # ifdef EIGEN_DYNAMICSPARSEMATRIX_PLUGIN # include EIGEN_DYNAMICSPARSEMATRIX_PLUGIN # endif -}; + }; template -class DynamicSparseMatrix::InnerIterator : public SparseVector::InnerIterator +class DynamicSparseMatrix::InnerIterator : public SparseVector::InnerIterator { - typedef typename SparseVector::InnerIterator Base; + typedef typename SparseVector::InnerIterator Base; public: InnerIterator(const DynamicSparseMatrix& mat, Index outer) : Base(mat.m_data[outer]), m_outer(outer) @@ -343,4 +351,22 @@ class DynamicSparseMatrix::InnerIterator : public Sparse const Index m_outer; }; +template +class DynamicSparseMatrix::ReverseInnerIterator : public SparseVector::ReverseInnerIterator +{ + typedef typename SparseVector::ReverseInnerIterator Base; + public: + ReverseInnerIterator(const DynamicSparseMatrix& mat, Index outer) + : Base(mat.m_data[outer]), m_outer(outer) + {} + + inline Index row() const { return IsRowMajor ? m_outer : Base::index(); } + inline Index col() const { return IsRowMajor ? 
Base::index() : m_outer; } + + protected: + const Index m_outer; +}; + +} // end namespace Eigen + #endif // EIGEN_DYNAMIC_SPARSEMATRIX_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/MarketIO.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/MarketIO.h new file mode 100644 index 000000000..9cfe1d9f5 --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/MarketIO.h @@ -0,0 +1,288 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// Copyright (C) 2012 Desire NUENTSA WAKAM +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSE_MARKET_IO_H +#define EIGEN_SPARSE_MARKET_IO_H + +#include + +namespace Eigen { + +namespace internal +{ + template + inline bool GetMarketLine (std::stringstream& line, int& M, int& N, int& i, int& j, Scalar& value) + { + line >> i >> j >> value; + i--; + j--; + if(i>=0 && j>=0 && i + inline bool GetMarketLine (std::stringstream& line, int& M, int& N, int& i, int& j, std::complex& value) + { + Scalar valR, valI; + line >> i >> j >> valR >> valI; + i--; + j--; + if(i>=0 && j>=0 && i(valR, valI); + return true; + } + else + return false; + } + + template + inline void GetVectorElt (const std::string& line, RealScalar& val) + { + std::istringstream newline(line); + newline >> val; + } + + template + inline void GetVectorElt (const std::string& line, std::complex& val) + { + RealScalar valR, valI; + std::istringstream newline(line); + newline >> valR >> valI; + val = std::complex(valR, valI); + } + + template + inline void putMarketHeader(std::string& header,int sym) + { + header= "%%MatrixMarket matrix coordinate "; + if(internal::is_same >::value || internal::is_same >::value) + { + header += " complex"; + if(sym == Symmetric) header += " symmetric"; + else if (sym == SelfAdjoint) header += " Hermitian"; + else header += " general"; + } + else + { + header += " real"; + if(sym == Symmetric) header += " symmetric"; + else header += " general"; + } + } + + template + inline void PutMatrixElt(Scalar value, int row, int col, std::ofstream& out) + { + out << row << " "<< col << " " << value << "\n"; + } + template + inline void PutMatrixElt(std::complex value, int row, int col, std::ofstream& out) + { + out << row << " " << col << " " << value.real() << " " << value.imag() << "\n"; + } + + + template + inline void putVectorElt(Scalar value, std::ofstream& out) + { + out << value << "\n"; + } + template + inline void putVectorElt(std::complex value, std::ofstream& out) + { + out << value.real << " " << value.imag()<< "\n"; + } + +} 
// end namepsace internal + +inline bool getMarketHeader(const std::string& filename, int& sym, bool& iscomplex, bool& isvector) +{ + sym = 0; + isvector = false; + std::ifstream in(filename.c_str(),std::ios::in); + if(!in) + return false; + + std::string line; + // The matrix header is always the first line in the file + std::getline(in, line); assert(in.good()); + + std::stringstream fmtline(line); + std::string substr[5]; + fmtline>> substr[0] >> substr[1] >> substr[2] >> substr[3] >> substr[4]; + if(substr[2].compare("array") == 0) isvector = true; + if(substr[3].compare("complex") == 0) iscomplex = true; + if(substr[4].compare("symmetric") == 0) sym = Symmetric; + else if (substr[4].compare("Hermitian") == 0) sym = SelfAdjoint; + + return true; +} + +template +bool loadMarket(SparseMatrixType& mat, const std::string& filename) +{ + typedef typename SparseMatrixType::Scalar Scalar; + std::ifstream input(filename.c_str(),std::ios::in); + if(!input) + return false; + + const int maxBuffersize = 2048; + char buffer[maxBuffersize]; + + bool readsizes = false; + + typedef Triplet T; + std::vector elements; + + int M(-1), N(-1), NNZ(-1); + int count = 0; + while(input.getline(buffer, maxBuffersize)) + { + // skip comments + //NOTE An appropriate test should be done on the header to get the symmetry + if(buffer[0]=='%') + continue; + + std::stringstream line(buffer); + + if(!readsizes) + { + line >> M >> N >> NNZ; + if(M > 0 && N > 0 && NNZ > 0) + { + readsizes = true; + std::cout << "sizes: " << M << "," << N << "," << NNZ << "\n"; + mat.resize(M,N); + mat.reserve(NNZ); + } + } + else + { + int i(-1), j(-1); + Scalar value; + if( internal::GetMarketLine(line, M, N, i, j, value) ) + { + ++ count; + elements.push_back(T(i,j,value)); + } + else + std::cerr << "Invalid read: " << i << "," << j << "\n"; + } + } + mat.setFromTriplets(elements.begin(), elements.end()); + if(count!=NNZ) + std::cerr << count << "!=" << NNZ << "\n"; + + input.close(); + return true; +} + +template +bool loadMarketVector(VectorType& vec, const std::string& filename) +{ + typedef typename VectorType::Scalar Scalar; + std::ifstream in(filename.c_str(), std::ios::in); + if(!in) + return false; + + std::string line; + int n(0), col(0); + do + { // Skip comments + std::getline(in, line); assert(in.good()); + } while (line[0] == '%'); + std::istringstream newline(line); + newline >> n >> col; + assert(n>0 && col>0); + vec.resize(n); + int i = 0; + Scalar value; + while ( std::getline(in, line) && (i < n) ){ + internal::GetVectorElt(line, value); + vec(i++) = value; + } + in.close(); + if (i!=n){ + std::cerr<< "Unable to read all elements from file " << filename << "\n"; + return false; + } + return true; +} + +template +bool saveMarket(const SparseMatrixType& mat, const std::string& filename, int sym = 0) +{ + typedef typename SparseMatrixType::Scalar Scalar; + std::ofstream out(filename.c_str(),std::ios::out); + if(!out) + return false; + + out.flags(std::ios_base::scientific); + out.precision(64); + std::string header; + internal::putMarketHeader(header, sym); + out << header << std::endl; + out << mat.rows() << " " << mat.cols() << " " << mat.nonZeros() << "\n"; + int count = 0; + for(int j=0; j +bool saveMarketVector (const VectorType& vec, const std::string& filename) +{ + typedef typename VectorType::Scalar Scalar; + std::ofstream out(filename.c_str(),std::ios::out); + if(!out) + return false; + + out.flags(std::ios_base::scientific); + out.precision(64); + if(internal::is_same >::value || internal::is_same >::value) + 
out << "%%MatrixMarket matrix array complex general\n"; + else + out << "%%MatrixMarket matrix array real general\n"; + out << vec.size() << " "<< 1 << "\n"; + for (int i=0; i < vec.size(); i++){ + internal::putVectorElt(vec(i), out); + } + out.close(); + return true; +} + +} // end namespace Eigen + +#endif // EIGEN_SPARSE_MARKET_IO_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h new file mode 100644 index 000000000..3c34effca --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h @@ -0,0 +1,236 @@ + +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Desire NUENTSA WAKAM +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_BROWSE_MATRICES_H +#define EIGEN_BROWSE_MATRICES_H + +namespace Eigen { + +enum { + SPD = 0x100, + NonSymmetric = 0x0 +}; + +/** + * @brief Iterator to browse matrices from a specified folder + * + * This is used to load all the matrices from a folder. + * The matrices should be in Matrix Market format + * It is assumed that the matrices are named as matname.mtx + * and matname_SPD.mtx if the matrix is Symmetric and positive definite (or Hermitian) + * The right hand side vectors are loaded as well, if they exist. + * They should be named as matname_b.mtx. + * Note that the right hand side for a SPD matrix is named as matname_SPD_b.mtx + * + * Sometimes a reference solution is available. 
In this case, it should be named as matname_x.mtx + * + * Sample code + * \code + * + * \endcode + * + * \tparam Scalar The scalar type + */ +template +class MatrixMarketIterator +{ + public: + typedef Matrix VectorType; + typedef SparseMatrix MatrixType; + + public: + MatrixMarketIterator(const std::string folder):m_sym(0),m_isvalid(false),m_matIsLoaded(false),m_hasRhs(false),m_hasrefX(false),m_folder(folder) + { + m_folder_id = opendir(folder.c_str()); + if (!m_folder_id){ + m_isvalid = false; + std::cerr << "The provided Matrix folder could not be opened \n\n"; + abort(); + } + Getnextvalidmatrix(); + } + + ~MatrixMarketIterator() + { + if (m_folder_id) closedir(m_folder_id); + } + + inline MatrixMarketIterator& operator++() + { + m_matIsLoaded = false; + m_hasrefX = false; + m_hasRhs = false; + Getnextvalidmatrix(); + return *this; + } + inline operator bool() const { return m_isvalid;} + + /** Return the sparse matrix corresponding to the current file */ + inline MatrixType& matrix() + { + // Read the matrix + if (m_matIsLoaded) return m_mat; + + std::string matrix_file = m_folder + "/" + m_matname + ".mtx"; + if ( !loadMarket(m_mat, matrix_file)) + { + m_matIsLoaded = false; + return m_mat; + } + m_matIsLoaded = true; + + if (m_sym != NonSymmetric) + { // Store the upper part of the matrix. It is needed by the solvers dealing with nonsymmetric matrices ?? + MatrixType B; + B = m_mat; + m_mat = B.template selfadjointView(); + } + return m_mat; + } + + /** Return the right hand side corresponding to the current matrix. + * If the rhs file is not provided, a random rhs is generated + */ + inline VectorType& rhs() + { + // Get the right hand side + if (m_hasRhs) return m_rhs; + + std::string rhs_file; + rhs_file = m_folder + "/" + m_matname + "_b.mtx"; // The pattern is matname_b.mtx + m_hasRhs = Fileexists(rhs_file); + if (m_hasRhs) + { + m_rhs.resize(m_mat.cols()); + m_hasRhs = loadMarketVector(m_rhs, rhs_file); + } + if (!m_hasRhs) + { + // Generate a random right hand side + if (!m_matIsLoaded) this->matrix(); + m_refX.resize(m_mat.cols()); + m_refX.setRandom(); + m_rhs = m_mat * m_refX; + m_hasrefX = true; + m_hasRhs = true; + } + return m_rhs; + } + + /** Return a reference solution + * If it is not provided and if the right hand side is not available + * then refX is randomly generated such that A*refX = b + * where A and b are the matrix and the rhs. 
+ * Note that when a rhs is provided, refX is not available + */ + inline VectorType& refX() + { + // Check if a reference solution is provided + if (m_hasrefX) return m_refX; + + std::string lhs_file; + lhs_file = m_folder + "/" + m_matname + "_x.mtx"; + m_hasrefX = Fileexists(lhs_file); + if (m_hasrefX) + { + m_refX.resize(m_mat.cols()); + m_hasrefX = loadMarketVector(m_refX, lhs_file); + } + return m_refX; + } + + inline std::string& matname() { return m_matname; } + + inline int sym() { return m_sym; } + + inline bool hasRhs() {return m_hasRhs; } + inline bool hasrefX() {return m_hasrefX; } + + protected: + + inline bool Fileexists(std::string file) + { + std::ifstream file_id(file.c_str()); + if (!file_id.good() ) + { + return false; + } + else + { + file_id.close(); + return true; + } + } + + void Getnextvalidmatrix( ) + { + m_isvalid = false; + // Here, we return with the next valid matrix in the folder + while ( (m_curs_id = readdir(m_folder_id)) != NULL) { + m_isvalid = false; + std::string curfile; + curfile = m_folder + "/" + m_curs_id->d_name; + // Discard if it is a folder + if (m_curs_id->d_type == DT_DIR) continue; //FIXME This may not be available on non BSD systems +// struct stat st_buf; +// stat (curfile.c_str(), &st_buf); +// if (S_ISDIR(st_buf.st_mode)) continue; + + // Determine from the header if it is a matrix or a right hand side + bool isvector,iscomplex; + if(!getMarketHeader(curfile,m_sym,iscomplex,isvector)) continue; + if(isvector) continue; + + // Get the matrix name + std::string filename = m_curs_id->d_name; + m_matname = filename.substr(0, filename.length()-4); + + // Find if the matrix is SPD + size_t found = m_matname.find("SPD"); + if( (found!=std::string::npos) && (m_sym != NonSymmetric) ) + m_sym = SPD; + + m_isvalid = true; + break; + } + } + int m_sym; // Symmetry of the matrix + MatrixType m_mat; // Current matrix + VectorType m_rhs; // Current vector + VectorType m_refX; // The reference solution, if exists + std::string m_matname; // Matrix Name + bool m_isvalid; + bool m_matIsLoaded; // Determine if the matrix has already been loaded from the file + bool m_hasRhs; // The right hand side exists + bool m_hasrefX; // A reference solution is provided + std::string m_folder; + DIR * m_folder_id; + struct dirent *m_curs_id; + +}; + +} // end namespace Eigen + +#endif diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/RandomSetter.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/RandomSetter.h index 4ea41af85..9328c60cf 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/RandomSetter.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/RandomSetter.h @@ -25,6 +25,8 @@ #ifndef EIGEN_RANDOMSETTER_H #define EIGEN_RANDOMSETTER_H +namespace Eigen { + /** Represents a std::map * * \see RandomSetter @@ -180,9 +182,7 @@ class RandomSetter enum { SwapStorage = 1 - MapTraits::IsSorted, TargetRowMajor = (SparseMatrixType::Flags & RowMajorBit) ? 1 : 0, - SetterRowMajor = SwapStorage ? 1-TargetRowMajor : TargetRowMajor, - IsUpper = SparseMatrixType::Flags & Upper, - IsLower = SparseMatrixType::Flags & Lower + SetterRowMajor = SwapStorage ? 
1-TargetRowMajor : TargetRowMajor }; public: @@ -227,6 +227,7 @@ class RandomSetter if (!SwapStorage) // also means the map is sorted { mp_target->setZero(); + mp_target->makeCompressed(); mp_target->reserve(nonZeros()); Index prevOuter = -1; for (Index k=0; kouterSize(); ++j) { Index tmp = positions[j]; - mp_target->_outerIndexPtr()[j] = count; + mp_target->outerIndexPtr()[j] = count; positions[j] = count; count += tmp; } - mp_target->_outerIndexPtr()[mp_target->outerSize()] = count; + mp_target->makeCompressed(); + mp_target->outerIndexPtr()[mp_target->outerSize()] = count; mp_target->resizeNonZeros(count); // pass 2 for (Index k=0; k_outerIndexPtr()[outer]; + Index posStart = mp_target->outerIndexPtr()[outer]; Index i = (positions[outer]++) - 1; - while ( (i >= posStart) && (mp_target->_innerIndexPtr()[i] > inner) ) + while ( (i >= posStart) && (mp_target->innerIndexPtr()[i] > inner) ) { - mp_target->_valuePtr()[i+1] = mp_target->_valuePtr()[i]; - mp_target->_innerIndexPtr()[i+1] = mp_target->_innerIndexPtr()[i]; + mp_target->valuePtr()[i+1] = mp_target->valuePtr()[i]; + mp_target->innerIndexPtr()[i+1] = mp_target->innerIndexPtr()[i]; --i; } - mp_target->_innerIndexPtr()[i+1] = inner; - mp_target->_valuePtr()[i+1] = it->second.value; + mp_target->innerIndexPtr()[i+1] = inner; + mp_target->valuePtr()[i+1] = it->second.value; } } } @@ -305,8 +307,6 @@ class RandomSetter /** \returns a reference to the coefficient at given coordinates \a row, \a col */ Scalar& operator() (Index row, Index col) { - eigen_assert(((!IsUpper) || (row<=col)) && "Invalid access to an upper triangular matrix"); - eigen_assert(((!IsLower) || (col<=row)) && "Invalid access to an upper triangular matrix"); const Index outer = SetterRowMajor ? row : col; const Index inner = SetterRowMajor ? col : row; const Index outerMajor = outer >> OuterPacketBits; // index of the packet/map @@ -337,4 +337,6 @@ class RandomSetter unsigned char m_keyBitsOffset; }; +} // end namespace Eigen + #endif // EIGEN_RANDOMSETTER_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SimplicialCholesky.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SimplicialCholesky.h deleted file mode 100644 index 6af6407c7..000000000 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SimplicialCholesky.h +++ /dev/null @@ -1,477 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2008-2010 Gael Guennebaud -// -// Eigen is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 3 of the License, or (at your option) any later version. -// -// Alternatively, you can redistribute it and/or -// modify it under the terms of the GNU General Public License as -// published by the Free Software Foundation; either version 2 of -// the License, or (at your option) any later version. -// -// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY -// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public -// License and a copy of the GNU General Public License along with -// Eigen. If not, see . 
- -/* - -NOTE: the _symbolic, and _numeric functions has been adapted from - the LDL library: - -LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved. - -LDL License: - - Your use or distribution of LDL or any modified version of - LDL implies that you agree to this License. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA - - Permission is hereby granted to use or copy this program under the - terms of the GNU LGPL, provided that the Copyright, this License, - and the Availability of the original version is retained on all copies. - User documentation of any code that uses this code or any modified - version of this code must cite the Copyright, this License, the - Availability note, and "Used by permission." Permission to modify - the code and to distribute modified code is granted, provided the - Copyright, this License, and the Availability note are retained, - and a notice that the code was modified is included. - */ - -#ifndef EIGEN_SIMPLICIAL_CHOLESKY_H -#define EIGEN_SIMPLICIAL_CHOLESKY_H - -enum SimplicialCholeskyMode { - SimplicialCholeskyLLt, - SimplicialCholeskyLDLt -}; - -/** \brief A direct sparse Cholesky factorization - * - * This class allows to solve for A.X = B sparse linear problems via a LL^T or LDL^T Cholesky factorization. - * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices - * X and B can be either dense or sparse. - * - * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> - * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower - * or Upper. Default is Lower. - * - */ -template -class SimplicialCholesky -{ - public: - typedef _MatrixType MatrixType; - enum { UpLo = _UpLo }; - typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; - typedef SparseMatrix CholMatrixType; - typedef Matrix VectorType; - - public: - - SimplicialCholesky() - : m_info(Success), m_isInitialized(false), m_LDLt(true) - {} - - SimplicialCholesky(const MatrixType& matrix) - : m_info(Success), m_isInitialized(false), m_LDLt(true) - { - compute(matrix); - } - - ~SimplicialCholesky() - { - } - - inline Index cols() const { return m_matrix.cols(); } - inline Index rows() const { return m_matrix.rows(); } - - SimplicialCholesky& setMode(SimplicialCholeskyMode mode) - { - switch(mode) - { - case SimplicialCholeskyLLt: - m_LDLt = false; - break; - case SimplicialCholeskyLDLt: - m_LDLt = true; - break; - default: - break; - } - - return *this; - } - - /** \brief Reports whether previous computation was successful. - * - * \returns \c Success if computation was succesful, - * \c NumericalIssue if the matrix.appears to be negative. 
- */ - ComputationInfo info() const - { - eigen_assert(m_isInitialized && "Decomposition is not initialized."); - return m_info; - } - - /** Computes the sparse Cholesky decomposition of \a matrix */ - SimplicialCholesky& compute(const MatrixType& matrix) - { - analyzePattern(matrix); - factorize(matrix); - return *this; - } - - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. - * - * \sa compute() - */ - template - inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(m_isInitialized && "SimplicialCholesky is not initialized."); - eigen_assert(rows()==b.rows() - && "SimplicialCholesky::solve(): invalid number of rows of the right hand side matrix b"); - return internal::solve_retval(*this, b.derived()); - } - - /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. - * - * \sa compute() - */ -// template -// inline const internal::sparse_solve_retval -// solve(const SparseMatrixBase& b) const -// { -// eigen_assert(m_isInitialized && "SimplicialCholesky is not initialized."); -// eigen_assert(rows()==b.rows() -// && "SimplicialCholesky::solve(): invalid number of rows of the right hand side matrix b"); -// return internal::sparse_solve_retval(*this, b.derived()); -// } - - /** Performs a symbolic decomposition on the sparcity of \a matrix. - * - * This function is particularly useful when solving for several problems having the same structure. - * - * \sa factorize() - */ - void analyzePattern(const MatrixType& a); - - - /** Performs a numeric decomposition of \a matrix - * - * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. - * - * \sa analyzePattern() - */ - void factorize(const MatrixType& a); - - /** \returns the permutation P - * \sa permutationPinv() */ - const PermutationMatrix& permutationP() const - { return m_P; } - - /** \returns the inverse P^-1 of the permutation P - * \sa permutationP() */ - const PermutationMatrix& permutationPinv() const - { return m_Pinv; } - - #ifndef EIGEN_PARSED_BY_DOXYGEN - /** \internal */ - template - void _solve(const MatrixBase &b, MatrixBase &dest) const - { - eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); - eigen_assert(m_matrix.rows()==b.rows()); - - if(m_info!=Success) - return; - - if(m_P.size()>0) - dest = m_Pinv * b; - else - dest = b; - - if(m_LDLt) - { - if(m_matrix.nonZeros()>0) // otherwise L==I - m_matrix.template triangularView().solveInPlace(dest); - - dest = m_diag.asDiagonal().inverse() * dest; - - if (m_matrix.nonZeros()>0) // otherwise L==I - m_matrix.adjoint().template triangularView().solveInPlace(dest); - } - else - { - if(m_matrix.nonZeros()>0) // otherwise L==I - m_matrix.template triangularView().solveInPlace(dest); - - if (m_matrix.nonZeros()>0) // otherwise L==I - m_matrix.adjoint().template triangularView().solveInPlace(dest); - } - - if(m_P.size()>0) - dest = m_P * dest; - } - - /** \internal */ - /* - template - void _solve(const SparseMatrix &b, SparseMatrix &dest) const - { - // TODO - } - */ - #endif // EIGEN_PARSED_BY_DOXYGEN - - template - void dumpMemory(Stream& s) - { - int total = 0; - s << " L: " << ((total+=(m_matrix.cols()+1) * sizeof(int) + m_matrix.nonZeros()*(sizeof(int)+sizeof(Scalar))) >> 20) << "Mb" << "\n"; - s << " diag: " << ((total+=m_diag.size() * sizeof(Scalar)) >> 20) << "Mb" << "\n"; - s << " tree: " << 
((total+=m_parent.size() * sizeof(int)) >> 20) << "Mb" << "\n"; - s << " nonzeros: " << ((total+=m_nonZerosPerCol.size() * sizeof(int)) >> 20) << "Mb" << "\n"; - s << " perm: " << ((total+=m_P.size() * sizeof(int)) >> 20) << "Mb" << "\n"; - s << " perm^-1: " << ((total+=m_Pinv.size() * sizeof(int)) >> 20) << "Mb" << "\n"; - s << " TOTAL: " << (total>> 20) << "Mb" << "\n"; - } - - protected: - /** keeps off-diagonal entries; drops diagonal entries */ - struct keep_diag { - inline bool operator() (const Index& row, const Index& col, const Scalar&) const - { - return row!=col; - } - }; - - mutable ComputationInfo m_info; - bool m_isInitialized; - bool m_factorizationIsOk; - bool m_analysisIsOk; - bool m_LDLt; - - CholMatrixType m_matrix; - VectorType m_diag; // the diagonal coefficients in case of a LDLt decomposition - VectorXi m_parent; // elimination tree - VectorXi m_nonZerosPerCol; - PermutationMatrix m_P; // the permutation - PermutationMatrix m_Pinv; // the inverse permutation -}; - -template -void SimplicialCholesky<_MatrixType,_UpLo>::analyzePattern(const MatrixType& a) -{ - eigen_assert(a.rows()==a.cols()); - const Index size = a.rows(); - m_matrix.resize(size, size); - m_parent.resize(size); - m_nonZerosPerCol.resize(size); - - ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0); - - // TODO allows to configure the permutation - { - CholMatrixType C; - C = a.template selfadjointView(); - // remove diagonal entries: - C.prune(keep_diag()); - internal::minimum_degree_ordering(C, m_P); - } - - if(m_P.size()>0) - m_Pinv = m_P.inverse(); - else - m_Pinv.resize(0); - - SparseMatrix ap(size,size); - ap.template selfadjointView() = a.template selfadjointView().twistedBy(m_Pinv); - - for(Index k = 0; k < size; ++k) - { - /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */ - m_parent[k] = -1; /* parent of k is not yet known */ - tags[k] = k; /* mark node k as visited */ - m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */ - for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it) - { - Index i = it.index(); - if(i < k) - { - /* follow path from i to root of etree, stop at flagged node */ - for(; tags[i] != k; i = m_parent[i]) - { - /* find parent of i if not yet determined */ - if (m_parent[i] == -1) - m_parent[i] = k; - m_nonZerosPerCol[i]++; /* L (k,i) is nonzero */ - tags[i] = k; /* mark i as visited */ - } - } - } - } - - /* construct Lp index array from m_nonZerosPerCol column counts */ - Index* Lp = m_matrix._outerIndexPtr(); - Lp[0] = 0; - for(Index k = 0; k < size; ++k) - Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (m_LDLt ? 
0 : 1); - - m_matrix.resizeNonZeros(Lp[size]); - - m_isInitialized = true; - m_info = Success; - m_analysisIsOk = true; - m_factorizationIsOk = false; -} - - -template -void SimplicialCholesky<_MatrixType,_UpLo>::factorize(const MatrixType& a) -{ - eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); - eigen_assert(a.rows()==a.cols()); - const Index size = a.rows(); - eigen_assert(m_parent.size()==size); - eigen_assert(m_nonZerosPerCol.size()==size); - - const Index* Lp = m_matrix._outerIndexPtr(); - Index* Li = m_matrix._innerIndexPtr(); - Scalar* Lx = m_matrix._valuePtr(); - - ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0); - ei_declare_aligned_stack_constructed_variable(Index, pattern, size, 0); - ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0); - - SparseMatrix ap(size,size); - ap.template selfadjointView() = a.template selfadjointView().twistedBy(m_Pinv); - - bool ok = true; - m_diag.resize(m_LDLt ? size : 0); - - for(Index k = 0; k < size; ++k) - { - // compute nonzero pattern of kth row of L, in topological order - y[k] = 0.0; // Y(0:k) is now all zero - Index top = size; // stack for pattern is empty - tags[k] = k; // mark node k as visited - m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L - for(typename MatrixType::InnerIterator it(ap,k); it; ++it) - { - Index i = it.index(); - if(i <= k) - { - y[i] += internal::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */ - Index len; - for(len = 0; tags[i] != k; i = m_parent[i]) - { - pattern[len++] = i; /* L(k,i) is nonzero */ - tags[i] = k; /* mark i as visited */ - } - while(len > 0) - pattern[--top] = pattern[--len]; - } - } - - /* compute numerical values kth row of L (a sparse triangular solve) */ - Scalar d = y[k]; // get D(k,k) and clear Y(k) - y[k] = 0.0; - for(; top < size; ++top) - { - Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */ - Scalar yi = y[i]; /* get and clear Y(i) */ - y[i] = 0.0; - - /* the nonzero entry L(k,i) */ - Scalar l_ki; - if(m_LDLt) - l_ki = yi / m_diag[i]; - else - yi = l_ki = yi / Lx[Lp[i]]; - - Index p2 = Lp[i] + m_nonZerosPerCol[i]; - Index p; - for(p = Lp[i] + (m_LDLt ? 0 : 1); p < p2; ++p) - y[Li[p]] -= internal::conj(Lx[p]) * yi; - d -= l_ki * internal::conj(yi); - Li[p] = k; /* store L(k,i) in column form of L */ - Lx[p] = l_ki; - ++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */ - } - if(m_LDLt) - m_diag[k] = d; - else - { - Index p = Lp[k]+m_nonZerosPerCol[k]++; - Li[p] = k ; /* store L(k,k) = sqrt (d) in column k */ - Lx[p] = internal::sqrt(d) ; - } - if(d == Scalar(0)) - { - ok = false; /* failure, D(k,k) is zero */ - break; - } - } - - m_info = ok ? 
Success : NumericalIssue; - m_factorizationIsOk = true; -} - -namespace internal { - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef SimplicialCholesky<_MatrixType,_UpLo> Dec; - EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve(rhs(),dst); - } -}; - -template -struct sparse_solve_retval, Rhs> - : sparse_solve_retval_base, Rhs> -{ - typedef SimplicialCholesky<_MatrixType,_UpLo> Dec; - EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) - - template void evalTo(Dest& dst) const - { - dec()._solve(rhs(),dst); - } -}; - -} - -#endif // EIGEN_SIMPLICIAL_CHOLESKY_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SparseLDLTLegacy.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SparseLDLTLegacy.h deleted file mode 100644 index 14283c117..000000000 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SparseLDLTLegacy.h +++ /dev/null @@ -1,414 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2008 Gael Guennebaud -// -// Eigen is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 3 of the License, or (at your option) any later version. -// -// Alternatively, you can redistribute it and/or -// modify it under the terms of the GNU General Public License as -// published by the Free Software Foundation; either version 2 of -// the License, or (at your option) any later version. -// -// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY -// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public -// License and a copy of the GNU General Public License along with -// Eigen. If not, see . - -/* - -NOTE: the _symbolic, and _numeric functions has been adapted from - the LDL library: - -LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved. - -LDL License: - - Your use or distribution of LDL or any modified version of - LDL implies that you agree to this License. - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 - USA - - Permission is hereby granted to use or copy this program under the - terms of the GNU LGPL, provided that the Copyright, this License, - and the Availability of the original version is retained on all copies. - User documentation of any code that uses this code or any modified - version of this code must cite the Copyright, this License, the - Availability note, and "Used by permission." 
Permission to modify - the code and to distribute modified code is granted, provided the - Copyright, this License, and the Availability note are retained, - and a notice that the code was modified is included. - */ - -#ifndef EIGEN_SPARSELDLT_LEGACY_H -#define EIGEN_SPARSELDLT_LEGACY_H - -/** \ingroup Sparse_Module - * - * \class SparseLDLT - * - * \brief LDLT Cholesky decomposition of a sparse matrix and associated features - * - * \param MatrixType the type of the matrix of which we are computing the LDLT Cholesky decomposition - * - * \warning the upper triangular part has to be specified. The rest of the matrix is not used. The input matrix must be column major. - * - * \sa class LDLT, class LDLT - */ -template -class SparseLDLT -{ - protected: - typedef typename _MatrixType::Scalar Scalar; - typedef typename NumTraits::Real RealScalar; - - typedef Matrix VectorType; - - enum { - SupernodalFactorIsDirty = 0x10000, - MatrixLIsDirty = 0x20000 - }; - - public: - typedef SparseMatrix CholMatrixType; - typedef _MatrixType MatrixType; - typedef typename MatrixType::Index Index; - - - /** Creates a dummy LDLT factorization object with flags \a flags. */ - SparseLDLT(int flags = 0) - : m_flags(flags), m_status(0) - { - eigen_assert((MatrixType::Flags&RowMajorBit)==0); - m_precision = RealScalar(0.1) * Eigen::NumTraits::dummy_precision(); - } - - /** Creates a LDLT object and compute the respective factorization of \a matrix using - * flags \a flags. */ - SparseLDLT(const MatrixType& matrix, int flags = 0) - : m_matrix(matrix.rows(), matrix.cols()), m_flags(flags), m_status(0) - { - eigen_assert((MatrixType::Flags&RowMajorBit)==0); - m_precision = RealScalar(0.1) * Eigen::NumTraits::dummy_precision(); - compute(matrix); - } - - /** Sets the relative threshold value used to prune zero coefficients during the decomposition. - * - * Setting a value greater than zero speeds up computation, and yields to an imcomplete - * factorization with fewer non zero coefficients. Such approximate factors are especially - * useful to initialize an iterative solver. - * - * \warning if precision is greater that zero, the LDLT factorization is not guaranteed to succeed - * even if the matrix is positive definite. - * - * Note that the exact meaning of this parameter might depends on the actual - * backend. Moreover, not all backends support this feature. - * - * \sa precision() */ - void setPrecision(RealScalar v) { m_precision = v; } - - /** \returns the current precision. - * - * \sa setPrecision() */ - RealScalar precision() const { return m_precision; } - - /** Sets the flags. 
Possible values are: - * - CompleteFactorization - * - IncompleteFactorization - * - MemoryEfficient (hint to use the memory most efficient method offered by the backend) - * - SupernodalMultifrontal (implies a complete factorization if supported by the backend, - * overloads the MemoryEfficient flags) - * - SupernodalLeftLooking (implies a complete factorization if supported by the backend, - * overloads the MemoryEfficient flags) - * - * \sa flags() */ - void settags(int f) { m_flags = f; } - /** \returns the current flags */ - int flags() const { return m_flags; } - - /** Computes/re-computes the LDLT factorization */ - void compute(const MatrixType& matrix); - - /** Perform a symbolic factorization */ - void _symbolic(const MatrixType& matrix); - /** Perform the actual factorization using the previously - * computed symbolic factorization */ - bool _numeric(const MatrixType& matrix); - - /** \returns the lower triangular matrix L */ - inline const CholMatrixType& matrixL(void) const { return m_matrix; } - - /** \returns the coefficients of the diagonal matrix D */ - inline VectorType vectorD(void) const { return m_diag; } - - template - bool solveInPlace(MatrixBase &b) const; - - template - inline const internal::solve_retval, Rhs> - solve(const MatrixBase& b) const - { - eigen_assert(true && "SparseLDLT is not initialized."); - return internal::solve_retval, Rhs>(*this, b.derived()); - } - - inline Index cols() const { return m_matrix.cols(); } - inline Index rows() const { return m_matrix.rows(); } - - inline const VectorType& diag() const { return m_diag; } - - /** \returns true if the factorization succeeded */ - inline bool succeeded(void) const { return m_succeeded; } - - protected: - CholMatrixType m_matrix; - VectorType m_diag; - VectorXi m_parent; // elimination tree - VectorXi m_nonZerosPerCol; -// VectorXi m_w; // workspace - PermutationMatrix m_P; - PermutationMatrix m_Pinv; - RealScalar m_precision; - int m_flags; - mutable int m_status; - bool m_succeeded; -}; - -namespace internal { - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef SparseLDLT<_MatrixType> SpLDLTDecType; - EIGEN_MAKE_SOLVE_HELPERS(SpLDLTDecType,Rhs) - - template void evalTo(Dest& dst) const - { - //Index size = dec().matrixL().rows(); - eigen_assert(dec().matrixL().rows()==rhs().rows()); - - Rhs b(rhs().rows(), rhs().cols()); - b = rhs(); - - if (dec().matrixL().nonZeros()>0) // otherwise L==I - dec().matrixL().template triangularView().solveInPlace(b); - - b = b.cwiseQuotient(dec().diag()); - if (dec().matrixL().nonZeros()>0) // otherwise L==I - dec().matrixL().adjoint().template triangularView().solveInPlace(b); - - dst = b; - - } - -}; - -} // end namespace internal - -/** Computes / recomputes the LDLT decomposition of matrix \a a - * using the default algorithm. 
- */ -template -void SparseLDLT<_MatrixType,Backend>::compute(const _MatrixType& a) -{ - _symbolic(a); - m_succeeded = _numeric(a); -} - -template -void SparseLDLT<_MatrixType,Backend>::_symbolic(const _MatrixType& a) -{ - assert(a.rows()==a.cols()); - const Index size = a.rows(); - m_matrix.resize(size, size); - m_parent.resize(size); - m_nonZerosPerCol.resize(size); - - ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0); - - const Index* Ap = a._outerIndexPtr(); - const Index* Ai = a._innerIndexPtr(); - Index* Lp = m_matrix._outerIndexPtr(); - - const Index* P = 0; - Index* Pinv = 0; - - if(P) - { - m_P.indices() = VectorXi::Map(P,size); - m_Pinv = m_P.inverse(); - Pinv = m_Pinv.indices().data(); - } - else - { - m_P.resize(0); - m_Pinv.resize(0); - } - - for (Index k = 0; k < size; ++k) - { - /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */ - m_parent[k] = -1; /* parent of k is not yet known */ - tags[k] = k; /* mark node k as visited */ - m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */ - Index kk = P ? P[k] : k; /* kth original, or permuted, column */ - Index p2 = Ap[kk+1]; - for (Index p = Ap[kk]; p < p2; ++p) - { - /* A (i,k) is nonzero (original or permuted A) */ - Index i = Pinv ? Pinv[Ai[p]] : Ai[p]; - if (i < k) - { - /* follow path from i to root of etree, stop at flagged node */ - for (; tags[i] != k; i = m_parent[i]) - { - /* find parent of i if not yet determined */ - if (m_parent[i] == -1) - m_parent[i] = k; - ++m_nonZerosPerCol[i]; /* L (k,i) is nonzero */ - tags[i] = k; /* mark i as visited */ - } - } - } - } - /* construct Lp index array from m_nonZerosPerCol column counts */ - Lp[0] = 0; - for (Index k = 0; k < size; ++k) - Lp[k+1] = Lp[k] + m_nonZerosPerCol[k]; - - m_matrix.resizeNonZeros(Lp[size]); -} - -template -bool SparseLDLT<_MatrixType,Backend>::_numeric(const _MatrixType& a) -{ - assert(a.rows()==a.cols()); - const Index size = a.rows(); - assert(m_parent.size()==size); - assert(m_nonZerosPerCol.size()==size); - - const Index* Ap = a._outerIndexPtr(); - const Index* Ai = a._innerIndexPtr(); - const Scalar* Ax = a._valuePtr(); - const Index* Lp = m_matrix._outerIndexPtr(); - Index* Li = m_matrix._innerIndexPtr(); - Scalar* Lx = m_matrix._valuePtr(); - m_diag.resize(size); - - ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0); - ei_declare_aligned_stack_constructed_variable(Index, pattern, size, 0); - ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0); - - Index* P = 0; - Index* Pinv = 0; - - if(m_P.size()==size) - { - P = m_P.indices().data(); - Pinv = m_Pinv.indices().data(); - } - - bool ok = true; - - for (Index k = 0; k < size; ++k) - { - /* compute nonzero pattern of kth row of L, in topological order */ - y[k] = 0.0; /* Y(0:k) is now all zero */ - Index top = size; /* stack for pattern is empty */ - tags[k] = k; /* mark node k as visited */ - m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */ - Index kk = (P) ? (P[k]) : (k); /* kth original, or permuted, column */ - Index p2 = Ap[kk+1]; - for (Index p = Ap[kk]; p < p2; ++p) - { - Index i = Pinv ? 
Pinv[Ai[p]] : Ai[p]; /* get A(i,k) */ - if (i <= k) - { - y[i] += internal::conj(Ax[p]); /* scatter A(i,k) into Y (sum duplicates) */ - Index len; - for (len = 0; tags[i] != k; i = m_parent[i]) - { - pattern[len++] = i; /* L(k,i) is nonzero */ - tags[i] = k; /* mark i as visited */ - } - while (len > 0) - pattern[--top] = pattern[--len]; - } - } - - /* compute numerical values kth row of L (a sparse triangular solve) */ - m_diag[k] = y[k]; /* get D(k,k) and clear Y(k) */ - y[k] = 0.0; - for (; top < size; ++top) - { - Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */ - Scalar yi = (y[i]); /* get and clear Y(i) */ - y[i] = 0.0; - Index p2 = Lp[i] + m_nonZerosPerCol[i]; - Index p; - for (p = Lp[i]; p < p2; ++p) - y[Li[p]] -= internal::conj(Lx[p]) * (yi); - Scalar l_ki = yi / m_diag[i]; /* the nonzero entry L(k,i) */ - m_diag[k] -= l_ki * internal::conj(yi); - Li[p] = k; /* store L(k,i) in column form of L */ - Lx[p] = (l_ki); - ++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */ - } - if (m_diag[k] == 0.0) - { - ok = false; /* failure, D(k,k) is zero */ - break; - } - } - - return ok; /* success, diagonal of D is all nonzero */ -} - -/** Computes b = L^-T D^-1 L^-1 b */ -template -template -bool SparseLDLT<_MatrixType, Backend>::solveInPlace(MatrixBase &b) const -{ - //Index size = m_matrix.rows(); - eigen_assert(m_matrix.rows()==b.rows()); - if (!m_succeeded) - return false; - - if(m_P.size()>0) - b = m_Pinv * b; - - if (m_matrix.nonZeros()>0) // otherwise L==I - m_matrix.template triangularView().solveInPlace(b); - b = b.cwiseQuotient(m_diag); - if (m_matrix.nonZeros()>0) // otherwise L==I - m_matrix.adjoint().template triangularView().solveInPlace(b); - - if(m_P.size()>0) - b = m_P * b; - - return true; -} - -#endif // EIGEN_SPARSELDLT_LEGACY_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SparseLLT.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SparseLLT.h deleted file mode 100644 index ac042217b..000000000 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SparseLLT.h +++ /dev/null @@ -1,245 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2008 Gael Guennebaud -// -// Eigen is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 3 of the License, or (at your option) any later version. -// -// Alternatively, you can redistribute it and/or -// modify it under the terms of the GNU General Public License as -// published by the Free Software Foundation; either version 2 of -// the License, or (at your option) any later version. -// -// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY -// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public -// License and a copy of the GNU General Public License along with -// Eigen. If not, see . 
- -#ifndef EIGEN_SPARSELLT_H -#define EIGEN_SPARSELLT_H - -/** \ingroup Sparse_Module - * - * \class SparseLLT - * - * \brief LLT Cholesky decomposition of a sparse matrix and associated features - * - * \param MatrixType the type of the matrix of which we are computing the LLT Cholesky decomposition - * - * \sa class LLT, class LDLT - */ -template -class SparseLLT -{ - protected: - typedef typename _MatrixType::Scalar Scalar; - typedef typename NumTraits::Real RealScalar; - - enum { - SupernodalFactorIsDirty = 0x10000, - MatrixLIsDirty = 0x20000 - }; - - public: - typedef SparseMatrix CholMatrixType; - typedef _MatrixType MatrixType; - typedef typename MatrixType::Index Index; - - /** Creates a dummy LLT factorization object with flags \a flags. */ - SparseLLT(int flags = 0) - : m_flags(flags), m_status(0) - { - m_precision = RealScalar(0.1) * Eigen::NumTraits::dummy_precision(); - } - - /** Creates a LLT object and compute the respective factorization of \a matrix using - * flags \a flags. */ - SparseLLT(const MatrixType& matrix, int flags = 0) - : m_matrix(matrix.rows(), matrix.cols()), m_flags(flags), m_status(0) - { - m_precision = RealScalar(0.1) * Eigen::NumTraits::dummy_precision(); - compute(matrix); - } - - /** Sets the relative threshold value used to prune zero coefficients during the decomposition. - * - * Setting a value greater than zero speeds up computation, and yields to an imcomplete - * factorization with fewer non zero coefficients. Such approximate factors are especially - * useful to initialize an iterative solver. - * - * \warning if precision is greater that zero, the LLT factorization is not guaranteed to succeed - * even if the matrix is positive definite. - * - * Note that the exact meaning of this parameter might depends on the actual - * backend. Moreover, not all backends support this feature. - * - * \sa precision() */ - void setPrecision(RealScalar v) { m_precision = v; } - - /** \returns the current precision. - * - * \sa setPrecision() */ - RealScalar precision() const { return m_precision; } - - /** Sets the flags. 
Possible values are: - * - CompleteFactorization - * - IncompleteFactorization - * - MemoryEfficient (hint to use the memory most efficient method offered by the backend) - * - SupernodalMultifrontal (implies a complete factorization if supported by the backend, - * overloads the MemoryEfficient flags) - * - SupernodalLeftLooking (implies a complete factorization if supported by the backend, - * overloads the MemoryEfficient flags) - * - * \sa flags() */ - void setFlags(int f) { m_flags = f; } - /** \returns the current flags */ - int flags() const { return m_flags; } - - /** Computes/re-computes the LLT factorization */ - void compute(const MatrixType& matrix); - - /** \returns the lower triangular matrix L */ - inline const CholMatrixType& matrixL(void) const { return m_matrix; } - - template - bool solveInPlace(MatrixBase &b) const; - - template - inline const internal::solve_retval, Rhs> - solve(const MatrixBase& b) const - { - eigen_assert(true && "SparseLLT is not initialized."); - return internal::solve_retval, Rhs>(*this, b.derived()); - } - - inline Index cols() const { return m_matrix.cols(); } - inline Index rows() const { return m_matrix.rows(); } - - /** \returns true if the factorization succeeded */ - inline bool succeeded(void) const { return m_succeeded; } - - protected: - CholMatrixType m_matrix; - RealScalar m_precision; - int m_flags; - mutable int m_status; - bool m_succeeded; -}; - - -namespace internal { - -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef SparseLLT<_MatrixType> SpLLTDecType; - EIGEN_MAKE_SOLVE_HELPERS(SpLLTDecType,Rhs) - - template void evalTo(Dest& dst) const - { - const Index size = dec().matrixL().rows(); - eigen_assert(size==rhs().rows()); - - Rhs b(rhs().rows(), rhs().cols()); - b = rhs(); - - dec().matrixL().template triangularView().solveInPlace(b); - dec().matrixL().adjoint().template triangularView().solveInPlace(b); - - dst = b; - - } - -}; - -} // end namespace internal - - -/** Computes / recomputes the LLT decomposition of matrix \a a - * using the default algorithm. - */ -template -void SparseLLT<_MatrixType,Backend>::compute(const _MatrixType& a) -{ - assert(a.rows()==a.cols()); - const Index size = a.rows(); - m_matrix.resize(size, size); - - // allocate a temporary vector for accumulations - AmbiVector tempVector(size); - RealScalar density = a.nonZeros()/RealScalar(size*size); - - // TODO estimate the number of non zeros - m_matrix.setZero(); - m_matrix.reserve(a.nonZeros()*2); - for (Index j = 0; j < size; ++j) - { - Scalar x = internal::real(a.coeff(j,j)); - - // TODO better estimate of the density ! - tempVector.init(density>0.001? 
IsDense : IsSparse); - tempVector.setBounds(j+1,size); - tempVector.setZero(); - // init with current matrix a - { - typename _MatrixType::InnerIterator it(a,j); - eigen_assert(it.index()==j && - "matrix must has non zero diagonal entries and only the lower triangular part must be stored"); - ++it; // skip diagonal element - for (; it; ++it) - tempVector.coeffRef(it.index()) = it.value(); - } - for (Index k=0; k::Iterator it(tempVector, m_precision*rx); it; ++it) - { - // FIXME use insertBack - m_matrix.insert(it.index(), j) = it.value() * y; - } - } - m_matrix.finalize(); -} - -/** Computes b = L^-T L^-1 b */ -template -template -bool SparseLLT<_MatrixType, Backend>::solveInPlace(MatrixBase &b) const -{ - const Index size = m_matrix.rows(); - eigen_assert(size==b.rows()); - - m_matrix.template triangularView().solveInPlace(b); - m_matrix.adjoint().template triangularView().solveInPlace(b); - - return true; -} - -#endif // EIGEN_SPARSELLT_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SparseLU.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SparseLU.h deleted file mode 100644 index 3d10dbbee..000000000 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SparseLU.h +++ /dev/null @@ -1,163 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2008 Gael Guennebaud -// -// Eigen is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 3 of the License, or (at your option) any later version. -// -// Alternatively, you can redistribute it and/or -// modify it under the terms of the GNU General Public License as -// published by the Free Software Foundation; either version 2 of -// the License, or (at your option) any later version. -// -// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY -// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public -// License and a copy of the GNU General Public License along with -// Eigen. If not, see . - -#ifndef EIGEN_SPARSELU_H -#define EIGEN_SPARSELU_H - -enum { - SvNoTrans = 0, - SvTranspose = 1, - SvAdjoint = 2 -}; - -/** \ingroup Sparse_Module - * - * \class SparseLU - * - * \brief LU decomposition of a sparse matrix and associated features - * - * \param _MatrixType the type of the matrix of which we are computing the LU factorization - * - * \sa class FullPivLU, class SparseLLT - */ -template -class SparseLU - { - protected: - typedef typename _MatrixType::Scalar Scalar; - typedef typename NumTraits::Real RealScalar; - typedef SparseMatrix LUMatrixType; - - enum { - MatrixLUIsDirty = 0x10000 - }; - - public: - typedef _MatrixType MatrixType; - - /** Creates a dummy LU factorization object with flags \a flags. */ - SparseLU(int flags = 0) - : m_flags(flags), m_status(0) - { - m_precision = RealScalar(0.1) * Eigen::NumTraits::dummy_precision(); - } - - /** Creates a LU object and compute the respective factorization of \a matrix using - * flags \a flags. 
*/ - SparseLU(const _MatrixType& matrix, int flags = 0) - : /*m_matrix(matrix.rows(), matrix.cols()),*/ m_flags(flags), m_status(0) - { - m_precision = RealScalar(0.1) * Eigen::NumTraits::dummy_precision(); - compute(matrix); - } - - /** Sets the relative threshold value used to prune zero coefficients during the decomposition. - * - * Setting a value greater than zero speeds up computation, and yields to an imcomplete - * factorization with fewer non zero coefficients. Such approximate factors are especially - * useful to initialize an iterative solver. - * - * Note that the exact meaning of this parameter might depends on the actual - * backend. Moreover, not all backends support this feature. - * - * \sa precision() */ - void setPrecision(RealScalar v) { m_precision = v; } - - /** \returns the current precision. - * - * \sa setPrecision() */ - RealScalar precision() const { return m_precision; } - - /** Sets the flags. Possible values are: - * - CompleteFactorization - * - IncompleteFactorization - * - MemoryEfficient - * - one of the ordering methods - * - etc... - * - * \sa flags() */ - void setFlags(int f) { m_flags = f; } - /** \returns the current flags */ - int flags() const { return m_flags; } - - void setOrderingMethod(int m) - { - eigen_assert( (m&~OrderingMask) == 0 && m!=0 && "invalid ordering method"); - m_flags = m_flags&~OrderingMask | m&OrderingMask; - } - - int orderingMethod() const - { - return m_flags&OrderingMask; - } - - /** Computes/re-computes the LU factorization */ - void compute(const _MatrixType& matrix); - - /** \returns the lower triangular matrix L */ - //inline const _MatrixType& matrixL() const { return m_matrixL; } - - /** \returns the upper triangular matrix U */ - //inline const _MatrixType& matrixU() const { return m_matrixU; } - - template - bool solve(const MatrixBase &b, MatrixBase* x, - const int transposed = SvNoTrans) const; - - /** \returns true if the factorization succeeded */ - inline bool succeeded(void) const { return m_succeeded; } - - protected: - RealScalar m_precision; - int m_flags; - mutable int m_status; - bool m_succeeded; -}; - -/** Computes / recomputes the LU decomposition of matrix \a a - * using the default algorithm. - */ -template -void SparseLU<_MatrixType,Backend>::compute(const _MatrixType& ) -{ - eigen_assert(false && "not implemented yet"); -} - -/** Computes *x = U^-1 L^-1 b - * - * If \a transpose is set to SvTranspose or SvAdjoint, the solution - * of the transposed/adjoint system is computed instead. - * - * Not all backends implement the solution of the transposed or - * adjoint system. - */ -template -template -bool SparseLU<_MatrixType,Backend>::solve(const MatrixBase &, MatrixBase* , const int ) const -{ - eigen_assert(false && "not implemented yet"); - return false; -} - -#endif // EIGEN_SPARSELU_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SuperLUSupport.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SuperLUSupport.h deleted file mode 100644 index bb7312190..000000000 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/SuperLUSupport.h +++ /dev/null @@ -1,667 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2008-2009 Gael Guennebaud -// -// Eigen is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 3 of the License, or (at your option) any later version. 
-// -// Alternatively, you can redistribute it and/or -// modify it under the terms of the GNU General Public License as -// published by the Free Software Foundation; either version 2 of -// the License, or (at your option) any later version. -// -// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY -// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public -// License and a copy of the GNU General Public License along with -// Eigen. If not, see . - -#ifndef EIGEN_SUPERLUSUPPORT_H -#define EIGEN_SUPERLUSUPPORT_H - -#define DECL_GSSVX(PREFIX,FLOATTYPE,KEYTYPE) \ - extern "C" { \ - typedef struct { FLOATTYPE for_lu; FLOATTYPE total_needed; int expansions; } PREFIX##mem_usage_t; \ - extern void PREFIX##gssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *, \ - char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *, \ - void *, int, SuperMatrix *, SuperMatrix *, \ - FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, \ - PREFIX##mem_usage_t *, SuperLUStat_t *, int *); \ - } \ - inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A, \ - int *perm_c, int *perm_r, int *etree, char *equed, \ - FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \ - SuperMatrix *U, void *work, int lwork, \ - SuperMatrix *B, SuperMatrix *X, \ - FLOATTYPE *recip_pivot_growth, \ - FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr, \ - SuperLUStat_t *stats, int *info, KEYTYPE) { \ - PREFIX##mem_usage_t mem_usage; \ - PREFIX##gssvx(options, A, perm_c, perm_r, etree, equed, R, C, L, \ - U, work, lwork, B, X, recip_pivot_growth, rcond, \ - ferr, berr, &mem_usage, stats, info); \ - return mem_usage.for_lu; /* bytes used by the factor storage */ \ - } - -DECL_GSSVX(s,float,float) -DECL_GSSVX(c,float,std::complex) -DECL_GSSVX(d,double,double) -DECL_GSSVX(z,double,std::complex) - -#ifdef MILU_ALPHA -#define EIGEN_SUPERLU_HAS_ILU -#endif - -#ifdef EIGEN_SUPERLU_HAS_ILU - -// similarly for the incomplete factorization using gsisx -#define DECL_GSISX(PREFIX,FLOATTYPE,KEYTYPE) \ - extern "C" { \ - extern void PREFIX##gsisx(superlu_options_t *, SuperMatrix *, int *, int *, int *, \ - char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *, \ - void *, int, SuperMatrix *, SuperMatrix *, FLOATTYPE *, FLOATTYPE *, \ - PREFIX##mem_usage_t *, SuperLUStat_t *, int *); \ - } \ - inline float SuperLU_gsisx(superlu_options_t *options, SuperMatrix *A, \ - int *perm_c, int *perm_r, int *etree, char *equed, \ - FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \ - SuperMatrix *U, void *work, int lwork, \ - SuperMatrix *B, SuperMatrix *X, \ - FLOATTYPE *recip_pivot_growth, \ - FLOATTYPE *rcond, \ - SuperLUStat_t *stats, int *info, KEYTYPE) { \ - PREFIX##mem_usage_t mem_usage; \ - PREFIX##gsisx(options, A, perm_c, perm_r, etree, equed, R, C, L, \ - U, work, lwork, B, X, recip_pivot_growth, rcond, \ - &mem_usage, stats, info); \ - return mem_usage.for_lu; /* bytes used by the factor storage */ \ - } - -DECL_GSISX(s,float,float) -DECL_GSISX(c,float,std::complex) -DECL_GSISX(d,double,double) -DECL_GSISX(z,double,std::complex) - -#endif - -template -struct SluMatrixMapHelper; - -/** \internal - * - * A wrapper class for SuperLU matrices. It supports only compressed sparse matrices - * and dense matrices. Supernodal and other fancy format are not supported by this wrapper. 
- * - * This wrapper class mainly aims to avoids the need of dynamic allocation of the storage structure. - */ -struct SluMatrix : SuperMatrix -{ - SluMatrix() - { - Store = &storage; - } - - SluMatrix(const SluMatrix& other) - : SuperMatrix(other) - { - Store = &storage; - storage = other.storage; - } - - SluMatrix& operator=(const SluMatrix& other) - { - SuperMatrix::operator=(static_cast(other)); - Store = &storage; - storage = other.storage; - return *this; - } - - struct - { - union {int nnz;int lda;}; - void *values; - int *innerInd; - int *outerInd; - } storage; - - void setStorageType(Stype_t t) - { - Stype = t; - if (t==SLU_NC || t==SLU_NR || t==SLU_DN) - Store = &storage; - else - { - eigen_assert(false && "storage type not supported"); - Store = 0; - } - } - - template - void setScalarType() - { - if (internal::is_same::value) - Dtype = SLU_S; - else if (internal::is_same::value) - Dtype = SLU_D; - else if (internal::is_same >::value) - Dtype = SLU_C; - else if (internal::is_same >::value) - Dtype = SLU_Z; - else - { - eigen_assert(false && "Scalar type not supported by SuperLU"); - } - } - - template - static SluMatrix Map(Matrix& mat) - { - typedef Matrix MatrixType; - eigen_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices is not supported by SuperLU"); - SluMatrix res; - res.setStorageType(SLU_DN); - res.setScalarType(); - res.Mtype = SLU_GE; - - res.nrow = mat.rows(); - res.ncol = mat.cols(); - - res.storage.lda = MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride(); - res.storage.values = mat.data(); - return res; - } - - template - static SluMatrix Map(SparseMatrixBase& mat) - { - SluMatrix res; - if ((MatrixType::Flags&RowMajorBit)==RowMajorBit) - { - res.setStorageType(SLU_NR); - res.nrow = mat.cols(); - res.ncol = mat.rows(); - } - else - { - res.setStorageType(SLU_NC); - res.nrow = mat.rows(); - res.ncol = mat.cols(); - } - - res.Mtype = SLU_GE; - - res.storage.nnz = mat.nonZeros(); - res.storage.values = mat.derived()._valuePtr(); - res.storage.innerInd = mat.derived()._innerIndexPtr(); - res.storage.outerInd = mat.derived()._outerIndexPtr(); - - res.setScalarType(); - - // FIXME the following is not very accurate - if (MatrixType::Flags & Upper) - res.Mtype = SLU_TRU; - if (MatrixType::Flags & Lower) - res.Mtype = SLU_TRL; - if (MatrixType::Flags & SelfAdjoint) - eigen_assert(false && "SelfAdjoint matrix shape not supported by SuperLU"); - return res; - } -}; - -template -struct SluMatrixMapHelper > -{ - typedef Matrix MatrixType; - static void run(MatrixType& mat, SluMatrix& res) - { - eigen_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices is not supported by SuperLU"); - res.setStorageType(SLU_DN); - res.setScalarType(); - res.Mtype = SLU_GE; - - res.nrow = mat.rows(); - res.ncol = mat.cols(); - - res.storage.lda = mat.outerStride(); - res.storage.values = mat.data(); - } -}; - -template -struct SluMatrixMapHelper > -{ - typedef Derived MatrixType; - static void run(MatrixType& mat, SluMatrix& res) - { - if ((MatrixType::Flags&RowMajorBit)==RowMajorBit) - { - res.setStorageType(SLU_NR); - res.nrow = mat.cols(); - res.ncol = mat.rows(); - } - else - { - res.setStorageType(SLU_NC); - res.nrow = mat.rows(); - res.ncol = mat.cols(); - } - - res.Mtype = SLU_GE; - - res.storage.nnz = mat.nonZeros(); - res.storage.values = mat._valuePtr(); - res.storage.innerInd = mat._innerIndexPtr(); - res.storage.outerInd = mat._outerIndexPtr(); - - res.setScalarType(); - - // FIXME the following is not very accurate - if 
(MatrixType::Flags & Upper) - res.Mtype = SLU_TRU; - if (MatrixType::Flags & Lower) - res.Mtype = SLU_TRL; - if (MatrixType::Flags & SelfAdjoint) - eigen_assert(false && "SelfAdjoint matrix shape not supported by SuperLU"); - } -}; - -namespace internal { - -template -SluMatrix asSluMatrix(MatrixType& mat) -{ - return SluMatrix::Map(mat); -} - -/** View a Super LU matrix as an Eigen expression */ -template -MappedSparseMatrix map_superlu(SluMatrix& sluMat) -{ - eigen_assert((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR - || (Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC); - - Index outerSize = (Flags&RowMajor)==RowMajor ? sluMat.ncol : sluMat.nrow; - - return MappedSparseMatrix( - sluMat.nrow, sluMat.ncol, sluMat.storage.outerInd[outerSize], - sluMat.storage.outerInd, sluMat.storage.innerInd, reinterpret_cast(sluMat.storage.values) ); -} - -} // end namespace internal - -template -class SparseLU : public SparseLU -{ - protected: - typedef SparseLU Base; - typedef typename Base::Scalar Scalar; - typedef typename Base::RealScalar RealScalar; - typedef Matrix Vector; - typedef Matrix IntRowVectorType; - typedef Matrix IntColVectorType; - typedef SparseMatrix LMatrixType; - typedef SparseMatrix UMatrixType; - using Base::m_flags; - using Base::m_status; - - public: - - SparseLU(int flags = NaturalOrdering) - : Base(flags) - { - } - - SparseLU(const MatrixType& matrix, int flags = NaturalOrdering) - : Base(flags) - { - compute(matrix); - } - - ~SparseLU() - { - Destroy_SuperNode_Matrix(&m_sluL); - Destroy_CompCol_Matrix(&m_sluU); - } - - inline const LMatrixType& matrixL() const - { - if (m_extractedDataAreDirty) extractData(); - return m_l; - } - - inline const UMatrixType& matrixU() const - { - if (m_extractedDataAreDirty) extractData(); - return m_u; - } - - inline const IntColVectorType& permutationP() const - { - if (m_extractedDataAreDirty) extractData(); - return m_p; - } - - inline const IntRowVectorType& permutationQ() const - { - if (m_extractedDataAreDirty) extractData(); - return m_q; - } - - Scalar determinant() const; - - template - bool solve(const MatrixBase &b, MatrixBase* x, const int transposed = SvNoTrans) const; - - void compute(const MatrixType& matrix); - - protected: - - void extractData() const; - - protected: - // cached data to reduce reallocation, etc. 
- mutable LMatrixType m_l; - mutable UMatrixType m_u; - mutable IntColVectorType m_p; - mutable IntRowVectorType m_q; - - mutable SparseMatrix m_matrix; - mutable SluMatrix m_sluA; - mutable SuperMatrix m_sluL, m_sluU; - mutable SluMatrix m_sluB, m_sluX; - mutable SuperLUStat_t m_sluStat; - mutable superlu_options_t m_sluOptions; - mutable std::vector m_sluEtree; - mutable std::vector m_sluRscale, m_sluCscale; - mutable std::vector m_sluFerr, m_sluBerr; - mutable char m_sluEqued; - mutable bool m_extractedDataAreDirty; -}; - -template -void SparseLU::compute(const MatrixType& a) -{ - const int size = a.rows(); - m_matrix = a; - - set_default_options(&m_sluOptions); - m_sluOptions.ColPerm = NATURAL; - m_sluOptions.PrintStat = NO; - m_sluOptions.ConditionNumber = NO; - m_sluOptions.Trans = NOTRANS; - // m_sluOptions.Equil = NO; - - switch (Base::orderingMethod()) - { - case NaturalOrdering : m_sluOptions.ColPerm = NATURAL; break; - case MinimumDegree_AT_PLUS_A : m_sluOptions.ColPerm = MMD_AT_PLUS_A; break; - case MinimumDegree_ATA : m_sluOptions.ColPerm = MMD_ATA; break; - case ColApproxMinimumDegree : m_sluOptions.ColPerm = COLAMD; break; - default: - //std::cerr << "Eigen: ordering method \"" << Base::orderingMethod() << "\" not supported by the SuperLU backend\n"; - m_sluOptions.ColPerm = NATURAL; - }; - - m_sluA = internal::asSluMatrix(m_matrix); - memset(&m_sluL,0,sizeof m_sluL); - memset(&m_sluU,0,sizeof m_sluU); - //m_sluEqued = 'B'; - int info = 0; - - m_p.resize(size); - m_q.resize(size); - m_sluRscale.resize(size); - m_sluCscale.resize(size); - m_sluEtree.resize(size); - - RealScalar recip_pivot_gross, rcond; - RealScalar ferr, berr; - - // set empty B and X - m_sluB.setStorageType(SLU_DN); - m_sluB.setScalarType(); - m_sluB.Mtype = SLU_GE; - m_sluB.storage.values = 0; - m_sluB.nrow = m_sluB.ncol = 0; - m_sluB.storage.lda = size; - m_sluX = m_sluB; - - StatInit(&m_sluStat); - if (m_flags&IncompleteFactorization) - { - #ifdef EIGEN_SUPERLU_HAS_ILU - ilu_set_default_options(&m_sluOptions); - - // no attempt to preserve column sum - m_sluOptions.ILU_MILU = SILU; - - // only basic ILU(k) support -- no direct control over memory consumption - // better to use ILU_DropRule = DROP_BASIC | DROP_AREA - // and set ILU_FillFactor to max memory growth - m_sluOptions.ILU_DropRule = DROP_BASIC; - m_sluOptions.ILU_DropTol = Base::m_precision; - - SuperLU_gsisx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0], - &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0], - &m_sluL, &m_sluU, - NULL, 0, - &m_sluB, &m_sluX, - &recip_pivot_gross, &rcond, - &m_sluStat, &info, Scalar()); - #else - //std::cerr << "Incomplete factorization is only available in SuperLU v4\n"; - Base::m_succeeded = false; - return; - #endif - } - else - { - SuperLU_gssvx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0], - &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0], - &m_sluL, &m_sluU, - NULL, 0, - &m_sluB, &m_sluX, - &recip_pivot_gross, &rcond, - &ferr, &berr, - &m_sluStat, &info, Scalar()); - } - StatFree(&m_sluStat); - - m_extractedDataAreDirty = true; - - // FIXME how to better check for errors ??? 
- Base::m_succeeded = (info == 0); -} - -template -template -bool SparseLU::solve(const MatrixBase &b, - MatrixBase *x, const int transposed) const -{ - const int size = m_matrix.rows(); - const int rhsCols = b.cols(); - eigen_assert(size==b.rows()); - - switch (transposed) { - case SvNoTrans : m_sluOptions.Trans = NOTRANS; break; - case SvTranspose : m_sluOptions.Trans = TRANS; break; - case SvAdjoint : m_sluOptions.Trans = CONJ; break; - default: - //std::cerr << "Eigen: transposition option \"" << transposed << "\" not supported by the SuperLU backend\n"; - m_sluOptions.Trans = NOTRANS; - } - - m_sluOptions.Fact = FACTORED; - m_sluOptions.IterRefine = NOREFINE; - - m_sluFerr.resize(rhsCols); - m_sluBerr.resize(rhsCols); - m_sluB = SluMatrix::Map(b.const_cast_derived()); - m_sluX = SluMatrix::Map(x->derived()); - - StatInit(&m_sluStat); - int info = 0; - RealScalar recip_pivot_gross, rcond; - - if (m_flags&IncompleteFactorization) - { - #ifdef EIGEN_SUPERLU_HAS_ILU - SuperLU_gsisx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0], - &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0], - &m_sluL, &m_sluU, - NULL, 0, - &m_sluB, &m_sluX, - &recip_pivot_gross, &rcond, - &m_sluStat, &info, Scalar()); - #else - //std::cerr << "Incomplete factorization is only available in SuperLU v4\n"; - return false; - #endif - } - else - { - SuperLU_gssvx( - &m_sluOptions, &m_sluA, - m_q.data(), m_p.data(), - &m_sluEtree[0], &m_sluEqued, - &m_sluRscale[0], &m_sluCscale[0], - &m_sluL, &m_sluU, - NULL, 0, - &m_sluB, &m_sluX, - &recip_pivot_gross, &rcond, - &m_sluFerr[0], &m_sluBerr[0], - &m_sluStat, &info, Scalar()); - } - StatFree(&m_sluStat); - - // reset to previous state - m_sluOptions.Trans = NOTRANS; - return info==0; -} - -// -// the code of this extractData() function has been adapted from the SuperLU's Matlab support code, -// -// Copyright (c) 1994 by Xerox Corporation. All rights reserved. -// -// THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY -// EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. -// -template -void SparseLU::extractData() const -{ - if (m_extractedDataAreDirty) - { - int upper; - int fsupc, istart, nsupr; - int lastl = 0, lastu = 0; - SCformat *Lstore = static_cast(m_sluL.Store); - NCformat *Ustore = static_cast(m_sluU.Store); - Scalar *SNptr; - - const int size = m_matrix.rows(); - m_l.resize(size,size); - m_l.resizeNonZeros(Lstore->nnz); - m_u.resize(size,size); - m_u.resizeNonZeros(Ustore->nnz); - - int* Lcol = m_l._outerIndexPtr(); - int* Lrow = m_l._innerIndexPtr(); - Scalar* Lval = m_l._valuePtr(); - - int* Ucol = m_u._outerIndexPtr(); - int* Urow = m_u._innerIndexPtr(); - Scalar* Uval = m_u._valuePtr(); - - Ucol[0] = 0; - Ucol[0] = 0; - - /* for each supernode */ - for (int k = 0; k <= Lstore->nsuper; ++k) - { - fsupc = L_FST_SUPC(k); - istart = L_SUB_START(fsupc); - nsupr = L_SUB_START(fsupc+1) - istart; - upper = 1; - - /* for each column in the supernode */ - for (int j = fsupc; j < L_FST_SUPC(k+1); ++j) - { - SNptr = &((Scalar*)Lstore->nzval)[L_NZ_START(j)]; - - /* Extract U */ - for (int i = U_NZ_START(j); i < U_NZ_START(j+1); ++i) - { - Uval[lastu] = ((Scalar*)Ustore->nzval)[i]; - /* Matlab doesn't like explicit zero. */ - if (Uval[lastu] != 0.0) - Urow[lastu++] = U_SUB(i); - } - for (int i = 0; i < upper; ++i) - { - /* upper triangle in the supernode */ - Uval[lastu] = SNptr[i]; - /* Matlab doesn't like explicit zero. 
*/ - if (Uval[lastu] != 0.0) - Urow[lastu++] = L_SUB(istart+i); - } - Ucol[j+1] = lastu; - - /* Extract L */ - Lval[lastl] = 1.0; /* unit diagonal */ - Lrow[lastl++] = L_SUB(istart + upper - 1); - for (int i = upper; i < nsupr; ++i) - { - Lval[lastl] = SNptr[i]; - /* Matlab doesn't like explicit zero. */ - if (Lval[lastl] != 0.0) - Lrow[lastl++] = L_SUB(istart+i); - } - Lcol[j+1] = lastl; - - ++upper; - } /* for j ... */ - - } /* for k ... */ - - // squeeze the matrices : - m_l.resizeNonZeros(lastl); - m_u.resizeNonZeros(lastu); - - m_extractedDataAreDirty = false; - } -} - -template -typename SparseLU::Scalar SparseLU::determinant() const -{ - assert((!NumTraits::IsComplex) && "This function is not implemented for complex yet"); - if (m_extractedDataAreDirty) - extractData(); - - // TODO this code could be moved to the default/base backend - // FIXME perhaps we have to take into account the scale factors m_sluRscale and m_sluCscale ??? - Scalar det = Scalar(1); - for (int j=0; j 0) - { - int lastId = m_u._outerIndexPtr()[j+1]-1; - eigen_assert(m_u._innerIndexPtr()[lastId]<=j); - if (m_u._innerIndexPtr()[lastId]==j) - { - det *= m_u._valuePtr()[lastId]; - } - } -// std::cout << m_sluRscale[j] << " " << m_sluCscale[j] << " \n"; - } - return det; -} - -#endif // EIGEN_SUPERLUSUPPORT_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h deleted file mode 100644 index beb18f6cd..000000000 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h +++ /dev/null @@ -1,350 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2008-2009 Gael Guennebaud -// -// Eigen is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 3 of the License, or (at your option) any later version. -// -// Alternatively, you can redistribute it and/or -// modify it under the terms of the GNU General Public License as -// published by the Free Software Foundation; either version 2 of -// the License, or (at your option) any later version. -// -// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY -// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public -// License and a copy of the GNU General Public License along with -// Eigen. If not, see . - -#ifndef EIGEN_UMFPACKSUPPORT_H -#define EIGEN_UMFPACKSUPPORT_H - -/* TODO extract L, extract U, compute det, etc... 
*/ - -// generic double/complex wrapper functions: - -inline void umfpack_free_numeric(void **Numeric, double) -{ umfpack_di_free_numeric(Numeric); } - -inline void umfpack_free_numeric(void **Numeric, std::complex) -{ umfpack_zi_free_numeric(Numeric); } - -inline void umfpack_free_symbolic(void **Symbolic, double) -{ umfpack_di_free_symbolic(Symbolic); } - -inline void umfpack_free_symbolic(void **Symbolic, std::complex) -{ umfpack_zi_free_symbolic(Symbolic); } - -inline int umfpack_symbolic(int n_row,int n_col, - const int Ap[], const int Ai[], const double Ax[], void **Symbolic, - const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO]) -{ - return umfpack_di_symbolic(n_row,n_col,Ap,Ai,Ax,Symbolic,Control,Info); -} - -inline int umfpack_symbolic(int n_row,int n_col, - const int Ap[], const int Ai[], const std::complex Ax[], void **Symbolic, - const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO]) -{ - return umfpack_zi_symbolic(n_row,n_col,Ap,Ai,&internal::real_ref(Ax[0]),0,Symbolic,Control,Info); -} - -inline int umfpack_numeric( const int Ap[], const int Ai[], const double Ax[], - void *Symbolic, void **Numeric, - const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO]) -{ - return umfpack_di_numeric(Ap,Ai,Ax,Symbolic,Numeric,Control,Info); -} - -inline int umfpack_numeric( const int Ap[], const int Ai[], const std::complex Ax[], - void *Symbolic, void **Numeric, - const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO]) -{ - return umfpack_zi_numeric(Ap,Ai,&internal::real_ref(Ax[0]),0,Symbolic,Numeric,Control,Info); -} - -inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const double Ax[], - double X[], const double B[], void *Numeric, - const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO]) -{ - return umfpack_di_solve(sys,Ap,Ai,Ax,X,B,Numeric,Control,Info); -} - -inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const std::complex Ax[], - std::complex X[], const std::complex B[], void *Numeric, - const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO]) -{ - return umfpack_zi_solve(sys,Ap,Ai,&internal::real_ref(Ax[0]),0,&internal::real_ref(X[0]),0,&internal::real_ref(B[0]),0,Numeric,Control,Info); -} - -inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, double) -{ - return umfpack_di_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric); -} - -inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, std::complex) -{ - return umfpack_zi_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric); -} - -inline int umfpack_get_numeric(int Lp[], int Lj[], double Lx[], int Up[], int Ui[], double Ux[], - int P[], int Q[], double Dx[], int *do_recip, double Rs[], void *Numeric) -{ - return umfpack_di_get_numeric(Lp,Lj,Lx,Up,Ui,Ux,P,Q,Dx,do_recip,Rs,Numeric); -} - -inline int umfpack_get_numeric(int Lp[], int Lj[], std::complex Lx[], int Up[], int Ui[], std::complex Ux[], - int P[], int Q[], std::complex Dx[], int *do_recip, double Rs[], void *Numeric) -{ - double& lx0_real = internal::real_ref(Lx[0]); - double& ux0_real = internal::real_ref(Ux[0]); - double& dx0_real = internal::real_ref(Dx[0]); - return umfpack_zi_get_numeric(Lp,Lj,Lx?&lx0_real:0,0,Up,Ui,Ux?&ux0_real:0,0,P,Q, - Dx?&dx0_real:0,0,do_recip,Rs,Numeric); -} - -inline int umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO]) -{ - return umfpack_di_get_determinant(Mx,Ex,NumericHandle,User_Info); -} - -inline int 
umfpack_get_determinant(std::complex *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO]) -{ - double& mx_real = internal::real_ref(*Mx); - return umfpack_zi_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info); -} - - -template -class SparseLU<_MatrixType,UmfPack> : public SparseLU<_MatrixType> -{ - protected: - typedef SparseLU<_MatrixType> Base; - typedef typename Base::Scalar Scalar; - typedef typename Base::RealScalar RealScalar; - typedef Matrix Vector; - typedef Matrix IntRowVectorType; - typedef Matrix IntColVectorType; - typedef SparseMatrix LMatrixType; - typedef SparseMatrix UMatrixType; - using Base::m_flags; - using Base::m_status; - - public: - typedef _MatrixType MatrixType; - typedef typename MatrixType::Index Index; - - SparseLU(int flags = NaturalOrdering) - : Base(flags), m_numeric(0) - { - } - - SparseLU(const MatrixType& matrix, int flags = NaturalOrdering) - : Base(flags), m_numeric(0) - { - compute(matrix); - } - - ~SparseLU() - { - if (m_numeric) - umfpack_free_numeric(&m_numeric,Scalar()); - } - - inline const LMatrixType& matrixL() const - { - if (m_extractedDataAreDirty) extractData(); - return m_l; - } - - inline const UMatrixType& matrixU() const - { - if (m_extractedDataAreDirty) extractData(); - return m_u; - } - - inline const IntColVectorType& permutationP() const - { - if (m_extractedDataAreDirty) extractData(); - return m_p; - } - - inline const IntRowVectorType& permutationQ() const - { - if (m_extractedDataAreDirty) extractData(); - return m_q; - } - - Scalar determinant() const; - - template - bool solve(const MatrixBase &b, MatrixBase* x) const; - - template - inline const internal::solve_retval, Rhs> - solve(const MatrixBase& b) const - { - eigen_assert(true && "SparseLU is not initialized."); - return internal::solve_retval, Rhs>(*this, b.derived()); - } - - void compute(const MatrixType& matrix); - - inline Index cols() const { return m_matrixRef->cols(); } - inline Index rows() const { return m_matrixRef->rows(); } - - inline const MatrixType& matrixLU() const - { - //eigen_assert(m_isInitialized && "LU is not initialized."); - return *m_matrixRef; - } - - const void* numeric() const - { - return m_numeric; - } - - protected: - - void extractData() const; - - protected: - // cached data: - void* m_numeric; - const MatrixType* m_matrixRef; - mutable LMatrixType m_l; - mutable UMatrixType m_u; - mutable IntColVectorType m_p; - mutable IntRowVectorType m_q; - mutable bool m_extractedDataAreDirty; -}; - -namespace internal { - -template - struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef SparseLU<_MatrixType, UmfPack> SpLUDecType; - EIGEN_MAKE_SOLVE_HELPERS(SpLUDecType,Rhs) - - template void evalTo(Dest& dst) const - { - const int rhsCols = rhs().cols(); - - eigen_assert((Rhs::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major rhs yet"); - eigen_assert((Dest::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major result yet"); - - void* numeric = const_cast(dec().numeric()); - - EIGEN_UNUSED int errorCode = 0; - for (int j=0; j -void SparseLU::compute(const MatrixType& a) -{ - typedef typename MatrixType::Index Index; - const Index rows = a.rows(); - const Index cols = a.cols(); - eigen_assert((MatrixType::Flags&RowMajorBit)==0 && "Row major matrices are not supported yet"); - - m_matrixRef = &a; - - if (m_numeric) - umfpack_free_numeric(&m_numeric,Scalar()); - - void* symbolic; - int errorCode = 0; - errorCode = umfpack_symbolic(rows, cols, a._outerIndexPtr(), 
a._innerIndexPtr(), a._valuePtr(), - &symbolic, 0, 0); - if (errorCode==0) - errorCode = umfpack_numeric(a._outerIndexPtr(), a._innerIndexPtr(), a._valuePtr(), - symbolic, &m_numeric, 0, 0); - - umfpack_free_symbolic(&symbolic,Scalar()); - - m_extractedDataAreDirty = true; - - Base::m_succeeded = (errorCode==0); -} - -template -void SparseLU::extractData() const -{ - if (m_extractedDataAreDirty) - { - // get size of the data - int lnz, unz, rows, cols, nz_udiag; - umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar()); - - // allocate data - m_l.resize(rows,(std::min)(rows,cols)); - m_l.resizeNonZeros(lnz); - - m_u.resize((std::min)(rows,cols),cols); - m_u.resizeNonZeros(unz); - - m_p.resize(rows); - m_q.resize(cols); - - // extract - umfpack_get_numeric(m_l._outerIndexPtr(), m_l._innerIndexPtr(), m_l._valuePtr(), - m_u._outerIndexPtr(), m_u._innerIndexPtr(), m_u._valuePtr(), - m_p.data(), m_q.data(), 0, 0, 0, m_numeric); - - m_extractedDataAreDirty = false; - } -} - -template -typename SparseLU::Scalar SparseLU::determinant() const -{ - Scalar det; - umfpack_get_determinant(&det, 0, m_numeric, 0); - return det; -} - -template -template -bool SparseLU::solve(const MatrixBase &b, MatrixBase *x) const -{ - //const int size = m_matrix.rows(); - const int rhsCols = b.cols(); -// eigen_assert(size==b.rows()); - eigen_assert((BDerived::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major rhs yet"); - eigen_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major result yet"); - - int errorCode; - for (int j=0; j_outerIndexPtr(), m_matrixRef->_innerIndexPtr(), m_matrixRef->_valuePtr(), - &x->col(j).coeffRef(0), &b.const_cast_derived().col(j).coeffRef(0), m_numeric, 0, 0); - if (errorCode!=0) - return false; - } -// errorCode = umfpack_di_solve(UMFPACK_A, -// m_matrixRef._outerIndexPtr(), m_matrixRef._innerIndexPtr(), m_matrixRef._valuePtr(), -// x->derived().data(), b.derived().data(), m_numeric, 0, 0); - - return true; -} - -#endif // EIGEN_UMFPACKSUPPORT_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/CMakeLists.txt b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/CMakeLists.txt new file mode 100644 index 000000000..55c6271e9 --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/CMakeLists.txt @@ -0,0 +1,6 @@ +FILE(GLOB Eigen_Splines_SRCS "*.h") + +INSTALL(FILES + ${Eigen_Splines_SRCS} + DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/Splines COMPONENT Devel + ) diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/Spline.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/Spline.h new file mode 100644 index 000000000..4c06453f7 --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/Spline.h @@ -0,0 +1,479 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 20010-2011 Hauke Heibel +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. 
+// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPLINE_H +#define EIGEN_SPLINE_H + +#include "SplineFwd.h" + +namespace Eigen +{ + /** + * \ingroup Splines_Module + * \class Spline class + * \brief A class representing multi-dimensional spline curves. + * + * The class represents B-splines with non-uniform knot vectors. Each control + * point of the B-spline is associated with a basis function + * \f{align*} + * C(u) & = \sum_{i=0}^{n}N_{i,p}(u)P_i + * \f} + * + * \tparam _Scalar The underlying data type (typically float or double) + * \tparam _Dim The curve dimension (e.g. 2 or 3) + * \tparam _Degree Per default set to Dynamic; could be set to the actual desired + * degree for optimization purposes (would result in stack allocation + * of several temporary variables). + **/ + template + class Spline + { + public: + typedef _Scalar Scalar; /*!< The spline curve's scalar type. */ + enum { Dimension = _Dim /*!< The spline curve's dimension. */ }; + enum { Degree = _Degree /*!< The spline curve's degree. */ }; + + /** \brief The point type the spline is representing. */ + typedef typename SplineTraits::PointType PointType; + + /** \brief The data type used to store knot vectors. */ + typedef typename SplineTraits::KnotVectorType KnotVectorType; + + /** \brief The data type used to store non-zero basis functions. */ + typedef typename SplineTraits::BasisVectorType BasisVectorType; + + /** \brief The data type representing the spline's control points. */ + typedef typename SplineTraits::ControlPointVectorType ControlPointVectorType; + + /** + * \brief Creates a spline from a knot vector and control points. + * \param knots The spline's knot vector. + * \param ctrls The spline's control point vector. + **/ + template + Spline(const OtherVectorType& knots, const OtherArrayType& ctrls) : m_knots(knots), m_ctrls(ctrls) {} + + /** + * \brief Copy constructor for splines. + * \param spline The input spline. + **/ + template + Spline(const Spline& spline) : + m_knots(spline.knots()), m_ctrls(spline.ctrls()) {} + + /** + * \brief Returns the knots of the underlying spline. + **/ + const KnotVectorType& knots() const { return m_knots; } + + /** + * \brief Returns the knots of the underlying spline. + **/ + const ControlPointVectorType& ctrls() const { return m_ctrls; } + + /** + * \brief Returns the spline value at a given site \f$u\f$. + * + * The function returns + * \f{align*} + * C(u) & = \sum_{i=0}^{n}N_{i,p}P_i + * \f} + * + * \param u Parameter \f$u \in [0;1]\f$ at which the spline is evaluated. + * \return The spline value at the given location \f$u\f$. + **/ + PointType operator()(Scalar u) const; + + /** + * \brief Evaluation of spline derivatives of up-to given order. + * + * The function returns + * \f{align*} + * \frac{d^i}{du^i}C(u) & = \sum_{i=0}^{n} \frac{d^i}{du^i} N_{i,p}(u)P_i + * \f} + * for i ranging between 0 and order. + * + * \param u Parameter \f$u \in [0;1]\f$ at which the spline derivative is evaluated. + * \param order The order up to which the derivatives are computed. 
+ **/ + typename SplineTraits::DerivativeType + derivatives(Scalar u, DenseIndex order) const; + + /** + * \copydoc Spline::derivatives + * Using the template version of this function is more efficieent since + * temporary objects are allocated on the stack whenever this is possible. + **/ + template + typename SplineTraits::DerivativeType + derivatives(Scalar u, DenseIndex order = DerivativeOrder) const; + + /** + * \brief Computes the non-zero basis functions at the given site. + * + * Splines have local support and a point from their image is defined + * by exactly \f$p+1\f$ control points \f$P_i\f$ where \f$p\f$ is the + * spline degree. + * + * This function computes the \f$p+1\f$ non-zero basis function values + * for a given parameter value \f$u\f$. It returns + * \f{align*}{ + * N_{i,p}(u), \hdots, N_{i+p+1,p}(u) + * \f} + * + * \param u Parameter \f$u \in [0;1]\f$ at which the non-zero basis functions + * are computed. + **/ + typename SplineTraits::BasisVectorType + basisFunctions(Scalar u) const; + + /** + * \brief Computes the non-zero spline basis function derivatives up to given order. + * + * The function computes + * \f{align*}{ + * \frac{d^i}{du^i} N_{i,p}(u), \hdots, \frac{d^i}{du^i} N_{i+p+1,p}(u) + * \f} + * with i ranging from 0 up to the specified order. + * + * \param u Parameter \f$u \in [0;1]\f$ at which the non-zero basis function + * derivatives are computed. + * \param order The order up to which the basis function derivatives are computes. + **/ + typename SplineTraits::BasisDerivativeType + basisFunctionDerivatives(Scalar u, DenseIndex order) const; + + /** + * \copydoc Spline::basisFunctionDerivatives + * Using the template version of this function is more efficieent since + * temporary objects are allocated on the stack whenever this is possible. + **/ + template + typename SplineTraits::BasisDerivativeType + basisFunctionDerivatives(Scalar u, DenseIndex order = DerivativeOrder) const; + + /** + * \brief Returns the spline degree. + **/ + DenseIndex degree() const; + + /** + * \brief Returns the span within the knot vector in which u is falling. + * \param u The site for which the span is determined. + **/ + DenseIndex span(Scalar u) const; + + /** + * \brief Computes the spang within the provided knot vector in which u is falling. + **/ + static DenseIndex Span(typename SplineTraits::Scalar u, DenseIndex degree, const typename SplineTraits::KnotVectorType& knots); + + /** + * \brief Returns the spline's non-zero basis functions. + * + * The function computes and returns + * \f{align*}{ + * N_{i,p}(u), \hdots, N_{i+p+1,p}(u) + * \f} + * + * \param u The site at which the basis functions are computed. + * \param degree The degree of the underlying spline. + * \param knots The underlying spline's knot vector. + **/ + static BasisVectorType BasisFunctions(Scalar u, DenseIndex degree, const KnotVectorType& knots); + + + private: + KnotVectorType m_knots; /*!< Knot vector. */ + ControlPointVectorType m_ctrls; /*!< Control points. */ + }; + + template + DenseIndex Spline<_Scalar, _Dim, _Degree>::Span( + typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::Scalar u, + DenseIndex degree, + const typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::KnotVectorType& knots) + { + // Piegl & Tiller, "The NURBS Book", A2.1 (p. 
68) + if (u <= knots(0)) return degree; + const Scalar* pos = std::upper_bound(knots.data()+degree-1, knots.data()+knots.size()-degree-1, u); + return static_cast( std::distance(knots.data(), pos) - 1 ); + } + + template + typename Spline<_Scalar, _Dim, _Degree>::BasisVectorType + Spline<_Scalar, _Dim, _Degree>::BasisFunctions( + typename Spline<_Scalar, _Dim, _Degree>::Scalar u, + DenseIndex degree, + const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& knots) + { + typedef typename Spline<_Scalar, _Dim, _Degree>::BasisVectorType BasisVectorType; + + const DenseIndex p = degree; + const DenseIndex i = Spline::Span(u, degree, knots); + + const KnotVectorType& U = knots; + + BasisVectorType left(p+1); left(0) = Scalar(0); + BasisVectorType right(p+1); right(0) = Scalar(0); + + VectorBlock(left,1,p) = u - VectorBlock(U,i+1-p,p).reverse(); + VectorBlock(right,1,p) = VectorBlock(U,i+1,p) - u; + + BasisVectorType N(1,p+1); + N(0) = Scalar(1); + for (DenseIndex j=1; j<=p; ++j) + { + Scalar saved = Scalar(0); + for (DenseIndex r=0; r + DenseIndex Spline<_Scalar, _Dim, _Degree>::degree() const + { + if (_Degree == Dynamic) + return m_knots.size() - m_ctrls.cols() - 1; + else + return _Degree; + } + + template + DenseIndex Spline<_Scalar, _Dim, _Degree>::span(Scalar u) const + { + return Spline::Span(u, degree(), knots()); + } + + template + typename Spline<_Scalar, _Dim, _Degree>::PointType Spline<_Scalar, _Dim, _Degree>::operator()(Scalar u) const + { + enum { Order = SplineTraits::OrderAtCompileTime }; + + const DenseIndex span = this->span(u); + const DenseIndex p = degree(); + const BasisVectorType basis_funcs = basisFunctions(u); + + const Replicate ctrl_weights(basis_funcs); + const Block ctrl_pts(ctrls(),0,span-p,Dimension,p+1); + return (ctrl_weights * ctrl_pts).rowwise().sum(); + } + + /* --------------------------------------------------------------------------------------------- */ + + template + void derivativesImpl(const SplineType& spline, typename SplineType::Scalar u, DenseIndex order, DerivativeType& der) + { + enum { Dimension = SplineTraits::Dimension }; + enum { Order = SplineTraits::OrderAtCompileTime }; + enum { DerivativeOrder = DerivativeType::ColsAtCompileTime }; + + typedef typename SplineTraits::Scalar Scalar; + + typedef typename SplineTraits::BasisVectorType BasisVectorType; + typedef typename SplineTraits::ControlPointVectorType ControlPointVectorType; + + typedef typename SplineTraits::BasisDerivativeType BasisDerivativeType; + typedef typename BasisDerivativeType::ConstRowXpr BasisDerivativeRowXpr; + + const DenseIndex p = spline.degree(); + const DenseIndex span = spline.span(u); + + const DenseIndex n = (std::min)(p, order); + + der.resize(Dimension,n+1); + + // Retrieve the basis function derivatives up to the desired order... + const BasisDerivativeType basis_func_ders = spline.template basisFunctionDerivatives(u, n+1); + + // ... and perform the linear combinations of the control points. 
+ for (DenseIndex der_order=0; der_order ctrl_weights( basis_func_ders.row(der_order) ); + const Block ctrl_pts(spline.ctrls(),0,span-p,Dimension,p+1); + der.col(der_order) = (ctrl_weights * ctrl_pts).rowwise().sum(); + } + } + + template + typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::DerivativeType + Spline<_Scalar, _Dim, _Degree>::derivatives(Scalar u, DenseIndex order) const + { + typename SplineTraits< Spline >::DerivativeType res; + derivativesImpl(*this, u, order, res); + return res; + } + + template + template + typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::DerivativeType + Spline<_Scalar, _Dim, _Degree>::derivatives(Scalar u, DenseIndex order) const + { + typename SplineTraits< Spline, DerivativeOrder >::DerivativeType res; + derivativesImpl(*this, u, order, res); + return res; + } + + template + typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::BasisVectorType + Spline<_Scalar, _Dim, _Degree>::basisFunctions(Scalar u) const + { + return Spline::BasisFunctions(u, degree(), knots()); + } + + /* --------------------------------------------------------------------------------------------- */ + + template + void basisFunctionDerivativesImpl(const SplineType& spline, typename SplineType::Scalar u, DenseIndex order, DerivativeType& N_) + { + enum { Order = SplineTraits::OrderAtCompileTime }; + + typedef typename SplineTraits::Scalar Scalar; + typedef typename SplineTraits::BasisVectorType BasisVectorType; + typedef typename SplineTraits::KnotVectorType KnotVectorType; + typedef typename SplineTraits::ControlPointVectorType ControlPointVectorType; + + const KnotVectorType& U = spline.knots(); + + const DenseIndex p = spline.degree(); + const DenseIndex span = spline.span(u); + + const DenseIndex n = (std::min)(p, order); + + N_.resize(n+1, p+1); + + BasisVectorType left = BasisVectorType::Zero(p+1); + BasisVectorType right = BasisVectorType::Zero(p+1); + + Matrix ndu(p+1,p+1); + + double saved, temp; + + ndu(0,0) = 1.0; + + DenseIndex j; + for (j=1; j<=p; ++j) + { + left[j] = u-U[span+1-j]; + right[j] = U[span+j]-u; + saved = 0.0; + + for (DenseIndex r=0; r(saved+right[r+1] * temp); + saved = left[j-r] * temp; + } + + ndu(j,j) = static_cast(saved); + } + + for (j = p; j>=0; --j) + N_(0,j) = ndu(j,p); + + // Compute the derivatives + DerivativeType a(n+1,p+1); + DenseIndex r=0; + for (; r<=p; ++r) + { + DenseIndex s1,s2; + s1 = 0; s2 = 1; // alternate rows in array a + a(0,0) = 1.0; + + // Compute the k-th derivative + for (DenseIndex k=1; k<=static_cast(n); ++k) + { + double d = 0.0; + DenseIndex rk,pk,j1,j2; + rk = r-k; pk = p-k; + + if (r>=k) + { + a(s2,0) = a(s1,0)/ndu(pk+1,rk); + d = a(s2,0)*ndu(rk,pk); + } + + if (rk>=-1) j1 = 1; + else j1 = -rk; + + if (r-1 <= pk) j2 = k-1; + else j2 = p-r; + + for (j=j1; j<=j2; ++j) + { + a(s2,j) = (a(s1,j)-a(s1,j-1))/ndu(pk+1,rk+j); + d += a(s2,j)*ndu(rk+j,pk); + } + + if (r<=pk) + { + a(s2,k) = -a(s1,k-1)/ndu(pk+1,r); + d += a(s2,k)*ndu(r,pk); + } + + N_(k,r) = static_cast(d); + j = s1; s1 = s2; s2 = j; // Switch rows + } + } + + /* Multiply through by the correct factors */ + /* (Eq. 
[2.9]) */ + r = p; + for (DenseIndex k=1; k<=static_cast(n); ++k) + { + for (DenseIndex j=p; j>=0; --j) N_(k,j) *= r; + r *= p-k; + } + } + + template + typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::BasisDerivativeType + Spline<_Scalar, _Dim, _Degree>::basisFunctionDerivatives(Scalar u, DenseIndex order) const + { + typename SplineTraits< Spline >::BasisDerivativeType der; + basisFunctionDerivativesImpl(*this, u, order, der); + return der; + } + + template + template + typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::BasisDerivativeType + Spline<_Scalar, _Dim, _Degree>::basisFunctionDerivatives(Scalar u, DenseIndex order) const + { + typename SplineTraits< Spline, DerivativeOrder >::BasisDerivativeType der; + basisFunctionDerivativesImpl(*this, u, order, der); + return der; + } +} + +#endif // EIGEN_SPLINE_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/SplineFitting.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/SplineFitting.h new file mode 100644 index 000000000..3e8abbbce --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/SplineFitting.h @@ -0,0 +1,174 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 20010-2011 Hauke Heibel +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPLINE_FITTING_H +#define EIGEN_SPLINE_FITTING_H + +#include + +#include "SplineFwd.h" + +#include + +namespace Eigen +{ + /** + * \brief Computes knot averages. + * \ingroup Splines_Module + * + * The knots are computed as + * \f{align*} + * u_0 & = \hdots = u_p = 0 \\ + * u_{m-p} & = \hdots = u_{m} = 1 \\ + * u_{j+p} & = \frac{1}{p}\sum_{i=j}^{j+p-1}\bar{u}_i \quad\quad j=1,\hdots,n-p + * \f} + * where \f$p\f$ is the degree and \f$m+1\f$ the number knots + * of the desired interpolating spline. + * + * \param[in] parameters The input parameters. During interpolation one for each data point. + * \param[in] degree The spline degree which is used during the interpolation. + * \param[out] knots The output knot vector. + * + * \sa Les Piegl and Wayne Tiller, The NURBS book (2nd ed.), 1997, 9.2.1 Global Curve Interpolation to Point Data + **/ + template + void KnotAveraging(const KnotVectorType& parameters, DenseIndex degree, KnotVectorType& knots) + { + typedef typename KnotVectorType::Scalar Scalar; + + knots.resize(parameters.size()+degree+1); + + for (DenseIndex j=1; j + void ChordLengths(const PointArrayType& pts, KnotVectorType& chord_lengths) + { + typedef typename KnotVectorType::Scalar Scalar; + + const DenseIndex n = pts.cols(); + + // 1. 
compute the column-wise norms + chord_lengths.resize(pts.cols()); + chord_lengths[0] = 0; + chord_lengths.rightCols(n-1) = (pts.array().leftCols(n-1) - pts.array().rightCols(n-1)).matrix().colwise().norm(); + + // 2. compute the partial sums + std::partial_sum(chord_lengths.data(), chord_lengths.data()+n, chord_lengths.data()); + + // 3. normalize the data + chord_lengths /= chord_lengths(n-1); + chord_lengths(n-1) = Scalar(1); + } + + /** + * \brief Spline fitting methods. + * \ingroup Splines_Module + **/ + template + struct SplineFitting + { + typedef typename SplineType::KnotVectorType KnotVectorType; + + /** + * \brief Fits an interpolating Spline to the given data points. + * + * \param pts The points for which an interpolating spline will be computed. + * \param degree The degree of the interpolating spline. + * + * \returns A spline interpolating the initially provided points. + **/ + template + static SplineType Interpolate(const PointArrayType& pts, DenseIndex degree); + + /** + * \brief Fits an interpolating Spline to the given data points. + * + * \param pts The points for which an interpolating spline will be computed. + * \param degree The degree of the interpolating spline. + * \param knot_parameters The knot parameters for the interpolation. + * + * \returns A spline interpolating the initially provided points. + **/ + template + static SplineType Interpolate(const PointArrayType& pts, DenseIndex degree, const KnotVectorType& knot_parameters); + }; + + template + template + SplineType SplineFitting::Interpolate(const PointArrayType& pts, DenseIndex degree, const KnotVectorType& knot_parameters) + { + typedef typename SplineType::KnotVectorType::Scalar Scalar; + typedef typename SplineType::BasisVectorType BasisVectorType; + typedef typename SplineType::ControlPointVectorType ControlPointVectorType; + + typedef Matrix MatrixType; + + KnotVectorType knots; + KnotAveraging(knot_parameters, degree, knots); + + DenseIndex n = pts.cols(); + MatrixType A = MatrixType::Zero(n,n); + for (DenseIndex i=1; i qr(A); + + // Here, we are creating a temporary due to an Eigen issue. + ControlPointVectorType ctrls = qr.solve(MatrixType(pts.transpose())).transpose(); + + return SplineType(knots, ctrls); + } + + template + template + SplineType SplineFitting::Interpolate(const PointArrayType& pts, DenseIndex degree) + { + KnotVectorType chord_lengths; // knot parameters + ChordLengths(pts, chord_lengths); + return Interpolate(pts, degree, chord_lengths); + } +} + +#endif // EIGEN_SPLINE_FITTING_H diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/SplineFwd.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/SplineFwd.h new file mode 100644 index 000000000..0119115a6 --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/SplineFwd.h @@ -0,0 +1,101 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 20010-2011 Hauke Heibel +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. 
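For orientation, here is a minimal usage sketch of the spline fitting and evaluation code introduced above. It is illustrative only and not part of the patch: it assumes the module is reachable via the unsupported/Eigen/Splines header and uses only the Spline, SplineFitting, ChordLengths and SplineTraits entry points declared in the files above; the data values are arbitrary.

// Minimal sketch: interpolate five 2D points with a cubic B-spline and evaluate it.
#include <unsupported/Eigen/Splines>
#include <iostream>

int main()
{
  using namespace Eigen;

  // One point per column (arbitrary data).
  MatrixXd points(2, 5);
  points << 0, 1, 2, 3, 4,
            0, 1, 0, 1, 0;

  // Global curve interpolation as implemented by SplineFitting::Interpolate() above.
  Spline2d spline = SplineFitting<Spline2d>::Interpolate(points, 3);

  // The interpolant passes through the input points at the chord-length parameters.
  Spline2d::KnotVectorType params;
  ChordLengths(points, params);
  std::cout << spline(params(2)).transpose()
            << "   (should equal " << points.col(2).transpose() << ")\n";

  // Value and first derivative at an arbitrary site in [0,1].
  SplineTraits<Spline2d>::DerivativeType ders = spline.derivatives(0.25, 1);
  std::cout << "C (0.25) = " << ders.col(0).transpose() << "\n"
            << "C'(0.25) = " << ders.col(1).transpose() << "\n";
}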
+// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPLINES_FWD_H +#define EIGEN_SPLINES_FWD_H + +#include + +namespace Eigen +{ + template class Spline; + + template < typename SplineType, int DerivativeOrder = Dynamic > struct SplineTraits {}; + + /** + * \ingroup Splines_Module + * \brief Compile-time attributes of the Spline class for Dynamic degree. + **/ + template + struct SplineTraits< Spline<_Scalar, _Dim, _Degree>, Dynamic > + { + typedef _Scalar Scalar; /*!< The spline curve's scalar type. */ + enum { Dimension = _Dim /*!< The spline curve's dimension. */ }; + enum { Degree = _Degree /*!< The spline curve's degree. */ }; + + enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ }; + enum { NumOfDerivativesAtCompileTime = OrderAtCompileTime /*!< The number of derivatives defined for the current spline. */ }; + + /** \brief The data type used to store non-zero basis functions. */ + typedef Array BasisVectorType; + + /** \brief The data type used to store the values of the basis function derivatives. */ + typedef Array BasisDerivativeType; + + /** \brief The data type used to store the spline's derivative values. */ + typedef Array DerivativeType; + + /** \brief The point type the spline is representing. */ + typedef Array PointType; + + /** \brief The data type used to store knot vectors. */ + typedef Array KnotVectorType; + + /** \brief The data type representing the spline's control points. */ + typedef Array ControlPointVectorType; + }; + + /** + * \ingroup Splines_Module + * \brief Compile-time attributes of the Spline class for fixed degree. + * + * The traits class inherits all attributes from the SplineTraits of Dynamic degree. + **/ + template < typename _Scalar, int _Dim, int _Degree, int _DerivativeOrder > + struct SplineTraits< Spline<_Scalar, _Dim, _Degree>, _DerivativeOrder > : public SplineTraits< Spline<_Scalar, _Dim, _Degree> > + { + enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ }; + enum { NumOfDerivativesAtCompileTime = _DerivativeOrder==Dynamic ? Dynamic : _DerivativeOrder+1 /*!< The number of derivatives defined for the current spline. */ }; + + /** \brief The data type used to store the values of the basis function derivatives. */ + typedef Array<_Scalar,Dynamic,Dynamic,RowMajor,NumOfDerivativesAtCompileTime,OrderAtCompileTime> BasisDerivativeType; + + /** \brief The data type used to store the spline's derivative values. */ + typedef Array<_Scalar,_Dim,Dynamic,ColMajor,_Dim,NumOfDerivativesAtCompileTime> DerivativeType; + }; + + /** \brief 2D float B-spline with dynamic degree. */ + typedef Spline Spline2f; + + /** \brief 3D float B-spline with dynamic degree. */ + typedef Spline Spline3f; + + /** \brief 2D double B-spline with dynamic degree. */ + typedef Spline Spline2d; + + /** \brief 3D double B-spline with dynamic degree. 
*/ + typedef Spline Spline3d; +} + +#endif // EIGEN_SPLINES_FWD_H diff --git a/gtsam/3rdparty/Eigen/unsupported/doc/Doxyfile.in b/gtsam/3rdparty/Eigen/unsupported/doc/Doxyfile.in index 7d5f24b4e..1facf2985 100644 --- a/gtsam/3rdparty/Eigen/unsupported/doc/Doxyfile.in +++ b/gtsam/3rdparty/Eigen/unsupported/doc/Doxyfile.in @@ -203,7 +203,6 @@ ALIASES = "only_for_vectors=This is only for vectors (either row- "svd_module=This is defined in the %SVD module. \code #include \endcode" \ "geometry_module=This is defined in the %Geometry module. \code #include \endcode" \ "label=\bug" \ - "redstar=*" \ "nonstableyet=\warning This is not considered to be part of the stable public API yet. Changes may happen in future releases. See \ref Experimental \"Experimental parts of Eigen\"" # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C @@ -576,13 +575,14 @@ FILE_PATTERNS = * # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. -RECURSIVE = NO +RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. -EXCLUDE = +EXCLUDE = "${Eigen_SOURCE_DIR}/unsupported/doc/examples" \ + "${Eigen_SOURCE_DIR}/unsupported/doc/snippets" # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded @@ -958,7 +958,8 @@ PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. -EXTRA_PACKAGES = amssymb +EXTRA_PACKAGES = amssymb \ + amsmath # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. 
The header should contain everything until diff --git a/gtsam/3rdparty/Eigen/unsupported/doc/examples/MatrixLogarithm.cpp b/gtsam/3rdparty/Eigen/unsupported/doc/examples/MatrixLogarithm.cpp new file mode 100644 index 000000000..8c5d97054 --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/doc/examples/MatrixLogarithm.cpp @@ -0,0 +1,15 @@ +#include +#include + +using namespace Eigen; + +int main() +{ + using std::sqrt; + MatrixXd A(3,3); + A << 0.5*sqrt(2), -0.5*sqrt(2), 0, + 0.5*sqrt(2), 0.5*sqrt(2), 0, + 0, 0, 1; + std::cout << "The matrix A is:\n" << A << "\n\n"; + std::cout << "The matrix logarithm of A is:\n" << A.log() << "\n"; +} diff --git a/gtsam/3rdparty/Eigen/unsupported/doc/examples/MatrixSquareRoot.cpp b/gtsam/3rdparty/Eigen/unsupported/doc/examples/MatrixSquareRoot.cpp new file mode 100644 index 000000000..88e7557d7 --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/doc/examples/MatrixSquareRoot.cpp @@ -0,0 +1,16 @@ +#include +#include + +using namespace Eigen; + +int main() +{ + const double pi = std::acos(-1.0); + + MatrixXd A(2,2); + A << cos(pi/3), -sin(pi/3), + sin(pi/3), cos(pi/3); + std::cout << "The matrix A is:\n" << A << "\n\n"; + std::cout << "The matrix square root of A is:\n" << A.sqrt() << "\n\n"; + std::cout << "The square of the last matrix is:\n" << A.sqrt() * A.sqrt() << "\n"; +} diff --git a/gtsam/3rdparty/Eigen/unsupported/test/BVH.cpp b/gtsam/3rdparty/Eigen/unsupported/test/BVH.cpp index e77e84b6d..3f9d108de 100644 --- a/gtsam/3rdparty/Eigen/unsupported/test/BVH.cpp +++ b/gtsam/3rdparty/Eigen/unsupported/test/BVH.cpp @@ -24,9 +24,15 @@ #include "main.h" #include +#include #include -inline double SQR(double x) { return x * x; } +namespace Eigen { + +template AlignedBox bounding_box(const Matrix &v) { return AlignedBox(v); } + +} + template struct Ball @@ -41,16 +47,10 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(double, Dim) VectorType center; double radius; }; - -namespace Eigen { -namespace internal { - -template AlignedBox bounding_box(const Matrix &v) { return AlignedBox(v); } template AlignedBox bounding_box(const Ball &b) { return AlignedBox(b.center.array() - b.radius, b.center.array() + b.radius); } -} // end namespace internal -} +inline double SQR(double x) { return x * x; } template struct BallPointStuff //this class provides functions to be both an intersector and a minimizer, both for a ball and a point and for two trees diff --git a/gtsam/3rdparty/Eigen/unsupported/test/CMakeLists.txt b/gtsam/3rdparty/Eigen/unsupported/test/CMakeLists.txt index f8c0ff486..b34b151b1 100644 --- a/gtsam/3rdparty/Eigen/unsupported/test/CMakeLists.txt +++ b/gtsam/3rdparty/Eigen/unsupported/test/CMakeLists.txt @@ -1,47 +1,6 @@ -include_directories(../../test ../../unsupported ../../Eigen) - -set(SPARSE_LIBS "") - -# configure blas/lapack -if(CMAKE_Fortran_COMPILER_WORKS) - set(BLAS_FOUND TRUE) - set(LAPACK_FOUND TRUE) - set(BLAS_LIBRARIES eigen_blas_static) - set(LAPACK_LIBRARIES eigen_lapack_static) -else() - # TODO search for default blas/lapack -endif() - -find_package(Cholmod) -if(CHOLMOD_FOUND AND BLAS_FOUND AND LAPACK_FOUND) - add_definitions("-DEIGEN_CHOLMOD_SUPPORT") - include_directories(${CHOLMOD_INCLUDES}) - set(SPARSE_LIBS ${SPARSE_LIBS} ${CHOLMOD_LIBRARIES} ${BLAS_LIBRARIES} ${LAPACK_LIBRARIES}) - ei_add_property(EIGEN_TESTED_BACKENDS "Cholmod, ") -else() - ei_add_property(EIGEN_MISSING_BACKENDS "Cholmod, ") -endif() - -find_package(Umfpack) -if(UMFPACK_FOUND AND BLAS_FOUND) - add_definitions("-DEIGEN_UMFPACK_SUPPORT") - 
include_directories(${UMFPACK_INCLUDES}) - set(SPARSE_LIBS ${SPARSE_LIBS} ${UMFPACK_LIBRARIES} ${BLAS_LIBRARIES}) - ei_add_property(EIGEN_TESTED_BACKENDS "UmfPack, ") -else() - ei_add_property(EIGEN_MISSING_BACKENDS "UmfPack, ") -endif() - -find_package(SuperLU) -if(SUPERLU_FOUND AND BLAS_FOUND) - add_definitions("-DEIGEN_SUPERLU_SUPPORT") - include_directories(${SUPERLU_INCLUDES}) - set(SPARSE_LIBS ${SPARSE_LIBS} ${SUPERLU_LIBRARIES} ${BLAS_LIBRARIES}) - ei_add_property(EIGEN_TESTED_BACKENDS "SuperLU, ") -else() - ei_add_property(EIGEN_MISSING_BACKENDS "SuperLU, ") -endif() +include_directories(../../test ../../unsupported ../../Eigen + ${CMAKE_CURRENT_BINARY_DIR}/../../test) find_package(GoogleHash) if(GOOGLEHASH_FOUND) @@ -74,6 +33,7 @@ endif() ei_add_test(matrix_exponential) ei_add_test(matrix_function) +ei_add_test(matrix_square_root) ei_add_test(alignedvector3) ei_add_test(FFT) @@ -88,18 +48,16 @@ else() ei_add_property(EIGEN_MISSING_BACKENDS "MPFR C++, ") endif() -ei_add_test(sparse_llt "" "${SPARSE_LIBS}") -ei_add_test(sparse_ldlt "" "${SPARSE_LIBS}") -ei_add_test(sparse_lu "" "${SPARSE_LIBS}") ei_add_test(sparse_extra "" "") find_package(FFTW) if(FFTW_FOUND) ei_add_property(EIGEN_TESTED_BACKENDS "fftw, ") + include_directories( ${FFTW_INCLUDES} ) if(FFTWL_LIB) - ei_add_test(FFTW "-DEIGEN_FFTW_DEFAULT -DEIGEN_HAS_FFTWL" "fftw3;fftw3f;fftw3l" ) + ei_add_test(FFTW "-DEIGEN_FFTW_DEFAULT -DEIGEN_HAS_FFTWL" "${FFTW_LIBRARIES}" ) else() - ei_add_test(FFTW "-DEIGEN_FFTW_DEFAULT " "fftw3;fftw3f" ) + ei_add_test(FFTW "-DEIGEN_FFTW_DEFAULT" "${FFTW_LIBRARIES}" ) endif() else() ei_add_property(EIGEN_MISSING_BACKENDS "fftw, ") @@ -121,18 +79,9 @@ else() ei_add_property(EIGEN_MISSING_BACKENDS "OpenGL, ") endif() -find_package(GSL) -if(GSL_FOUND AND GSL_VERSION_MINOR LESS 9) - set(GSL_FOUND "") -endif(GSL_FOUND AND GSL_VERSION_MINOR LESS 9) -if(GSL_FOUND) - add_definitions("-DHAS_GSL" ${GSL_DEFINITIONS}) - include_directories(${GSL_INCLUDE_DIR}) - ei_add_property(EIGEN_TESTED_BACKENDS "GSL, ") -else(GSL_FOUND) - ei_add_property(EIGEN_MISSING_BACKENDS "GSL, ") - set(GSL_LIBRARIES " ") -endif(GSL_FOUND) - -ei_add_test(polynomialsolver " " "${GSL_LIBRARIES}" ) +ei_add_test(polynomialsolver) ei_add_test(polynomialutils) +ei_add_test(kronecker_product) +ei_add_test(splines) +ei_add_test(gmres) + diff --git a/gtsam/3rdparty/Eigen/unsupported/test/autodiff.cpp b/gtsam/3rdparty/Eigen/unsupported/test/autodiff.cpp index a32d85829..7ce4b4dee 100644 --- a/gtsam/3rdparty/Eigen/unsupported/test/autodiff.cpp +++ b/gtsam/3rdparty/Eigen/unsupported/test/autodiff.cpp @@ -28,13 +28,21 @@ template EIGEN_DONT_INLINE Scalar foo(const Scalar& x, const Scalar& y) { + using namespace std; // return x+std::sin(y); EIGEN_ASM_COMMENT("mybegin"); - return static_cast(x*2 - std::pow(x,2) + 2*std::sqrt(y*y) - 4 * std::sin(x) + 2 * std::cos(y) - std::exp(-0.5*x*x)); + return static_cast(x*2 - pow(x,2) + 2*sqrt(y*y) - 4 * sin(x) + 2 * cos(y) - exp(-0.5*x*x)); //return x+2*y*x;//x*2 -std::pow(x,2);//(2*y/x);// - y*2; EIGEN_ASM_COMMENT("myend"); } +template +EIGEN_DONT_INLINE typename Vector::Scalar foo(const Vector& p) +{ + typedef typename Vector::Scalar Scalar; + return (p-Vector(Scalar(-1),Scalar(1.))).norm() + (p.array() * p.array()).sum() + p.dot(p); +} + template struct TestFunc1 { @@ -140,9 +148,23 @@ void test_autodiff_scalar() typedef AutoDiffScalar AD; AD ax(1,Vector2f::UnitX()); AD ay(2,Vector2f::UnitY()); - foo(ax,ay); - std::cerr << foo(ax,ay).value() << " <> " - << foo(ax,ay).derivatives().transpose() << "\n\n"; + 
AD res = foo(ax,ay); + std::cerr << res.value() << " <> " + << res.derivatives().transpose() << "\n\n"; +} + +void test_autodiff_vector() +{ + std::cerr << foo(Vector2f(1,2)) << "\n"; + typedef AutoDiffScalar AD; + typedef Matrix VectorAD; + VectorAD p(AD(1),AD(-1)); + p.x().derivatives() = Vector2f::UnitX(); + p.y().derivatives() = Vector2f::UnitY(); + + AD res = foo(p); + std::cerr << res.value() << " <> " + << res.derivatives().transpose() << "\n\n"; } void test_autodiff_jacobian() @@ -159,6 +181,7 @@ void test_autodiff_jacobian() void test_autodiff() { test_autodiff_scalar(); - test_autodiff_jacobian(); + test_autodiff_vector(); +// test_autodiff_jacobian(); } diff --git a/gtsam/3rdparty/Eigen/unsupported/test/forward_adolc.cpp b/gtsam/3rdparty/Eigen/unsupported/test/forward_adolc.cpp index 1971d883b..07959a668 100644 --- a/gtsam/3rdparty/Eigen/unsupported/test/forward_adolc.cpp +++ b/gtsam/3rdparty/Eigen/unsupported/test/forward_adolc.cpp @@ -23,11 +23,20 @@ // Eigen. If not, see . #include "main.h" +#include + #define NUMBER_DIRECTIONS 16 #include int adtl::ADOLC_numDir; +template +EIGEN_DONT_INLINE typename Vector::Scalar foo(const Vector& p) +{ + typedef typename Vector::Scalar Scalar; + return (p-Vector(Scalar(-1),Scalar(1.))).norm() + (p.array().sqrt().abs() * p.array().sin()).sum() + p.dot(p); +} + template struct TestFunc1 { @@ -138,4 +147,12 @@ void test_forward_adolc() CALL_SUBTEST(( adolc_forward_jacobian(TestFunc1()) )); CALL_SUBTEST(( adolc_forward_jacobian(TestFunc1(3,3)) )); } + + { + // simple instanciation tests + Matrix x; + foo(x); + Matrix A(4,4);; + A.selfadjointView().eigenvalues(); + } } diff --git a/gtsam/3rdparty/Eigen/unsupported/test/gmres.cpp b/gtsam/3rdparty/Eigen/unsupported/test/gmres.cpp new file mode 100644 index 000000000..30ebe8979 --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/test/gmres.cpp @@ -0,0 +1,48 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// Copyright (C) 2012 Kolja Brix +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
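Before the test functions below, a standalone sketch of what they exercise: GMRES on a sparse system with an incomplete-LU preconditioner. This is illustrative only; it assumes GMRES is reachable through the unsupported IterativeSolvers header and follows the usual compute()/solve() iterative-solver interface relied on by the sparse_solver.h helpers, and the test data are arbitrary.

// Minimal sketch: GMRES with an IncompleteLUT preconditioner on a tridiagonal system.
#include <unsupported/Eigen/IterativeSolvers>
#include <Eigen/Sparse>
#include <iostream>

int main()
{
  const int n = 100;

  // Assemble a simple tridiagonal matrix (arbitrary data).
  Eigen::SparseMatrix<double> A(n, n);
  A.reserve(Eigen::VectorXi::Constant(n, 3));
  for (int i = 0; i < n; ++i)
  {
    A.insert(i, i) = 4.0;
    if (i > 0)     A.insert(i, i - 1) = -1.0;
    if (i < n - 1) A.insert(i, i + 1) = -1.0;
  }
  A.makeCompressed();
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  Eigen::GMRES<Eigen::SparseMatrix<double>, Eigen::IncompleteLUT<double> > solver;
  solver.compute(A);
  Eigen::VectorXd x = solver.solve(b);

  std::cout << "iterations: " << solver.iterations()
            << "  estimated error: " << solver.error() << "\n";
}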
+ +#include "../../test/sparse_solver.h" +#include + +template void test_gmres_T() +{ + GMRES, DiagonalPreconditioner > gmres_colmajor_diag; + GMRES, IdentityPreconditioner > gmres_colmajor_I; + GMRES, IncompleteLUT > gmres_colmajor_ilut; + //GMRES, SSORPreconditioner > gmres_colmajor_ssor; + + CALL_SUBTEST( check_sparse_square_solving(gmres_colmajor_diag) ); +// CALL_SUBTEST( check_sparse_square_solving(gmres_colmajor_I) ); + CALL_SUBTEST( check_sparse_square_solving(gmres_colmajor_ilut) ); + //CALL_SUBTEST( check_sparse_square_solving(gmres_colmajor_ssor) ); +} + +void test_gmres() +{ + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1(test_gmres_T()); + CALL_SUBTEST_2(test_gmres_T >()); + } +} diff --git a/gtsam/3rdparty/Eigen/unsupported/test/kronecker_product.cpp b/gtsam/3rdparty/Eigen/unsupported/test/kronecker_product.cpp new file mode 100644 index 000000000..3c7a6629f --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/test/kronecker_product.cpp @@ -0,0 +1,194 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Kolja Brix +// Copyright (C) 2011 Andreas Platen +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
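The checks below verify hard-coded coefficients of the Kronecker product of A and B; for orientation, this is the three-argument kroneckerProduct() call they exercise, which writes the product into its last argument (resizing plain dense results, as the DM_ab case below relies on). Illustrative only, assuming the routine is reachable through the unsupported KroneckerProduct header.

// Minimal sketch: dense Kronecker product through the 3-argument API.
#include <unsupported/Eigen/KroneckerProduct>
#include <iostream>

int main()
{
  using namespace Eigen;

  MatrixXd A(2, 2), B(2, 2), C;
  A << 1, 2,
       3, 4;
  B << 0, 1,
       1, 0;

  // Each entry A(i,j) is replaced by the block A(i,j)*B, giving a 4x4 result.
  kroneckerProduct(A, B, C);
  std::cout << C << "\n";
}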
+ + +#include "sparse.h" +#include +#include + + +template +void check_dimension(const MatrixType& ab, const unsigned int rows, const unsigned int cols) +{ + VERIFY_IS_EQUAL(ab.rows(), rows); + VERIFY_IS_EQUAL(ab.cols(), cols); +} + + +template +void check_kronecker_product(const MatrixType& ab) +{ + VERIFY_IS_EQUAL(ab.rows(), 6); + VERIFY_IS_EQUAL(ab.cols(), 6); + VERIFY_IS_EQUAL(ab.nonZeros(), 36); + VERIFY_IS_APPROX(ab.coeff(0,0), -0.4017367630386106); + VERIFY_IS_APPROX(ab.coeff(0,1), 0.1056863433932735); + VERIFY_IS_APPROX(ab.coeff(0,2), -0.7255206194554212); + VERIFY_IS_APPROX(ab.coeff(0,3), 0.1908653336744706); + VERIFY_IS_APPROX(ab.coeff(0,4), 0.350864567234111); + VERIFY_IS_APPROX(ab.coeff(0,5), -0.0923032108308013); + VERIFY_IS_APPROX(ab.coeff(1,0), 0.415417514804677); + VERIFY_IS_APPROX(ab.coeff(1,1), -0.2369227701722048); + VERIFY_IS_APPROX(ab.coeff(1,2), 0.7502275131458511); + VERIFY_IS_APPROX(ab.coeff(1,3), -0.4278731019742696); + VERIFY_IS_APPROX(ab.coeff(1,4), -0.3628129162264507); + VERIFY_IS_APPROX(ab.coeff(1,5), 0.2069210808481275); + VERIFY_IS_APPROX(ab.coeff(2,0), 0.05465890160863986); + VERIFY_IS_APPROX(ab.coeff(2,1), -0.2634092511419858); + VERIFY_IS_APPROX(ab.coeff(2,2), 0.09871180285793758); + VERIFY_IS_APPROX(ab.coeff(2,3), -0.4757066334017702); + VERIFY_IS_APPROX(ab.coeff(2,4), -0.04773740823058334); + VERIFY_IS_APPROX(ab.coeff(2,5), 0.2300535609645254); + VERIFY_IS_APPROX(ab.coeff(3,0), -0.8172945853260133); + VERIFY_IS_APPROX(ab.coeff(3,1), 0.2150086428359221); + VERIFY_IS_APPROX(ab.coeff(3,2), 0.5825113847292743); + VERIFY_IS_APPROX(ab.coeff(3,3), -0.1532433770097174); + VERIFY_IS_APPROX(ab.coeff(3,4), -0.329383387282399); + VERIFY_IS_APPROX(ab.coeff(3,5), 0.08665207912033064); + VERIFY_IS_APPROX(ab.coeff(4,0), 0.8451267514863225); + VERIFY_IS_APPROX(ab.coeff(4,1), -0.481996458918977); + VERIFY_IS_APPROX(ab.coeff(4,2), -0.6023482390791535); + VERIFY_IS_APPROX(ab.coeff(4,3), 0.3435339347164565); + VERIFY_IS_APPROX(ab.coeff(4,4), 0.3406002157428891); + VERIFY_IS_APPROX(ab.coeff(4,5), -0.1942526344200915); + VERIFY_IS_APPROX(ab.coeff(5,0), 0.1111982482925399); + VERIFY_IS_APPROX(ab.coeff(5,1), -0.5358806424754169); + VERIFY_IS_APPROX(ab.coeff(5,2), -0.07925446559335647); + VERIFY_IS_APPROX(ab.coeff(5,3), 0.3819388757769038); + VERIFY_IS_APPROX(ab.coeff(5,4), 0.04481475387219876); + VERIFY_IS_APPROX(ab.coeff(5,5), -0.2159688616158057); +} + + +template +void check_sparse_kronecker_product(const MatrixType& ab) +{ + VERIFY_IS_EQUAL(ab.rows(), 12); + VERIFY_IS_EQUAL(ab.cols(), 10); + VERIFY_IS_EQUAL(ab.nonZeros(), 3*2); + VERIFY_IS_APPROX(ab.coeff(3,0), -0.04); + VERIFY_IS_APPROX(ab.coeff(5,1), 0.05); + VERIFY_IS_APPROX(ab.coeff(0,6), -0.08); + VERIFY_IS_APPROX(ab.coeff(2,7), 0.10); + VERIFY_IS_APPROX(ab.coeff(6,8), 0.12); + VERIFY_IS_APPROX(ab.coeff(8,9), -0.15); +} + + +void test_kronecker_product() +{ + // DM = dense matrix; SM = sparse matrix + Matrix DM_a; + MatrixXd DM_b(3,2); + SparseMatrix SM_a(2,3); + SparseMatrix SM_b(3,2); + SM_a.insert(0,0) = DM_a(0,0) = -0.4461540300782201; + SM_a.insert(0,1) = DM_a(0,1) = -0.8057364375283049; + SM_a.insert(0,2) = DM_a(0,2) = 0.3896572459516341; + SM_a.insert(1,0) = DM_a(1,0) = -0.9076572187376921; + SM_a.insert(1,1) = DM_a(1,1) = 0.6469156566545853; + SM_a.insert(1,2) = DM_a(1,2) = -0.3658010398782789; + SM_b.insert(0,0) = DM_b(0,0) = 0.9004440976767099; + SM_b.insert(0,1) = DM_b(0,1) = -0.2368830858139832; + SM_b.insert(1,0) = DM_b(1,0) = -0.9311078389941825; + SM_b.insert(1,1) = DM_b(1,1) = 0.5310335762980047; + 
SM_b.insert(2,0) = DM_b(2,0) = -0.1225112806872035; + SM_b.insert(2,1) = DM_b(2,1) = 0.5903998022741264; + SparseMatrix SM_row_a(SM_a), SM_row_b(SM_b); + + // test kroneckerProduct(DM_block,DM,DM_fixedSize) + Matrix DM_fix_ab; + DM_fix_ab(0,0)=37.0; + kroneckerProduct(DM_a.block(0,0,2,3),DM_b,DM_fix_ab); + CALL_SUBTEST(check_kronecker_product(DM_fix_ab)); + + // test kroneckerProduct(DM,DM,DM_block) + MatrixXd DM_block_ab(10,15); + DM_block_ab(0,0)=37.0; + kroneckerProduct(DM_a,DM_b,DM_block_ab.block(2,5,6,6)); + CALL_SUBTEST(check_kronecker_product(DM_block_ab.block(2,5,6,6))); + + // test kroneckerProduct(DM,DM,DM) + MatrixXd DM_ab(1,5); + DM_ab(0,0)=37.0; + kroneckerProduct(DM_a,DM_b,DM_ab); + CALL_SUBTEST(check_kronecker_product(DM_ab)); + + // test kroneckerProduct(SM,DM,SM) + SparseMatrix SM_ab(1,20); + SM_ab.insert(0,0)=37.0; + kroneckerProduct(SM_a,DM_b,SM_ab); + CALL_SUBTEST(check_kronecker_product(SM_ab)); + SparseMatrix SM_ab2(10,3); + SM_ab2.insert(0,0)=37.0; + kroneckerProduct(SM_a,DM_b,SM_ab2); + CALL_SUBTEST(check_kronecker_product(SM_ab2)); + + // test kroneckerProduct(DM,SM,SM) + SM_ab.insert(0,0)=37.0; + kroneckerProduct(DM_a,SM_b,SM_ab); + CALL_SUBTEST(check_kronecker_product(SM_ab)); + SM_ab2.insert(0,0)=37.0; + kroneckerProduct(DM_a,SM_b,SM_ab2); + CALL_SUBTEST(check_kronecker_product(SM_ab2)); + + // test kroneckerProduct(SM,SM,SM) + SM_ab.resize(2,33); + SM_ab.insert(0,0)=37.0; + kroneckerProduct(SM_a,SM_b,SM_ab); + CALL_SUBTEST(check_kronecker_product(SM_ab)); + SM_ab2.resize(5,11); + SM_ab2.insert(0,0)=37.0; + kroneckerProduct(SM_a,SM_b,SM_ab2); + CALL_SUBTEST(check_kronecker_product(SM_ab2)); + + // test kroneckerProduct(SM,SM,SM) with sparse pattern + SM_a.resize(4,5); + SM_b.resize(3,2); + SM_a.resizeNonZeros(0); + SM_b.resizeNonZeros(0); + SM_a.insert(1,0) = -0.1; + SM_a.insert(0,3) = -0.2; + SM_a.insert(2,4) = 0.3; + SM_a.finalize(); + SM_b.insert(0,0) = 0.4; + SM_b.insert(2,1) = -0.5; + SM_b.finalize(); + SM_ab.resize(1,1); + SM_ab.insert(0,0)=37.0; + kroneckerProduct(SM_a,SM_b,SM_ab); + CALL_SUBTEST(check_sparse_kronecker_product(SM_ab)); + + // test dimension of result of kroneckerProduct(DM,DM,DM) + MatrixXd DM_a2(2,1); + MatrixXd DM_b2(5,4); + MatrixXd DM_ab2; + kroneckerProduct(DM_a2,DM_b2,DM_ab2); + CALL_SUBTEST(check_dimension(DM_ab2,2*5,1*4)); + DM_a2.resize(10,9); + DM_b2.resize(4,8); + kroneckerProduct(DM_a2,DM_b2,DM_ab2); + CALL_SUBTEST(check_dimension(DM_ab2,10*4,9*8)); +} diff --git a/gtsam/3rdparty/Eigen/unsupported/test/matrix_exponential.cpp b/gtsam/3rdparty/Eigen/unsupported/test/matrix_exponential.cpp index 996b42a7f..26403c4e6 100644 --- a/gtsam/3rdparty/Eigen/unsupported/test/matrix_exponential.cpp +++ b/gtsam/3rdparty/Eigen/unsupported/test/matrix_exponential.cpp @@ -55,7 +55,7 @@ void test2dRotation(double tol) for (int i=0; i<=20; i++) { angle = static_cast(pow(10, i / 5. 
- 2)); - B << cos(angle), sin(angle), -sin(angle), cos(angle); + B << std::cos(angle), std::sin(angle), -std::sin(angle), std::cos(angle); C = (angle*A).matrixFunction(expfn); std::cout << "test2dRotation: i = " << i << " error funm = " << relerr(C, B); @@ -146,8 +146,10 @@ void test_matrix_exponential() { CALL_SUBTEST_2(test2dRotation(1e-13)); CALL_SUBTEST_1(test2dRotation(2e-5)); // was 1e-5, relaxed for clang 2.8 / linux / x86-64 + CALL_SUBTEST_8(test2dRotation(1e-13)); CALL_SUBTEST_2(test2dHyperbolicRotation(1e-14)); CALL_SUBTEST_1(test2dHyperbolicRotation(1e-5)); + CALL_SUBTEST_8(test2dHyperbolicRotation(1e-14)); CALL_SUBTEST_6(testPascal(1e-6)); CALL_SUBTEST_5(testPascal(1e-15)); CALL_SUBTEST_2(randomTest(Matrix2d(), 1e-13)); @@ -158,4 +160,5 @@ void test_matrix_exponential() CALL_SUBTEST_5(randomTest(Matrix3cf(), 1e-4)); CALL_SUBTEST_1(randomTest(Matrix4f(), 1e-4)); CALL_SUBTEST_6(randomTest(MatrixXf(8,8), 1e-4)); + CALL_SUBTEST_9(randomTest(Matrix(7,7), 1e-13)); } diff --git a/gtsam/3rdparty/Eigen/unsupported/test/matrix_function.cpp b/gtsam/3rdparty/Eigen/unsupported/test/matrix_function.cpp index 04167abfb..c2ca5d5f1 100644 --- a/gtsam/3rdparty/Eigen/unsupported/test/matrix_function.cpp +++ b/gtsam/3rdparty/Eigen/unsupported/test/matrix_function.cpp @@ -120,6 +120,26 @@ void testMatrixExponential(const MatrixType& A) VERIFY_IS_APPROX(A.exp(), A.matrixFunction(StdStemFunctions::exp)); } +template +void testMatrixLogarithm(const MatrixType& A) +{ + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef std::complex ComplexScalar; + + MatrixType scaledA; + RealScalar maxImagPartOfSpectrum = A.eigenvalues().imag().cwiseAbs().maxCoeff(); + if (maxImagPartOfSpectrum >= 0.9 * M_PI) + scaledA = A * 0.9 * M_PI / maxImagPartOfSpectrum; + else + scaledA = A; + + // identity X.exp().log() = X only holds if Im(lambda) < pi for all eigenvalues of X + MatrixType expA = scaledA.exp(); + MatrixType logExpA = expA.log(); + VERIFY_IS_APPROX(logExpA, scaledA); +} + template void testHyperbolicFunctions(const MatrixType& A) { @@ -157,6 +177,7 @@ template void testMatrix(const MatrixType& A) { testMatrixExponential(A); + testMatrixLogarithm(A); testHyperbolicFunctions(A); testGonioFunctions(A); } diff --git a/gtsam/3rdparty/Eigen/unsupported/test/matrix_square_root.cpp b/gtsam/3rdparty/Eigen/unsupported/test/matrix_square_root.cpp new file mode 100644 index 000000000..8e701aac6 --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/test/matrix_square_root.cpp @@ -0,0 +1,77 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Jitse Niesen +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#include "main.h" +#include + +template ::Scalar>::IsComplex> +struct generateTestMatrix; + +// for real matrices, make sure none of the eigenvalues are negative +template +struct generateTestMatrix +{ + static void run(MatrixType& result, typename MatrixType::Index size) + { + MatrixType mat = MatrixType::Random(size, size); + EigenSolver es(mat); + typename EigenSolver::EigenvalueType eivals = es.eigenvalues(); + for (typename MatrixType::Index i = 0; i < size; ++i) { + if (eivals(i).imag() == 0 && eivals(i).real() < 0) + eivals(i) = -eivals(i); + } + result = (es.eigenvectors() * eivals.asDiagonal() * es.eigenvectors().inverse()).real(); + } +}; + +// for complex matrices, any matrix is fine +template +struct generateTestMatrix +{ + static void run(MatrixType& result, typename MatrixType::Index size) + { + result = MatrixType::Random(size, size); + } +}; + +template +void testMatrixSqrt(const MatrixType& m) +{ + MatrixType A; + generateTestMatrix::run(A, m.rows()); + MatrixType sqrtA = A.sqrt(); + VERIFY_IS_APPROX(sqrtA * sqrtA, A); +} + +void test_matrix_square_root() +{ + for (int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1(testMatrixSqrt(Matrix3cf())); + CALL_SUBTEST_2(testMatrixSqrt(MatrixXcd(12,12))); + CALL_SUBTEST_3(testMatrixSqrt(Matrix4f())); + CALL_SUBTEST_4(testMatrixSqrt(Matrix(9, 9))); + CALL_SUBTEST_5(testMatrixSqrt(Matrix())); + CALL_SUBTEST_5(testMatrixSqrt(Matrix,1,1>())); + } +} diff --git a/gtsam/3rdparty/Eigen/unsupported/test/mpreal/dlmalloc.c b/gtsam/3rdparty/Eigen/unsupported/test/mpreal/dlmalloc.c index a2c03b533..7ce8feb07 100755 --- a/gtsam/3rdparty/Eigen/unsupported/test/mpreal/dlmalloc.c +++ b/gtsam/3rdparty/Eigen/unsupported/test/mpreal/dlmalloc.c @@ -1267,7 +1267,7 @@ int mspace_mallopt(int, int); #endif /* MSPACES */ #ifdef __cplusplus -}; /* end of extern "C" */ +} /* end of extern "C" */ #endif /* __cplusplus */ /* diff --git a/gtsam/3rdparty/Eigen/unsupported/test/mpreal/mpreal.cpp b/gtsam/3rdparty/Eigen/unsupported/test/mpreal/mpreal.cpp index 373f23b12..5c23544ef 100644 --- a/gtsam/3rdparty/Eigen/unsupported/test/mpreal/mpreal.cpp +++ b/gtsam/3rdparty/Eigen/unsupported/test/mpreal/mpreal.cpp @@ -3,14 +3,15 @@ Project homepage: http://www.holoborodko.com/pavel/ Contact e-mail: pavel@holoborodko.com - Copyright (c) 2008-2010 Pavel Holoborodko + Copyright (c) 2008-2011 Pavel Holoborodko Core Developers: Pavel Holoborodko, Dmitriy Gubanov, Konstantin Holoborodko. Contributors: Brian Gladman, Helmut Jarausch, Fokko Beekhof, Ulrich Mutze, - Heinz van Saanen, Pere Constans, Peter van Hoof. + Heinz van Saanen, Pere Constans, Peter van Hoof, Gael Guennebaud, + Tsai Chia Cheng, Alexei Zubanov. **************************************************************************** This library is free software; you can redistribute it and/or @@ -27,31 +28,21 @@ License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** **************************************************************************** Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - + 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - + 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - - 3. Redistributions of any form whatsoever must retain the following - acknowledgment: - " - This product includes software developed by Pavel Holoborodko - Web: http://www.holoborodko.com/pavel/ - e-mail: pavel@holoborodko.com - " - 4. This software cannot be, by any means, used for any commercial - purpose without the prior permission of the copyright holder. - - Any of the above conditions can be waived if you get permission from - the copyright holder. + 3. The name of the author may be used to endorse or promote products + derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE @@ -66,9 +57,11 @@ SUCH DAMAGE. */ #include -#include #include "mpreal.h" + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) #include "dlmalloc.h" +#endif using std::ws; using std::cerr; @@ -79,62 +72,107 @@ using std::istream; namespace mpfr{ -mp_rnd_t mpreal::default_rnd = mpfr_get_default_rounding_mode(); -mp_prec_t mpreal::default_prec = mpfr_get_default_prec(); +mp_rnd_t mpreal::default_rnd = MPFR_RNDN; //(mpfr_get_default_rounding_mode)(); +mp_prec_t mpreal::default_prec = 64; //(mpfr_get_default_prec)(); int mpreal::default_base = 10; int mpreal::double_bits = -1; + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) bool mpreal::is_custom_malloc = false; +#endif // Default constructor: creates mp number and initializes it to 0. 
mpreal::mpreal() { + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) set_custom_malloc(); +#endif + mpfr_init2(mp,default_prec); mpfr_set_ui(mp,0,default_rnd); + + MPREAL_MSVC_DEBUGVIEW_CODE; } mpreal::mpreal(const mpreal& u) { + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) set_custom_malloc(); +#endif + mpfr_init2(mp,mpfr_get_prec(u.mp)); mpfr_set(mp,u.mp,default_rnd); + + MPREAL_MSVC_DEBUGVIEW_CODE; } mpreal::mpreal(const mpfr_t u) { + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) set_custom_malloc(); +#endif + mpfr_init2(mp,mpfr_get_prec(u)); mpfr_set(mp,u,default_rnd); + + MPREAL_MSVC_DEBUGVIEW_CODE; } mpreal::mpreal(const mpf_t u) { + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) set_custom_malloc(); - mpfr_init2(mp,mpf_get_prec(u)); +#endif + + mpfr_init2(mp,(mp_prec_t) mpf_get_prec(u)); // (gmp: mp_bitcnt_t) unsigned long -> long (mpfr: mp_prec_t) mpfr_set_f(mp,u,default_rnd); + + MPREAL_MSVC_DEBUGVIEW_CODE; } mpreal::mpreal(const mpz_t u, mp_prec_t prec, mp_rnd_t mode) { + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) set_custom_malloc(); +#endif + mpfr_init2(mp,prec); mpfr_set_z(mp,u,mode); + + MPREAL_MSVC_DEBUGVIEW_CODE; } mpreal::mpreal(const mpq_t u, mp_prec_t prec, mp_rnd_t mode) { + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) set_custom_malloc(); +#endif + mpfr_init2(mp,prec); mpfr_set_q(mp,u,mode); + + MPREAL_MSVC_DEBUGVIEW_CODE; } mpreal::mpreal(const double u, mp_prec_t prec, mp_rnd_t mode) { + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) set_custom_malloc(); +#endif + if(double_bits == -1 || fits_in_bits(u, double_bits)) { mpfr_init2(mp,prec); mpfr_set_d(mp,u,mode); + + MPREAL_MSVC_DEBUGVIEW_CODE; } else throw conversion_overflow(); @@ -142,51 +180,121 @@ mpreal::mpreal(const double u, mp_prec_t prec, mp_rnd_t mode) mpreal::mpreal(const long double u, mp_prec_t prec, mp_rnd_t mode) { + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) set_custom_malloc(); +#endif + mpfr_init2(mp,prec); mpfr_set_ld(mp,u,mode); + + MPREAL_MSVC_DEBUGVIEW_CODE; } mpreal::mpreal(const unsigned long int u, mp_prec_t prec, mp_rnd_t mode) { + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) set_custom_malloc(); +#endif + mpfr_init2(mp,prec); mpfr_set_ui(mp,u,mode); + + MPREAL_MSVC_DEBUGVIEW_CODE; } mpreal::mpreal(const unsigned int u, mp_prec_t prec, mp_rnd_t mode) { + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) set_custom_malloc(); +#endif + mpfr_init2(mp,prec); mpfr_set_ui(mp,u,mode); + + MPREAL_MSVC_DEBUGVIEW_CODE; } mpreal::mpreal(const long int u, mp_prec_t prec, mp_rnd_t mode) { + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) set_custom_malloc(); +#endif + mpfr_init2(mp,prec); mpfr_set_si(mp,u,mode); + + MPREAL_MSVC_DEBUGVIEW_CODE; } mpreal::mpreal(const int u, mp_prec_t prec, mp_rnd_t mode) { + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) set_custom_malloc(); +#endif + mpfr_init2(mp,prec); mpfr_set_si(mp,u,mode); + + MPREAL_MSVC_DEBUGVIEW_CODE; } +#if defined (MPREAL_HAVE_INT64_SUPPORT) +mpreal::mpreal(const uint64_t u, mp_prec_t prec, mp_rnd_t mode) +{ + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) + set_custom_malloc(); +#endif + + mpfr_init2(mp,prec); + mpfr_set_uj(mp, u, mode); + + MPREAL_MSVC_DEBUGVIEW_CODE; +} + +mpreal::mpreal(const int64_t u, mp_prec_t prec, mp_rnd_t mode) +{ + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) + set_custom_malloc(); +#endif + + mpfr_init2(mp,prec); + mpfr_set_sj(mp, u, mode); + + MPREAL_MSVC_DEBUGVIEW_CODE; +} +#endif + mpreal::mpreal(const char* s, mp_prec_t prec, int base, mp_rnd_t mode) { + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) 
set_custom_malloc(); +#endif + mpfr_init2(mp,prec); mpfr_set_str(mp, s, base, mode); + + MPREAL_MSVC_DEBUGVIEW_CODE; } mpreal::mpreal(const std::string& s, mp_prec_t prec, int base, mp_rnd_t mode) { + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) set_custom_malloc(); +#endif + mpfr_init2(mp,prec); mpfr_set_str(mp, s.c_str(), base, mode); + + MPREAL_MSVC_DEBUGVIEW_CODE; } mpreal::~mpreal() @@ -198,18 +306,22 @@ mpreal::~mpreal() mpreal& mpreal::operator=(const char* s) { mpfr_t t; - + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) set_custom_malloc(); +#endif if(0==mpfr_init_set_str(t,s,default_base,default_rnd)) { - // We will rewrite mp anyway, so use flash it and resize - mpfr_set_prec(mp,mpfr_get_prec(t)); //<- added 01.04.2011 + // We will rewrite mp anyway, so flash it and resize + mpfr_set_prec(mp,mpfr_get_prec(t)); mpfr_set(mp,t,mpreal::default_rnd); mpfr_clear(t); + + MPREAL_MSVC_DEBUGVIEW_CODE; + }else{ mpfr_clear(t); - // cerr<<"fail to convert string"<xp?yp:xp); - - mpfr_hypot(a.mp, x.mp, y.mp, rnd_mode); - - return a; -} - const mpreal sum (const mpreal tab[], unsigned long int n, mp_rnd_t rnd_mode) { mpreal x; @@ -288,21 +385,6 @@ const mpreal sum (const mpreal tab[], unsigned long int n, mp_rnd_t rnd_mode) return x; } -const mpreal remainder (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode) -{ - mpreal a; - mp_prec_t yp, xp; - - yp = y.get_prec(); - xp = x.get_prec(); - - a.set_prec(yp>xp?yp:xp); - - mpfr_remainder(a.mp, x.mp, y.mp, rnd_mode); - - return a; -} - const mpreal remquo (long* q, const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode) { mpreal a; @@ -319,36 +401,71 @@ const mpreal remquo (long* q, const mpreal& x, const mpreal& y, mp_rnd_t rnd_mod } template -std::string to_string(T t, std::ios_base & (*f)(std::ios_base&)) +std::string toString(T t, std::ios_base & (*f)(std::ios_base&)) { std::ostringstream oss; oss << f << t; return oss.str(); } -mpreal::operator std::string() const +#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0)) + +std::string mpreal::toString(const std::string& format) const { - return to_string(); + char *s = NULL; + string out; + + if( !format.empty() ) + { + if(!(mpfr_asprintf(&s,format.c_str(),mp) < 0)) + { + out = std::string(s); + + mpfr_free_str(s); + } + } + + return out; } -std::string mpreal::to_string(size_t n, int b, mp_rnd_t mode) const +#endif + +std::string mpreal::toString(int n, int b, mp_rnd_t mode) const { - char *s, *ns = NULL; + (void)b; + (void)mode; +#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0)) + + // Use MPFR native function for output + char format[128]; + int digits; + + digits = n > 0 ? 
n : bits2digits(mpfr_get_prec(mp)); + + sprintf(format,"%%.%dRNg",digits); // Default format + + return toString(std::string(format)); + +#else + + char *s, *ns = NULL; size_t slen, nslen; mp_exp_t exp; string out; - + +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) set_custom_malloc(); - +#endif + if(mpfr_inf_p(mp)) { - if(mpfr_sgn(mp)>0) return "+@Inf@"; - else return "-@Inf@"; + if(mpfr_sgn(mp)>0) return "+Inf"; + else return "-Inf"; } if(mpfr_zero_p(mp)) return "0"; - if(mpfr_nan_p(mp)) return "@NaN@"; - + if(mpfr_nan_p(mp)) return "NaN"; + s = mpfr_get_str(NULL,&exp,b,0,mp,mode); ns = mpfr_get_str(NULL,&exp,b,n,mp,mode); @@ -419,8 +536,8 @@ std::string mpreal::to_string(size_t n, int b, mp_rnd_t mode) const // Make final string if(--exp) { - if(exp>0) out += "e+"+mpfr::to_string(exp,std::dec); - else out += "e"+mpfr::to_string(exp,std::dec); + if(exp>0) out += "e+"+mpfr::toString(exp,std::dec); + else out += "e"+mpfr::toString(exp,std::dec); } } @@ -429,79 +546,52 @@ std::string mpreal::to_string(size_t n, int b, mp_rnd_t mode) const }else{ return "conversion error!"; } +#endif } + ////////////////////////////////////////////////////////////////////////// // I/O ostream& operator<<(ostream& os, const mpreal& v) { - return os<(os.precision())); + return os<(os.precision())); } istream& operator>>(istream &is, mpreal& v) { - char c; - string s = ""; - mpfr_t t; - - mpreal::set_custom_malloc(); - - if(is.good()) - { - is>>ws; - while ((c = is.get())!=EOF) - { - if(c ==' ' || c == '\t' || c == '\n' || c == '\r') - { - is.putback(c); - break; - } - s += c; - } - - if(s.size() != 0) - { - // Protect current value from alternation in case of input error - // so some error handling(roll back) procedure can be used - - if(0==mpfr_init_set_str(t,s.c_str(),mpreal::default_base,mpreal::default_rnd)) - { - mpfr_set(v.mp,t,mpreal::default_rnd); - mpfr_clear(t); - - }else{ - mpfr_clear(t); - cerr<<"error reading from istream"<> tmp; + mpfr_set_str(v.mp, tmp.c_str(),mpreal::default_base,mpreal::default_rnd); return is; } -// Optimized dynamic memory allocation/(re-)deallocation. -void * mpreal::mpreal_allocate(size_t alloc_size) -{ - return(dlmalloc(alloc_size)); -} -void * mpreal::mpreal_reallocate(void *ptr, size_t /*old_size*/, size_t new_size) -{ - return(dlrealloc(ptr,new_size)); -} - -void mpreal::mpreal_free(void *ptr, size_t /*size*/) -{ - dlfree(ptr); -} - -inline void mpreal::set_custom_malloc(void) -{ - if(!is_custom_malloc) +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) + // Optimized dynamic memory allocation/(re-)deallocation. + void * mpreal::mpreal_allocate(size_t alloc_size) { - mp_set_memory_functions(mpreal_allocate,mpreal_reallocate,mpreal_free); - is_custom_malloc = true; + return(dlmalloc(alloc_size)); } -} + + void * mpreal::mpreal_reallocate(void *ptr, size_t old_size, size_t new_size) + { + return(dlrealloc(ptr,new_size)); + } + + void mpreal::mpreal_free(void *ptr, size_t size) + { + dlfree(ptr); + } + + inline void mpreal::set_custom_malloc(void) + { + if(!is_custom_malloc) + { + mp_set_memory_functions(mpreal_allocate,mpreal_reallocate,mpreal_free); + is_custom_malloc = true; + } + } +#endif + } diff --git a/gtsam/3rdparty/Eigen/unsupported/test/mpreal/mpreal.h b/gtsam/3rdparty/Eigen/unsupported/test/mpreal/mpreal.h index 96f474640..c640af947 100644 --- a/gtsam/3rdparty/Eigen/unsupported/test/mpreal/mpreal.h +++ b/gtsam/3rdparty/Eigen/unsupported/test/mpreal/mpreal.h @@ -1,16 +1,17 @@ /* - Multi-precision real number class. C++ interface fo MPFR library. 
+ Multi-precision real number class. C++ interface for MPFR library. Project homepage: http://www.holoborodko.com/pavel/ Contact e-mail: pavel@holoborodko.com - Copyright (c) 2008-2010 Pavel Holoborodko + Copyright (c) 2008-2012 Pavel Holoborodko Core Developers: Pavel Holoborodko, Dmitriy Gubanov, Konstantin Holoborodko. Contributors: Brian Gladman, Helmut Jarausch, Fokko Beekhof, Ulrich Mutze, - Heinz van Saanen, Pere Constans, Peter van Hoof. + Heinz van Saanen, Pere Constans, Peter van Hoof, Gael Guennebaud, + Tsai Chia Cheng, Alexei Zubanov. **************************************************************************** This library is free software; you can redistribute it and/or @@ -39,19 +40,8 @@ notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - 3. Redistributions of any form whatsoever must retain the following - acknowledgment: - " - This product includes software developed by Pavel Holoborodko - Web: http://www.holoborodko.com/pavel/ - e-mail: pavel@holoborodko.com - " - - 4. This software cannot be, by any means, used for any commercial - purpose without the prior permission of the copyright holder. - - Any of the above conditions can be waived if you get permission from - the copyright holder. + 3. The name of the author may be used to endorse or promote products + derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE @@ -66,8 +56,8 @@ SUCH DAMAGE. */ -#ifndef __MP_REAL_H__ -#define __MP_REAL_H__ +#ifndef __MPREAL_H__ +#define __MPREAL_H__ #include #include @@ -76,22 +66,65 @@ #include #include -#include +// Options +#define MPREAL_HAVE_INT64_SUPPORT // int64_t support: available only for MSVC 2010 & GCC +#define MPREAL_HAVE_MSVC_DEBUGVIEW // Enable Debugger Visualizer (valid only for MSVC in "Debug" builds) // Detect compiler using signatures from http://predef.sourceforge.net/ #if defined(__GNUC__) && defined(__INTEL_COMPILER) #define IsInf(x) isinf(x) // Intel ICC compiler on Linux +#elif defined(_MSC_VER) // Microsoft Visual C++ + #define IsInf(x) (!_finite(x)) + #elif defined(__GNUC__) #define IsInf(x) std::isinf(x) // GNU C/C++ -#elif defined(_MSC_VER) - #define IsInf(x) (!_finite(x)) // Microsoft Visual C++ - #else #define IsInf(x) std::isinf(x) // Unknown compiler, just hope for C99 conformance #endif +#if defined(MPREAL_HAVE_INT64_SUPPORT) + + #define MPFR_USE_INTMAX_T // should be defined before mpfr.h + + #if defined(_MSC_VER) // is available only in msvc2010! + #if (_MSC_VER >= 1600) + #include + #else // MPFR relies on intmax_t which is available only in msvc2010 + #undef MPREAL_HAVE_INT64_SUPPORT // Besides, MPFR - MPIR have to be compiled with msvc2010 + #undef MPFR_USE_INTMAX_T // Since we cannot detect this, disable x64 by default + // Someone should change this manually if needed. + #endif + #endif + + #if defined (__MINGW32__) || defined(__MINGW64__) + #include // equivalent to msvc2010 + #elif defined (__GNUC__) + #if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) + #undef MPREAL_HAVE_INT64_SUPPORT // remove all shaman dances for x64 builds since + #undef MPFR_USE_INTMAX_T // GCC already support x64 as of "long int" is 64-bit integer, nothing left to do + #else + #include // use int64_t, uint64_t otherwise. 
+ #endif + #endif + +#endif + +#if defined(MPREAL_HAVE_MSVC_DEBUGVIEW) && defined(_MSC_VER) && defined(_DEBUG) +#define MPREAL_MSVC_DEBUGVIEW_CODE DebugView = toString() + #define MPREAL_MSVC_DEBUGVIEW_DATA std::string DebugView +#else + #define MPREAL_MSVC_DEBUGVIEW_CODE + #define MPREAL_MSVC_DEBUGVIEW_DATA +#endif + +#include + +#if (MPFR_VERSION < MPFR_VERSION_NUM(3,0,0)) + #include // needed for random() +#endif + namespace mpfr { class mpreal { @@ -99,19 +132,17 @@ private: mpfr_t mp; public: - static mp_rnd_t default_rnd; - static mp_prec_t default_prec; - static int default_base; - static int double_bits; - + static mp_rnd_t default_rnd; + static mp_prec_t default_prec; + static int default_base; + static int double_bits; + public: // Constructors && type conversion mpreal(); mpreal(const mpreal& u); - mpreal(const mpfr_t u); mpreal(const mpf_t u); - mpreal(const mpz_t u, mp_prec_t prec = default_prec, mp_rnd_t mode = default_rnd); mpreal(const mpq_t u, mp_prec_t prec = default_prec, mp_rnd_t mode = default_rnd); mpreal(const double u, mp_prec_t prec = default_prec, mp_rnd_t mode = default_rnd); @@ -120,6 +151,12 @@ public: mpreal(const unsigned int u, mp_prec_t prec = default_prec, mp_rnd_t mode = default_rnd); mpreal(const long int u, mp_prec_t prec = default_prec, mp_rnd_t mode = default_rnd); mpreal(const int u, mp_prec_t prec = default_prec, mp_rnd_t mode = default_rnd); + +#if defined (MPREAL_HAVE_INT64_SUPPORT) + mpreal(const uint64_t u, mp_prec_t prec = default_prec, mp_rnd_t mode = default_rnd); + mpreal(const int64_t u, mp_prec_t prec = default_prec, mp_rnd_t mode = default_rnd); +#endif + mpreal(const char* s, mp_prec_t prec = default_prec, int base = default_base, mp_rnd_t mode = default_rnd); mpreal(const std::string& s, mp_prec_t prec = default_prec, int base = default_base, mp_rnd_t mode = default_rnd); @@ -155,6 +192,18 @@ public: mpreal& operator+=(const unsigned int u); mpreal& operator+=(const long int u); mpreal& operator+=(const int u); + +#if defined (MPREAL_HAVE_INT64_SUPPORT) + mpreal& operator+=(const int64_t u); + mpreal& operator+=(const uint64_t u); + mpreal& operator-=(const int64_t u); + mpreal& operator-=(const uint64_t u); + mpreal& operator*=(const int64_t u); + mpreal& operator*=(const uint64_t u); + mpreal& operator/=(const int64_t u); + mpreal& operator/=(const uint64_t u); +#endif + const mpreal operator+() const; mpreal& operator++ (); const mpreal operator++ (int); @@ -225,29 +274,49 @@ public: friend bool operator == (const mpreal& a, const mpreal& b); friend bool operator != (const mpreal& a, const mpreal& b); + // Optimized specializations for boolean operators + friend bool operator == (const mpreal& a, const unsigned long int b); + friend bool operator == (const mpreal& a, const unsigned int b); + friend bool operator == (const mpreal& a, const long int b); + friend bool operator == (const mpreal& a, const int b); + friend bool operator == (const mpreal& a, const long double b); + friend bool operator == (const mpreal& a, const double b); + // Type Conversion operators - inline operator long double() const; - inline operator double() const; - inline operator float() const; - inline operator unsigned long() const; - inline operator unsigned int() const; - inline operator long() const; - inline operator int() const; - operator std::string() const; - inline operator mpfr_ptr(); + long toLong() const; + unsigned long toULong() const; + double toDouble() const; + long double toLDouble() const; + +#if defined (MPREAL_HAVE_INT64_SUPPORT) + 
int64_t toInt64() const; + uint64_t toUInt64() const; +#endif + + // Get raw pointers + ::mpfr_ptr mpfr_ptr(); + ::mpfr_srcptr mpfr_srcptr() const; + + // Convert mpreal to string with n significant digits in base b + // n = 0 -> convert with the maximum available digits + std::string toString(int n = 0, int b = default_base, mp_rnd_t mode = default_rnd) const; + +#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0)) + std::string toString(const std::string& format) const; +#endif // Math Functions - friend const mpreal sqr(const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal sqr (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal sqrt(const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal sqrt(const unsigned long int v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal cbrt(const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal root(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode = mpreal::default_rnd); - friend const mpreal pow(const mpreal& a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::default_rnd); - friend const mpreal pow(const mpreal& a, const mpz_t b, mp_rnd_t rnd_mode = mpreal::default_rnd); - friend const mpreal pow(const mpreal& a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::default_rnd); - friend const mpreal pow(const mpreal& a, const long int b, mp_rnd_t rnd_mode = mpreal::default_rnd); - friend const mpreal pow(const unsigned long int a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::default_rnd); - friend const mpreal pow(const unsigned long int a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal pow (const mpreal& a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal pow (const mpreal& a, const mpz_t b, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal pow (const mpreal& a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal pow (const mpreal& a, const long int b, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal pow (const unsigned long int a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal pow (const unsigned long int a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal fabs(const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal abs(const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); @@ -264,8 +333,8 @@ public: friend const mpreal exp (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal exp2 (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal exp10(const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); - friend const mpreal log1p (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); - friend const mpreal expm1 (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal log1p(const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal expm1(const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal cos(const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal sin(const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); @@ -279,15 +348,23 @@ public: friend const mpreal asin (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal atan (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal atan2 (const mpreal& y, 
const mpreal& x, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal acot (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal asec (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal acsc (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal cosh (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal sinh (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal tanh (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal sech (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal csch (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal coth (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); - friend const mpreal acosh (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); - friend const mpreal asinh (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); - friend const mpreal atanh (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal acosh (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal asinh (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal atanh (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal acoth (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal asech (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal acsch (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal hypot (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal fac_ui (unsigned long int v, mp_prec_t prec = mpreal::default_prec, mp_rnd_t rnd_mode = mpreal::default_rnd); @@ -299,12 +376,12 @@ public: friend const mpreal zeta (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal erf (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal erfc (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); - friend const mpreal _j0 (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); - friend const mpreal _j1 (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); - friend const mpreal _jn (long n, const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); - friend const mpreal _y0 (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); - friend const mpreal _y1 (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); - friend const mpreal _yn (long n, const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal besselj0 (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal besselj1 (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal besseljn (long n, const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal bessely0 (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal bessely1 (const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); + friend const mpreal besselyn (long n, const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal fma (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal fms (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal agm (const mpreal& v1, const mpreal& v2, mp_rnd_t 
rnd_mode = mpreal::default_rnd); @@ -324,9 +401,15 @@ public: friend const mpreal digamma(const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal ai(const mpreal& v, mp_rnd_t rnd_mode = mpreal::default_rnd); friend const mpreal urandom (gmp_randstate_t& state,mp_rnd_t rnd_mode = mpreal::default_rnd); // use gmp_randinit_default() to init state, gmp_randclear() to clear - friend bool _isregular(const mpreal& v); + friend bool isregular(const mpreal& v); #endif - + + // Uniformly distributed random number generation in [0,1] using + // Mersenne-Twister algorithm by default. + // Use parameter to setup seed, e.g.: random((unsigned)time(NULL)) + // Check urandom() for more precise control. + friend const mpreal random(unsigned int seed = 0); + // Exponent and mantissa manipulation friend const mpreal frexp(const mpreal& v, mp_exp_t* exp); friend const mpreal ldexp(const mpreal& v, mp_exp_t exp); @@ -376,22 +459,27 @@ public: #endif // Instance Checkers - friend bool _isnan(const mpreal& v); - friend bool _isinf(const mpreal& v); - friend bool _isnum(const mpreal& v); - friend bool _iszero(const mpreal& v); - friend bool _isint(const mpreal& v); + friend bool isnan (const mpreal& v); + friend bool isinf (const mpreal& v); + friend bool isfinite(const mpreal& v); + + friend bool isnum(const mpreal& v); + friend bool iszero(const mpreal& v); + friend bool isint(const mpreal& v); // Set/Get instance properties inline mp_prec_t get_prec() const; - inline void set_prec(mp_prec_t prec, mp_rnd_t rnd_mode = default_rnd); // Change precision with rounding mode - - // Set mpreal to +-inf, NaN - void set_inf(int sign = +1); - void set_nan(); + inline void set_prec(mp_prec_t prec, mp_rnd_t rnd_mode = default_rnd); // Change precision with rounding mode - // sign = -1 or +1 - void set_sign(int sign, mp_rnd_t rnd_mode = default_rnd); + // Aliases for get_prec(), set_prec() - needed for compatibility with std::complex interface + inline mpreal& setPrecision(int Precision, mp_rnd_t RoundingMode = (mpfr_get_default_rounding_mode)()); + inline int getPrecision() const; + + // Set mpreal to +/- inf, NaN, +/-0 + mpreal& setInf (int Sign = +1); + mpreal& setNan (); + mpreal& setZero (int Sign = +1); + mpreal& setSign (int Sign, mp_rnd_t RoundingMode = (mpfr_get_default_rounding_mode)()); //Exponent mp_exp_t get_exp(); @@ -411,36 +499,25 @@ public: static int get_double_bits(); static void set_default_rnd(mp_rnd_t rnd_mode); static mp_rnd_t get_default_rnd(); - static mp_exp_t get_emin (void); - static mp_exp_t get_emax (void); - static mp_exp_t get_emin_min (void); - static mp_exp_t get_emin_max (void); - static mp_exp_t get_emax_min (void); - static mp_exp_t get_emax_max (void); - static int set_emin (mp_exp_t exp); - static int set_emax (mp_exp_t exp); + static mp_exp_t get_emin (void); + static mp_exp_t get_emax (void); + static mp_exp_t get_emin_min (void); + static mp_exp_t get_emin_max (void); + static mp_exp_t get_emax_min (void); + static mp_exp_t get_emax_max (void); + static int set_emin (mp_exp_t exp); + static int set_emax (mp_exp_t exp); - // Get/Set conversions - // Convert mpreal to string with n significant digits in base b - // n = 0 -> convert with the maximum available digits - std::string to_string(size_t n = 0, int b = default_base, mp_rnd_t mode = default_rnd) const; - // Efficient swapping of two mpreal values friend void swap(mpreal& x, mpreal& y); //Min Max - macros is evil. Needed for systems which defines max and min globally as macros (e.g. 
Windows) //Hope that globally defined macros use > < operations only - #ifndef max - friend const mpreal max(const mpreal& x, const mpreal& y); - #endif - - #ifndef min - friend const mpreal min(const mpreal& x, const mpreal& y); - #endif - friend const mpreal fmax(const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = default_rnd); friend const mpreal fmin(const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = default_rnd); +#if defined (MPREAL_HAVE_CUSTOM_MPFR_MALLOC) + private: // Optimized dynamic memory allocation/(re-)deallocation. static bool is_custom_malloc; @@ -448,6 +525,20 @@ private: static void *mpreal_reallocate (void *ptr, size_t old_size, size_t new_size); static void mpreal_free (void *ptr, size_t size); inline static void set_custom_malloc (void); + +#endif + + +private: + // Human friendly Debug Preview in Visual Studio. + // Put one of these lines: + // + // mpfr::mpreal= ; Show value only + // mpfr::mpreal=, bits ; Show value & precision + // + // at the beginning of + // [Visual Studio Installation Folder]\Common7\Packages\Debugger\autoexp.dat + MPREAL_MSVC_DEBUGVIEW_DATA }; ////////////////////////////////////////////////////////////////////////// @@ -457,190 +548,63 @@ public: std::string why() { return "inexact conversion from floating point"; } }; -////////////////////////////////////////////////////////////////////////// +namespace internal{ + + // Use SFINAE to restrict arithmetic operations instantiation only for numeric types + // This is needed for smooth integration with libraries based on expression templates + template struct result_type {}; + + template <> struct result_type {typedef mpreal type;}; + template <> struct result_type {typedef mpreal type;}; + template <> struct result_type {typedef mpreal type;}; + template <> struct result_type {typedef mpreal type;}; + template <> struct result_type {typedef mpreal type;}; + template <> struct result_type {typedef mpreal type;}; + template <> struct result_type {typedef mpreal type;}; + template <> struct result_type {typedef mpreal type;}; + template <> struct result_type {typedef mpreal type;}; + +#if defined (MPREAL_HAVE_INT64_SUPPORT) + template <> struct result_type {typedef mpreal type;}; + template <> struct result_type {typedef mpreal type;}; +#endif +} + // + Addition -const mpreal operator+(const mpreal& a, const mpreal& b); +template +inline const typename internal::result_type::type + operator+(const mpreal& lhs, const Rhs& rhs){ return mpreal(lhs) += rhs; } -// + Fast specialized addition - implemented through fast += operations -const mpreal operator+(const mpreal& a, const mpz_t b); -const mpreal operator+(const mpreal& a, const mpq_t b); -const mpreal operator+(const mpreal& a, const long double b); -const mpreal operator+(const mpreal& a, const double b); -const mpreal operator+(const mpreal& a, const unsigned long int b); -const mpreal operator+(const mpreal& a, const unsigned int b); -const mpreal operator+(const mpreal& a, const long int b); -const mpreal operator+(const mpreal& a, const int b); -const mpreal operator+(const mpreal& a, const char* b); -const mpreal operator+(const char* a, const mpreal& b); -const std::string operator+(const mpreal& a, const std::string b); -const std::string operator+(const std::string a, const mpreal& b); +template +inline const typename internal::result_type::type + operator+(const Lhs& lhs, const mpreal& rhs){ return mpreal(rhs) += lhs; } -const mpreal operator+(const mpz_t b, const mpreal& a); -const mpreal operator+(const mpq_t b, const mpreal& a); 
-const mpreal operator+(const long double b, const mpreal& a); -const mpreal operator+(const double b, const mpreal& a); -const mpreal operator+(const unsigned long int b, const mpreal& a); -const mpreal operator+(const unsigned int b, const mpreal& a); -const mpreal operator+(const long int b, const mpreal& a); -const mpreal operator+(const int b, const mpreal& a); - -////////////////////////////////////////////////////////////////////////// // - Subtraction -const mpreal operator-(const mpreal& a, const mpreal& b); +template +inline const typename internal::result_type::type + operator-(const mpreal& lhs, const Rhs& rhs){ return mpreal(lhs) -= rhs; } -// - Fast specialized subtraction - implemented through fast -= operations -const mpreal operator-(const mpreal& a, const mpz_t b); -const mpreal operator-(const mpreal& a, const mpq_t b); -const mpreal operator-(const mpreal& a, const long double b); -const mpreal operator-(const mpreal& a, const double b); -const mpreal operator-(const mpreal& a, const unsigned long int b); -const mpreal operator-(const mpreal& a, const unsigned int b); -const mpreal operator-(const mpreal& a, const long int b); -const mpreal operator-(const mpreal& a, const int b); -const mpreal operator-(const mpreal& a, const char* b); -const mpreal operator-(const char* a, const mpreal& b); +template +inline const typename internal::result_type::type + operator-(const Lhs& lhs, const mpreal& rhs){ return mpreal(lhs) -= rhs; } -const mpreal operator-(const mpz_t b, const mpreal& a); -const mpreal operator-(const mpq_t b, const mpreal& a); -const mpreal operator-(const long double b, const mpreal& a); -//const mpreal operator-(const double b, const mpreal& a); - -////////////////////////////////////////////////////////////////////////// // * Multiplication -const mpreal operator*(const mpreal& a, const mpreal& b); +template +inline const typename internal::result_type::type + operator*(const mpreal& lhs, const Rhs& rhs){ return mpreal(lhs) *= rhs; } -// * Fast specialized multiplication - implemented through fast *= operations -const mpreal operator*(const mpreal& a, const mpz_t b); -const mpreal operator*(const mpreal& a, const mpq_t b); -const mpreal operator*(const mpreal& a, const long double b); -const mpreal operator*(const mpreal& a, const double b); -const mpreal operator*(const mpreal& a, const unsigned long int b); -const mpreal operator*(const mpreal& a, const unsigned int b); -const mpreal operator*(const mpreal& a, const long int b); -const mpreal operator*(const mpreal& a, const int b); +template +inline const typename internal::result_type::type + operator*(const Lhs& lhs, const mpreal& rhs){ return mpreal(rhs) *= lhs; } -const mpreal operator*(const mpz_t b, const mpreal& a); -const mpreal operator*(const mpq_t b, const mpreal& a); -const mpreal operator*(const long double b, const mpreal& a); -const mpreal operator*(const double b, const mpreal& a); -const mpreal operator*(const unsigned long int b, const mpreal& a); -const mpreal operator*(const unsigned int b, const mpreal& a); -const mpreal operator*(const long int b, const mpreal& a); -const mpreal operator*(const int b, const mpreal& a); - -////////////////////////////////////////////////////////////////////////// // / Division -const mpreal operator/(const mpreal& a, const mpreal& b); +template +inline const typename internal::result_type::type + operator/(const mpreal& lhs, const Rhs& rhs){ return mpreal(lhs) /= rhs; } -// / Fast specialized division - implemented through fast /= operations 
-const mpreal operator/(const mpreal& a, const mpz_t b); -const mpreal operator/(const mpreal& a, const mpq_t b); -const mpreal operator/(const mpreal& a, const long double b); -const mpreal operator/(const mpreal& a, const double b); -const mpreal operator/(const mpreal& a, const unsigned long int b); -const mpreal operator/(const mpreal& a, const unsigned int b); -const mpreal operator/(const mpreal& a, const long int b); -const mpreal operator/(const mpreal& a, const int b); - -const mpreal operator/(const long double b, const mpreal& a); - -////////////////////////////////////////////////////////////////////////// -// Shifts operators - Multiplication/Division by a power of 2 -const mpreal operator<<(const mpreal& v, const unsigned long int k); -const mpreal operator<<(const mpreal& v, const unsigned int k); -const mpreal operator<<(const mpreal& v, const long int k); -const mpreal operator<<(const mpreal& v, const int k); - -const mpreal operator>>(const mpreal& v, const unsigned long int k); -const mpreal operator>>(const mpreal& v, const unsigned int k); -const mpreal operator>>(const mpreal& v, const long int k); -const mpreal operator>>(const mpreal& v, const int k); - -////////////////////////////////////////////////////////////////////////// -// Boolean operators -bool operator < (const mpreal& a, const unsigned long int b); -bool operator < (const mpreal& a, const unsigned int b); -bool operator < (const mpreal& a, const long int b); -bool operator < (const mpreal& a, const int b); -bool operator < (const mpreal& a, const long double b); -bool operator < (const mpreal& a, const double b); - -bool operator < (const unsigned long int a,const mpreal& b); -bool operator < (const unsigned int a, const mpreal& b); -bool operator < (const long int a, const mpreal& b); -bool operator < (const int a, const mpreal& b); -bool operator < (const long double a, const mpreal& b); -bool operator < (const double a, const mpreal& b); - -bool operator > (const mpreal& a, const unsigned long int b); -bool operator > (const mpreal& a, const unsigned int b); -bool operator > (const mpreal& a, const long int b); -bool operator > (const mpreal& a, const int b); -bool operator > (const mpreal& a, const long double b); -bool operator > (const mpreal& a, const double b); - -bool operator > (const unsigned long int a,const mpreal& b); -bool operator > (const unsigned int a, const mpreal& b); -bool operator > (const long int a, const mpreal& b); -bool operator > (const int a, const mpreal& b); -bool operator > (const long double a, const mpreal& b); -bool operator > (const double a, const mpreal& b); - -bool operator >= (const mpreal& a, const unsigned long int b); -bool operator >= (const mpreal& a, const unsigned int b); -bool operator >= (const mpreal& a, const long int b); -bool operator >= (const mpreal& a, const int b); -bool operator >= (const mpreal& a, const long double b); -bool operator >= (const mpreal& a, const double b); - -bool operator >= (const unsigned long int a,const mpreal& b); -bool operator >= (const unsigned int a, const mpreal& b); -bool operator >= (const long int a, const mpreal& b); -bool operator >= (const int a, const mpreal& b); -bool operator >= (const long double a, const mpreal& b); -bool operator >= (const double a, const mpreal& b); - -bool operator <= (const mpreal& a, const unsigned long int b); -bool operator <= (const mpreal& a, const unsigned int b); -bool operator <= (const mpreal& a, const long int b); -bool operator <= (const mpreal& a, const int b); -bool 
operator <= (const mpreal& a, const long double b); -bool operator <= (const mpreal& a, const double b); - -bool operator <= (const unsigned long int a,const mpreal& b); -bool operator <= (const unsigned int a, const mpreal& b); -bool operator <= (const long int a, const mpreal& b); -bool operator <= (const int a, const mpreal& b); -bool operator <= (const long double a, const mpreal& b); -bool operator <= (const double a, const mpreal& b); - -bool operator == (const mpreal& a, const unsigned long int b); -bool operator == (const mpreal& a, const unsigned int b); -bool operator == (const mpreal& a, const long int b); -bool operator == (const mpreal& a, const int b); -bool operator == (const mpreal& a, const long double b); -bool operator == (const mpreal& a, const double b); - -bool operator == (const unsigned long int a,const mpreal& b); -bool operator == (const unsigned int a, const mpreal& b); -bool operator == (const long int a, const mpreal& b); -bool operator == (const int a, const mpreal& b); -bool operator == (const long double a, const mpreal& b); -bool operator == (const double a, const mpreal& b); - -bool operator != (const mpreal& a, const unsigned long int b); -bool operator != (const mpreal& a, const unsigned int b); -bool operator != (const mpreal& a, const long int b); -bool operator != (const mpreal& a, const int b); -bool operator != (const mpreal& a, const long double b); -bool operator != (const mpreal& a, const double b); - -bool operator != (const unsigned long int a,const mpreal& b); -bool operator != (const unsigned int a, const mpreal& b); -bool operator != (const long int a, const mpreal& b); -bool operator != (const int a, const mpreal& b); -bool operator != (const long double a, const mpreal& b); -bool operator != (const double a, const mpreal& b); +template +inline const typename internal::result_type::type + operator/(const Lhs& lhs, const mpreal& rhs){ return mpreal(lhs) /= rhs; } ////////////////////////////////////////////////////////////////////////// // sqrt @@ -704,22 +668,45 @@ const mpreal pow(const double a, const int b, mp_rnd_t rnd_mode = mpreal::defaul ////////////////////////////////////////////////////////////////////////// // Estimate machine epsilon for the given precision -inline const mpreal machine_epsilon(mp_prec_t prec = mpreal::default_prec); -inline const mpreal mpreal_min(mp_prec_t prec = mpreal::default_prec); -inline const mpreal mpreal_max(mp_prec_t prec = mpreal::default_prec); +// Returns smallest eps such that 1.0 + eps != 1.0 +inline const mpreal machine_epsilon(mp_prec_t prec = mpreal::get_default_prec()); + +// Returns the positive distance from abs(x) to the next larger in magnitude floating point number of the same precision as x +inline const mpreal machine_epsilon(const mpreal& x); + +inline const mpreal mpreal_min(mp_prec_t prec = mpreal::get_default_prec()); +inline const mpreal mpreal_max(mp_prec_t prec = mpreal::get_default_prec()); +inline bool isEqualFuzzy(const mpreal& a, const mpreal& b, const mpreal& eps); +inline bool isEqualUlps(const mpreal& a, const mpreal& b, int maxUlps); ////////////////////////////////////////////////////////////////////////// -// Implementation of inline functions +// Bits - decimal digits relation +// bits = ceil(digits*log[2](10)) +// digits = floor(bits*log[10](2)) + +inline mp_prec_t digits2bits(int d); +inline int bits2digits(mp_prec_t b); + +////////////////////////////////////////////////////////////////////////// +// min, max +const mpreal (max)(const mpreal& x, const mpreal& y); 
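The digits2bits() / bits2digits() helpers declared above encode the two conversion formulas quoted in the comment. A small standalone illustration of that arithmetic (the *_demo names are hypothetical and are not the library's functions):

#include <cmath>
#include <cassert>

// Same formulas as above: bits = ceil(digits*log2(10)), digits = floor(bits*log10(2)).
inline long digits2bits_demo(int d)  { return static_cast<long>(std::ceil(d * 3.3219280948873623)); }  // log2(10)
inline int  bits2digits_demo(long b) { return static_cast<int>(std::floor(b * 0.3010299956639812)); }  // log10(2)

int main()
{
  assert(digits2bits_demo(15) == 50);   // 15 significant decimal digits need a 50-bit mantissa
  assert(bits2digits_demo(53) == 15);   // a 53-bit (IEEE double) mantissa carries 15 full decimal digits
  return 0;
}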
+const mpreal (min)(const mpreal& x, const mpreal& y); + +////////////////////////////////////////////////////////////////////////// +// Implementation ////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////// // Operators - Assignment inline mpreal& mpreal::operator=(const mpreal& v) { - if (this!= &v) + if (this != &v) { - mpfr_set_prec(mp,mpfr_get_prec(v.mp)); + mpfr_clear(mp); + mpfr_init2(mp,mpfr_get_prec(v.mp)); mpfr_set(mp,v.mp,default_rnd); + + MPREAL_MSVC_DEBUGVIEW_CODE; } return *this; } @@ -727,24 +714,32 @@ inline mpreal& mpreal::operator=(const mpreal& v) inline mpreal& mpreal::operator=(const mpf_t v) { mpfr_set_f(mp,v,default_rnd); + + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator=(const mpz_t v) { mpfr_set_z(mp,v,default_rnd); + + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator=(const mpq_t v) { mpfr_set_q(mp,v,default_rnd); + + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator=(const long double v) { mpfr_set_ld(mp,v,default_rnd); + + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } @@ -753,6 +748,8 @@ inline mpreal& mpreal::operator=(const double v) if(double_bits == -1 || fits_in_bits(v, double_bits)) { mpfr_set_d(mp,v,default_rnd); + + MPREAL_MSVC_DEBUGVIEW_CODE; } else throw conversion_overflow(); @@ -763,24 +760,32 @@ inline mpreal& mpreal::operator=(const double v) inline mpreal& mpreal::operator=(const unsigned long int v) { mpfr_set_ui(mp,v,default_rnd); + + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator=(const unsigned int v) { mpfr_set_ui(mp,v,default_rnd); + + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator=(const long int v) { mpfr_set_si(mp,v,default_rnd); + + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator=(const int v) { mpfr_set_si(mp,v,default_rnd); + + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } @@ -789,70 +794,90 @@ inline mpreal& mpreal::operator=(const int v) inline mpreal& mpreal::operator+=(const mpreal& v) { mpfr_add(mp,mp,v.mp,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator+=(const mpf_t u) { *this += mpreal(u); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator+=(const mpz_t u) { mpfr_add_z(mp,mp,u,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator+=(const mpq_t u) { mpfr_add_q(mp,mp,u,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator+= (const long double u) { - return *this += mpreal(u); + *this += mpreal(u); + MPREAL_MSVC_DEBUGVIEW_CODE; + return *this; } inline mpreal& mpreal::operator+= (const double u) { #if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0)) mpfr_add_d(mp,mp,u,default_rnd); - return *this; #else - return *this += mpreal(u); + *this += mpreal(u); #endif + + MPREAL_MSVC_DEBUGVIEW_CODE; + return *this; } inline mpreal& mpreal::operator+=(const unsigned long int u) { mpfr_add_ui(mp,mp,u,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator+=(const unsigned int u) { mpfr_add_ui(mp,mp,u,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator+=(const long int u) { mpfr_add_si(mp,mp,u,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator+=(const int u) { mpfr_add_si(mp,mp,u,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; 
} -inline const mpreal mpreal::operator+()const -{ - return mpreal(*this); -} +#if defined (MPREAL_HAVE_INT64_SUPPORT) +inline mpreal& mpreal::operator+=(const int64_t u){ *this += mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } +inline mpreal& mpreal::operator+=(const uint64_t u){ *this += mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } +inline mpreal& mpreal::operator-=(const int64_t u){ *this -= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } +inline mpreal& mpreal::operator-=(const uint64_t u){ *this -= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } +inline mpreal& mpreal::operator*=(const int64_t u){ *this *= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } +inline mpreal& mpreal::operator*=(const uint64_t u){ *this *= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } +inline mpreal& mpreal::operator/=(const int64_t u){ *this /= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } +inline mpreal& mpreal::operator/=(const uint64_t u){ *this /= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } +#endif + +inline const mpreal mpreal::operator+()const { return mpreal(*this); } inline const mpreal operator+(const mpreal& a, const mpreal& b) { @@ -861,111 +886,9 @@ inline const mpreal operator+(const mpreal& a, const mpreal& b) else return mpreal(b) += a; } -inline const std::string operator+(const mpreal& a, const std::string b) -{ - return (std::string)a+b; -} - -inline const std::string operator+(const std::string a, const mpreal& b) -{ - return a+(std::string)b; -} - -inline const mpreal operator+(const mpreal& a, const mpz_t b) -{ - return mpreal(a) += b; -} - -inline const mpreal operator+(const mpreal& a, const char* b) -{ - return a+mpreal(b); -} - -inline const mpreal operator+(const char* a, const mpreal& b) -{ - return mpreal(a)+b; - -} - -inline const mpreal operator+(const mpreal& a, const mpq_t b) -{ - return mpreal(a) += b; -} - -inline const mpreal operator+(const mpreal& a, const long double b) -{ - return mpreal(a) += b; -} - -inline const mpreal operator+(const mpreal& a, const double b) -{ - return mpreal(a) += b; -} - -inline const mpreal operator+(const mpreal& a, const unsigned long int b) -{ - return mpreal(a) += b; -} - -inline const mpreal operator+(const mpreal& a, const unsigned int b) -{ - return mpreal(a) += b; -} - -inline const mpreal operator+(const mpreal& a, const long int b) -{ - return mpreal(a) += b; -} - -inline const mpreal operator+(const mpreal& a, const int b) -{ - return mpreal(a) += b; -} - -inline const mpreal operator+(const mpz_t b, const mpreal& a) -{ - return mpreal(a) += b; -} - -inline const mpreal operator+(const mpq_t b, const mpreal& a) -{ - return mpreal(a) += b; -} - -inline const mpreal operator+(const long double b, const mpreal& a) -{ - return mpreal(a) += b; -} - -inline const mpreal operator+(const double b, const mpreal& a) -{ - return mpreal(a) += b; -} - -inline const mpreal operator+(const unsigned long int b, const mpreal& a) -{ - return mpreal(a) += b; -} - -inline const mpreal operator+(const unsigned int b, const mpreal& a) -{ - return mpreal(a) += b; -} - -inline const mpreal operator+(const long int b, const mpreal& a) -{ - return mpreal(a) += b; -} - -inline const mpreal operator+(const int b, const mpreal& a) -{ - return mpreal(a) += b; -} - inline mpreal& mpreal::operator++() { - *this += 1; - return *this; + return *this += 1; } inline const mpreal mpreal::operator++ (int) @@ -977,8 +900,7 @@ inline const mpreal mpreal::operator++ (int) inline mpreal& 
mpreal::operator--() { - *this -= 1; - return *this; + return *this -= 1; } inline const mpreal mpreal::operator-- (int) @@ -993,57 +915,68 @@ inline const mpreal mpreal::operator-- (int) inline mpreal& mpreal::operator-= (const mpreal& v) { mpfr_sub(mp,mp,v.mp,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator-=(const mpz_t v) { mpfr_sub_z(mp,mp,v,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator-=(const mpq_t v) { mpfr_sub_q(mp,mp,v,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator-=(const long double v) { - return *this -= mpreal(v); + *this -= mpreal(v); + MPREAL_MSVC_DEBUGVIEW_CODE; + return *this; } inline mpreal& mpreal::operator-=(const double v) { #if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0)) mpfr_sub_d(mp,mp,v,default_rnd); - return *this; #else - return *this -= mpreal(v); + *this -= mpreal(v); #endif + + MPREAL_MSVC_DEBUGVIEW_CODE; + return *this; } inline mpreal& mpreal::operator-=(const unsigned long int v) { mpfr_sub_ui(mp,mp,v,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator-=(const unsigned int v) { mpfr_sub_ui(mp,mp,v,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator-=(const long int v) { mpfr_sub_si(mp,mp,v,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator-=(const int v) { mpfr_sub_si(mp,mp,v,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } @@ -1057,63 +990,14 @@ inline const mpreal mpreal::operator-()const inline const mpreal operator-(const mpreal& a, const mpreal& b) { // prec(a-b) = max(prec(a),prec(b)) - if(a.get_prec()>b.get_prec()) return mpreal(a) -= b; - else return -(mpreal(b) -= a); -} - -inline const mpreal operator-(const mpreal& a, const mpz_t b) -{ - return mpreal(a) -= b; -} - -inline const mpreal operator-(const mpreal& a, const mpq_t b) -{ - return mpreal(a) -= b; -} - -inline const mpreal operator-(const mpreal& a, const long double b) -{ - return mpreal(a) -= b; -} - -inline const mpreal operator-(const mpreal& a, const double b) -{ - return mpreal(a) -= b; -} - -inline const mpreal operator-(const mpreal& a, const unsigned long int b) -{ - return mpreal(a) -= b; -} - -inline const mpreal operator-(const mpreal& a, const unsigned int b) -{ - return mpreal(a) -= b; -} - -inline const mpreal operator-(const mpreal& a, const long int b) -{ - return mpreal(a) -= b; -} - -inline const mpreal operator-(const mpreal& a, const int b) -{ - return mpreal(a) -= b; -} - -inline const mpreal operator-(const mpz_t b, const mpreal& a) -{ - return -(mpreal(a) -= b); -} - -inline const mpreal operator-(const mpq_t b, const mpreal& a) -{ - return -(mpreal(a) -= b); -} - -inline const mpreal operator-(const long double b, const mpreal& a) -{ - return -(mpreal(a) -= b); + if(a.getPrecision() >= b.getPrecision()) + { + return mpreal(a) -= b; + }else{ + mpreal x(a); + x.setPrecision(b.getPrecision()); + return x -= b; + } } inline const mpreal operator-(const double b, const mpreal& a) @@ -1123,7 +1007,7 @@ inline const mpreal operator-(const double b, const mpreal& a) mpfr_d_sub(x.mp,b,a.mp,mpreal::default_rnd); return x; #else - return -(mpreal(a) -= b); + return mpreal(b) -= a; #endif } @@ -1155,160 +1039,81 @@ inline const mpreal operator-(const int b, const mpreal& a) return x; } -inline const mpreal operator-(const mpreal& a, const char* b) -{ - return a-mpreal(b); -} - -inline const mpreal operator-(const 
char* a, const mpreal& b) -{ - return mpreal(a)-b; -} - ////////////////////////////////////////////////////////////////////////// // * Multiplication inline mpreal& mpreal::operator*= (const mpreal& v) { mpfr_mul(mp,mp,v.mp,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator*=(const mpz_t v) { mpfr_mul_z(mp,mp,v,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator*=(const mpq_t v) { mpfr_mul_q(mp,mp,v,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator*=(const long double v) { - return *this *= mpreal(v); + *this *= mpreal(v); + MPREAL_MSVC_DEBUGVIEW_CODE; + return *this; } inline mpreal& mpreal::operator*=(const double v) { #if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0)) mpfr_mul_d(mp,mp,v,default_rnd); - return *this; #else - return *this *= mpreal(v); + *this *= mpreal(v); #endif + + MPREAL_MSVC_DEBUGVIEW_CODE; + return *this; } inline mpreal& mpreal::operator*=(const unsigned long int v) { mpfr_mul_ui(mp,mp,v,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator*=(const unsigned int v) { mpfr_mul_ui(mp,mp,v,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator*=(const long int v) { mpfr_mul_si(mp,mp,v,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator*=(const int v) { mpfr_mul_si(mp,mp,v,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline const mpreal operator*(const mpreal& a, const mpreal& b) { // prec(a*b) = max(prec(a),prec(b)) - if(a.get_prec()>b.get_prec()) return mpreal(a) *= b; - else return mpreal(b) *= a; -} - -inline const mpreal operator*(const mpreal& a, const mpz_t b) -{ - return mpreal(a) *= b; -} - -inline const mpreal operator*(const mpreal& a, const mpq_t b) -{ - return mpreal(a) *= b; -} - -inline const mpreal operator*(const mpreal& a, const long double b) -{ - return mpreal(a) *= b; -} - -inline const mpreal operator*(const mpreal& a, const double b) -{ - return mpreal(a) *= b; -} - -inline const mpreal operator*(const mpreal& a, const unsigned long int b) -{ - return mpreal(a) *= b; -} - -inline const mpreal operator*(const mpreal& a, const unsigned int b) -{ - return mpreal(a) *= b; -} - -inline const mpreal operator*(const mpreal& a, const long int b) -{ - return mpreal(a) *= b; -} - -inline const mpreal operator*(const mpreal& a, const int b) -{ - return mpreal(a) *= b; -} - -inline const mpreal operator*(const mpz_t b, const mpreal& a) -{ - return mpreal(a) *= b; -} - -inline const mpreal operator*(const mpq_t b, const mpreal& a) -{ - return mpreal(a) *= b; -} - -inline const mpreal operator*(const long double b, const mpreal& a) -{ - return mpreal(a) *= b; -} - -inline const mpreal operator*(const double b, const mpreal& a) -{ - return mpreal(a) *= b; -} - -inline const mpreal operator*(const unsigned long int b, const mpreal& a) -{ - return mpreal(a) *= b; -} - -inline const mpreal operator*(const unsigned int b, const mpreal& a) -{ - return mpreal(a) *= b; -} - -inline const mpreal operator*(const long int b, const mpreal& a) -{ - return mpreal(a) *= b; -} - -inline const mpreal operator*(const int b, const mpreal& a) -{ - return mpreal(a) *= b; + if(a.getPrecision() >= b.getPrecision()) return mpreal(a) *= b; + else return mpreal(b) *= a; } ////////////////////////////////////////////////////////////////////////// @@ -1316,112 +1121,82 @@ inline const mpreal operator*(const int b, const mpreal& a) inline 
mpreal& mpreal::operator/=(const mpreal& v) { mpfr_div(mp,mp,v.mp,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator/=(const mpz_t v) { mpfr_div_z(mp,mp,v,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator/=(const mpq_t v) { mpfr_div_q(mp,mp,v,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator/=(const long double v) { - return *this /= mpreal(v); + *this /= mpreal(v); + MPREAL_MSVC_DEBUGVIEW_CODE; + return *this; } inline mpreal& mpreal::operator/=(const double v) { #if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0)) mpfr_div_d(mp,mp,v,default_rnd); - return *this; #else - return *this /= mpreal(v); + *this /= mpreal(v); #endif + MPREAL_MSVC_DEBUGVIEW_CODE; + return *this; } inline mpreal& mpreal::operator/=(const unsigned long int v) { mpfr_div_ui(mp,mp,v,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator/=(const unsigned int v) { mpfr_div_ui(mp,mp,v,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator/=(const long int v) { mpfr_div_si(mp,mp,v,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator/=(const int v) { mpfr_div_si(mp,mp,v,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline const mpreal operator/(const mpreal& a, const mpreal& b) { - mpreal x(a); - mp_prec_t pb; - mp_prec_t pa; - // prec(a/b) = max(prec(a),prec(b)) - pa = a.get_prec(); - pb = b.get_prec(); - if(pb>pa) x.set_prec(pb); + if(a.getPrecision() >= b.getPrecision()) + { + return mpreal(a) /= b; + }else{ - return x /= b; -} - -inline const mpreal operator/(const mpreal& a, const mpz_t b) -{ - return mpreal(a) /= b; -} - -inline const mpreal operator/(const mpreal& a, const mpq_t b) -{ - return mpreal(a) /= b; -} - -inline const mpreal operator/(const mpreal& a, const long double b) -{ - return mpreal(a) /= b; -} - -inline const mpreal operator/(const mpreal& a, const double b) -{ - return mpreal(a) /= b; -} - -inline const mpreal operator/(const mpreal& a, const unsigned long int b) -{ - return mpreal(a) /= b; -} - -inline const mpreal operator/(const mpreal& a, const unsigned int b) -{ - return mpreal(a) /= b; -} - -inline const mpreal operator/(const mpreal& a, const long int b) -{ - return mpreal(a) /= b; -} - -inline const mpreal operator/(const mpreal& a, const int b) -{ - return mpreal(a) /= b; + mpreal x(a); + x.setPrecision(b.getPrecision()); + return x /= b; + } } inline const mpreal operator/(const unsigned long int b, const mpreal& a) @@ -1452,12 +1227,6 @@ inline const mpreal operator/(const int b, const mpreal& a) return x; } -inline const mpreal operator/(const long double b, const mpreal& a) -{ - mpreal x(b); - return x/a; -} - inline const mpreal operator/(const double b, const mpreal& a) { #if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0)) @@ -1465,8 +1234,7 @@ inline const mpreal operator/(const double b, const mpreal& a) mpfr_d_div(x.mp,b,a.mp,mpreal::default_rnd); return x; #else - mpreal x(b); - return x/a; + return mpreal(b) /= a; #endif } @@ -1475,48 +1243,56 @@ inline const mpreal operator/(const double b, const mpreal& a) inline mpreal& mpreal::operator<<=(const unsigned long int u) { mpfr_mul_2ui(mp,mp,u,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator<<=(const unsigned int u) { mpfr_mul_2ui(mp,mp,static_cast(u),default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator<<=(const 
long int u) { mpfr_mul_2si(mp,mp,u,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator<<=(const int u) { mpfr_mul_2si(mp,mp,static_cast(u),default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator>>=(const unsigned long int u) { mpfr_div_2ui(mp,mp,u,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator>>=(const unsigned int u) { mpfr_div_2ui(mp,mp,static_cast(u),default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator>>=(const long int u) { mpfr_div_2si(mp,mp,u,default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator>>=(const int u) { mpfr_div_2si(mp,mp,static_cast(u),default_rnd); + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } @@ -1592,468 +1368,67 @@ inline const mpreal div_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode) ////////////////////////////////////////////////////////////////////////// //Boolean operators -inline bool operator > (const mpreal& a, const mpreal& b) -{ - return (mpfr_greater_p(a.mp,b.mp)!=0); -} - -inline bool operator > (const mpreal& a, const unsigned long int b) -{ - return a>mpreal(b); -} - -inline bool operator > (const mpreal& a, const unsigned int b) -{ - return a>mpreal(b); -} - -inline bool operator > (const mpreal& a, const long int b) -{ - return a>mpreal(b); -} - -inline bool operator > (const mpreal& a, const int b) -{ - return a>mpreal(b); -} - -inline bool operator > (const mpreal& a, const long double b) -{ - return a>mpreal(b); -} - -inline bool operator > (const mpreal& a, const double b) -{ - return a>mpreal(b); -} - -inline bool operator > (const unsigned long int a, const mpreal& b) -{ - return mpreal(a)>b; -} - -inline bool operator > (const unsigned int a, const mpreal& b) -{ - return mpreal(a)>b; -} - -inline bool operator > (const long int a, const mpreal& b) -{ - return mpreal(a)>b; -} - -inline bool operator > (const int a, const mpreal& b) -{ - return mpreal(a)>b; -} - -inline bool operator > (const long double a, const mpreal& b) -{ - return mpreal(a)>b; -} - -inline bool operator > (const double a, const mpreal& b) -{ - return mpreal(a)>b; -} - -inline bool operator >= (const mpreal& a, const mpreal& b) -{ - return (mpfr_greaterequal_p(a.mp,b.mp)!=0); -} - -inline bool operator >= (const mpreal& a, const unsigned long int b) -{ - return a>=mpreal(b); -} - -inline bool operator >= (const mpreal& a, const unsigned int b) -{ - return a>=mpreal(b); -} - -inline bool operator >= (const mpreal& a, const long int b) -{ - return a>=mpreal(b); -} - -inline bool operator >= (const mpreal& a, const int b) -{ - return a>=mpreal(b); -} - -inline bool operator >= (const mpreal& a, const long double b) -{ - return a>=mpreal(b); -} - -inline bool operator >= (const mpreal& a, const double b) -{ - return a>=mpreal(b); -} - -inline bool operator >= (const unsigned long int a,const mpreal& b) -{ - return mpreal(a)>=b; -} - -inline bool operator >= (const unsigned int a, const mpreal& b) -{ - return mpreal(a)>=b; -} - -inline bool operator >= (const long int a, const mpreal& b) -{ - return mpreal(a)>=b; -} - -inline bool operator >= (const int a, const mpreal& b) -{ - return mpreal(a)>=b; -} - -inline bool operator >= (const long double a, const mpreal& b) -{ - return mpreal(a)>=b; -} - -inline bool operator >= (const double a, const mpreal& b) -{ - return mpreal(a)>=b; -} - -inline bool operator < (const mpreal& a, const mpreal& b) -{ - return (mpfr_less_p(a.mp,b.mp)!=0); 
-} - -inline bool operator < (const mpreal& a, const unsigned long int b) -{ - return a (const mpreal& a, const mpreal& b){ return (mpfr_greater_p(a.mp,b.mp) !=0); } +inline bool operator >= (const mpreal& a, const mpreal& b){ return (mpfr_greaterequal_p(a.mp,b.mp) !=0); } +inline bool operator < (const mpreal& a, const mpreal& b){ return (mpfr_less_p(a.mp,b.mp) !=0); } +inline bool operator <= (const mpreal& a, const mpreal& b){ return (mpfr_lessequal_p(a.mp,b.mp) !=0); } +inline bool operator == (const mpreal& a, const mpreal& b){ return (mpfr_equal_p(a.mp,b.mp) !=0); } +inline bool operator != (const mpreal& a, const mpreal& b){ return (mpfr_lessgreater_p(a.mp,b.mp) !=0); } + +inline bool operator == (const mpreal& a, const unsigned long int b ){ return (mpfr_cmp_ui(a.mp,b) == 0); } +inline bool operator == (const mpreal& a, const unsigned int b ){ return (mpfr_cmp_ui(a.mp,b) == 0); } +inline bool operator == (const mpreal& a, const long int b ){ return (mpfr_cmp_si(a.mp,b) == 0); } +inline bool operator == (const mpreal& a, const int b ){ return (mpfr_cmp_si(a.mp,b) == 0); } +inline bool operator == (const mpreal& a, const long double b ){ return (mpfr_cmp_ld(a.mp,b) == 0); } +inline bool operator == (const mpreal& a, const double b ){ return (mpfr_cmp_d(a.mp,b) == 0); } + + +inline bool isnan (const mpreal& v){ return (mpfr_nan_p(v.mp) != 0); } +inline bool isinf (const mpreal& v){ return (mpfr_inf_p(v.mp) != 0); } +inline bool isfinite(const mpreal& v){ return (mpfr_number_p(v.mp) != 0); } +inline bool iszero (const mpreal& v){ return (mpfr_zero_p(v.mp) != 0); } +inline bool isint (const mpreal& v){ return (mpfr_integer_p(v.mp) != 0); } #if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0)) -inline bool _isregular(const mpreal& v) -{ - return (mpfr_regular_p(v.mp)); -} -#endif // MPFR 3.0.0 Specifics +inline bool isregular(const mpreal& v){ return (mpfr_regular_p(v.mp));} +#endif ////////////////////////////////////////////////////////////////////////// // Type Converters -inline mpreal::operator double() const +inline long mpreal::toLong() const { return mpfr_get_si(mp,GMP_RNDZ); } +inline unsigned long mpreal::toULong() const { return mpfr_get_ui(mp,GMP_RNDZ); } +inline double mpreal::toDouble() const { return mpfr_get_d(mp,default_rnd); } +inline long double mpreal::toLDouble() const { return mpfr_get_ld(mp,default_rnd); } + +#if defined (MPREAL_HAVE_INT64_SUPPORT) +inline int64_t mpreal::toInt64() const{ return mpfr_get_sj(mp,GMP_RNDZ); } +inline uint64_t mpreal::toUInt64() const{ return mpfr_get_uj(mp,GMP_RNDZ); } +#endif + +inline ::mpfr_ptr mpreal::mpfr_ptr() { return mp; } +inline ::mpfr_srcptr mpreal::mpfr_srcptr() const { return const_cast< ::mpfr_srcptr >(mp); } + +////////////////////////////////////////////////////////////////////////// +// Bits - decimal digits relation +// bits = ceil(digits*log[2](10)) +// digits = floor(bits*log[10](2)) + +inline mp_prec_t digits2bits(int d) { - return mpfr_get_d(mp,default_rnd); + const double LOG2_10 = 3.3219280948873624; + + d = 10>d?10:d; + + return (mp_prec_t)std::ceil((d)*LOG2_10); } -inline mpreal::operator float() const +inline int bits2digits(mp_prec_t b) { - return (float)mpfr_get_d(mp,default_rnd); -} + const double LOG10_2 = 0.30102999566398119; -inline mpreal::operator long double() const -{ - return mpfr_get_ld(mp,default_rnd); -} + b = 34>b?34:b; -inline mpreal::operator unsigned long() const -{ - return mpfr_get_ui(mp,GMP_RNDZ); -} - -inline mpreal::operator unsigned int() const -{ - return 
static_cast(mpfr_get_ui(mp,GMP_RNDZ)); -} - -inline mpreal::operator long() const -{ - return mpfr_get_si(mp,GMP_RNDZ); -} - -inline mpreal::operator int() const -{ - return static_cast(mpfr_get_si(mp,GMP_RNDZ)); -} - -inline mpreal::operator mpfr_ptr() -{ - return mp; + return (int)std::floor((b)*LOG10_2); } ////////////////////////////////////////////////////////////////////////// @@ -2064,9 +1439,44 @@ inline int sgn(const mpreal& v) return (r>0?-1:1); } -inline void mpreal::set_sign(int sign, mp_rnd_t rnd_mode) +inline mpreal& mpreal::setSign(int sign, mp_rnd_t RoundingMode) { - mpfr_setsign(mp,mp,(sign<0?1:0),rnd_mode); + mpfr_setsign(mp,mp,(sign<0?1:0),RoundingMode); + MPREAL_MSVC_DEBUGVIEW_CODE; + return *this; +} + +inline int mpreal::getPrecision() const +{ + return mpfr_get_prec(mp); +} + +inline mpreal& mpreal::setPrecision(int Precision, mp_rnd_t RoundingMode) +{ + mpfr_prec_round(mp,Precision, RoundingMode); + MPREAL_MSVC_DEBUGVIEW_CODE; + return *this; +} + +inline mpreal& mpreal::setInf(int sign) +{ + mpfr_set_inf(mp,sign); + MPREAL_MSVC_DEBUGVIEW_CODE; + return *this; +} + +inline mpreal& mpreal::setNan() +{ + mpfr_set_nan(mp); + MPREAL_MSVC_DEBUGVIEW_CODE; + return *this; +} + +inline mpreal& mpreal::setZero(int sign) +{ + mpfr_set_zero(mp,sign); + MPREAL_MSVC_DEBUGVIEW_CODE; + return *this; } inline mp_prec_t mpreal::get_prec() const @@ -2077,16 +1487,7 @@ inline mp_prec_t mpreal::get_prec() const inline void mpreal::set_prec(mp_prec_t prec, mp_rnd_t rnd_mode) { mpfr_prec_round(mp,prec,rnd_mode); -} - -inline void mpreal::set_inf(int sign) -{ - mpfr_set_inf(mp,sign); -} - -inline void mpreal::set_nan() -{ - mpfr_set_nan(mp); + MPREAL_MSVC_DEBUGVIEW_CODE; } inline mp_exp_t mpreal::get_exp () @@ -2096,7 +1497,9 @@ inline mp_exp_t mpreal::get_exp () inline int mpreal::set_exp (mp_exp_t e) { - return mpfr_set_exp(mp,e); + int x = mpfr_set_exp(mp, e); + MPREAL_MSVC_DEBUGVIEW_CODE; + return x; } inline const mpreal frexp(const mpreal& v, mp_exp_t* exp) @@ -2120,16 +1523,24 @@ inline const mpreal machine_epsilon(mp_prec_t prec) { // the smallest eps such that 1.0+eps != 1.0 // depends (of cause) on the precision - mpreal x(1,prec); - return nextabove(x)-x; + return machine_epsilon(mpreal(1,prec)); +} + +inline const mpreal machine_epsilon(const mpreal& x) +{ + if( x < 0) + { + return nextabove(-x)+x; + }else{ + return nextabove(x)-x; + } } inline const mpreal mpreal_min(mp_prec_t prec) { // min = 1/2*2^emin = 2^(emin-1) - - mpreal x(1,prec); - return x <<= mpreal::get_emin()-1; + + return mpreal(1,prec) << mpreal::get_emin()-1; } inline const mpreal mpreal_max(mp_prec_t prec) @@ -2138,8 +1549,25 @@ inline const mpreal mpreal_max(mp_prec_t prec) // and use emax-1 to prevent value to be +inf // max = 2^(emax-1) - mpreal x(1,prec); - return x <<= mpreal::get_emax()-1; + return mpreal(1,prec) << mpreal::get_emax()-1; +} + +inline bool isEqualUlps(const mpreal& a, const mpreal& b, int maxUlps) +{ + /* + maxUlps - a and b can be apart by maxUlps binary numbers. 
+ */ + return abs(a - b) <= machine_epsilon((max)(abs(a), abs(b))) * maxUlps; +} + +inline bool isEqualFuzzy(const mpreal& a, const mpreal& b, const mpreal& eps) +{ + return abs(a - b) <= (min)(abs(a), abs(b)) * eps; +} + +inline bool isEqualFuzzy(const mpreal& a, const mpreal& b) +{ + return isEqualFuzzy(a,b,machine_epsilon((std::min)(abs(a), abs(b)))); } inline const mpreal modf(const mpreal& v, mpreal& n) @@ -2159,7 +1587,9 @@ inline int mpreal::check_range (int t, mp_rnd_t rnd_mode) inline int mpreal::subnormalize (int t,mp_rnd_t rnd_mode) { - return mpfr_subnormalize(mp,t,rnd_mode); + int r = mpfr_subnormalize(mp,t,rnd_mode); + MPREAL_MSVC_DEBUGVIEW_CODE; + return r; } inline mp_exp_t mpreal::get_emin (void) @@ -2234,13 +1664,13 @@ inline const mpreal sqrt(const unsigned int v, mp_rnd_t rnd_mode) inline const mpreal sqrt(const long int v, mp_rnd_t rnd_mode) { if (v>=0) return sqrt(static_cast(v),rnd_mode); - else return mpreal(); // NaN + else return mpreal().setNan(); // NaN } inline const mpreal sqrt(const int v, mp_rnd_t rnd_mode) { if (v>=0) return sqrt(static_cast(v),rnd_mode); - else return mpreal(); // NaN + else return mpreal().setNan(); // NaN } inline const mpreal sqrt(const long double v, mp_rnd_t rnd_mode) @@ -2403,6 +1833,36 @@ inline const mpreal atan (const mpreal& v, mp_rnd_t rnd_mode) return x; } +inline const mpreal acot (const mpreal& v, mp_rnd_t rnd_mode) +{ + return atan(1/v, rnd_mode); +} + +inline const mpreal asec (const mpreal& v, mp_rnd_t rnd_mode) +{ + return acos(1/v, rnd_mode); +} + +inline const mpreal acsc (const mpreal& v, mp_rnd_t rnd_mode) +{ + return asin(1/v, rnd_mode); +} + +inline const mpreal acoth (const mpreal& v, mp_rnd_t rnd_mode) +{ + return atanh(1/v, rnd_mode); +} + +inline const mpreal asech (const mpreal& v, mp_rnd_t rnd_mode) +{ + return acosh(1/v, rnd_mode); +} + +inline const mpreal acsch (const mpreal& v, mp_rnd_t rnd_mode) +{ + return asinh(1/v, rnd_mode); +} + inline const mpreal atan2 (const mpreal& y, const mpreal& x, mp_rnd_t rnd_mode) { mpreal a; @@ -2481,6 +1941,36 @@ inline const mpreal atanh (const mpreal& v, mp_rnd_t rnd_mode) return x; } +inline const mpreal hypot (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode) +{ + mpreal a; + mp_prec_t yp, xp; + + yp = y.get_prec(); + xp = x.get_prec(); + + a.set_prec(yp>xp?yp:xp); + + mpfr_hypot(a.mp, x.mp, y.mp, rnd_mode); + + return a; +} + +inline const mpreal remainder (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode) +{ + mpreal a; + mp_prec_t yp, xp; + + yp = y.get_prec(); + xp = x.get_prec(); + + a.set_prec(yp>xp?yp:xp); + + mpfr_remainder(a.mp, x.mp, y.mp, rnd_mode); + + return a; +} + inline const mpreal fac_ui (unsigned long int v, mp_prec_t prec, mp_rnd_t rnd_mode) { mpreal x(0,prec); @@ -2509,11 +1999,15 @@ inline const mpreal eint (const mpreal& v, mp_rnd_t rnd_mode) return x; } -inline const mpreal gamma (const mpreal& v, mp_rnd_t rnd_mode) +inline const mpreal gamma (const mpreal& x, mp_rnd_t rnd_mode) { - mpreal x(v); - mpfr_gamma(x.mp,v.mp,rnd_mode); - return x; + mpreal FunctionValue(x); + + // x < 0: gamma(-x) = -pi/(x * gamma(x) * sin(pi*x)) + + mpfr_gamma(FunctionValue.mp, x.mp, rnd_mode); + + return FunctionValue; } inline const mpreal lngamma (const mpreal& v, mp_rnd_t rnd_mode) @@ -2557,42 +2051,42 @@ inline const mpreal erfc (const mpreal& v, mp_rnd_t rnd_mode) return x; } -inline const mpreal _j0 (const mpreal& v, mp_rnd_t rnd_mode) +inline const mpreal besselj0 (const mpreal& v, mp_rnd_t rnd_mode) { mpreal x(v); mpfr_j0(x.mp,v.mp,rnd_mode); return x; 
} -inline const mpreal _j1 (const mpreal& v, mp_rnd_t rnd_mode) +inline const mpreal besselj1 (const mpreal& v, mp_rnd_t rnd_mode) { mpreal x(v); mpfr_j1(x.mp,v.mp,rnd_mode); return x; } -inline const mpreal _jn (long n, const mpreal& v, mp_rnd_t rnd_mode) +inline const mpreal besseljn (long n, const mpreal& v, mp_rnd_t rnd_mode) { mpreal x(v); mpfr_jn(x.mp,n,v.mp,rnd_mode); return x; } -inline const mpreal _y0 (const mpreal& v, mp_rnd_t rnd_mode) +inline const mpreal bessely0 (const mpreal& v, mp_rnd_t rnd_mode) { mpreal x(v); mpfr_y0(x.mp,v.mp,rnd_mode); return x; } -inline const mpreal _y1 (const mpreal& v, mp_rnd_t rnd_mode) +inline const mpreal bessely1 (const mpreal& v, mp_rnd_t rnd_mode) { mpreal x(v); mpfr_y1(x.mp,v.mp,rnd_mode); return x; } -inline const mpreal _yn (long n, const mpreal& v, mp_rnd_t rnd_mode) +inline const mpreal besselyn (long n, const mpreal& v, mp_rnd_t rnd_mode) { mpreal x(v); mpfr_yn(x.mp,n,v.mp,rnd_mode); @@ -2780,7 +2274,6 @@ inline void swap(mpreal& a, mpreal& b) mpfr_swap(a.mp,b.mp); } - inline const mpreal (max)(const mpreal& x, const mpreal& y) { return (x>y?x:y); @@ -2835,7 +2328,7 @@ inline const mpreal urandomb (gmp_randstate_t& state) #if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0)) // use gmp_randinit_default() to init state, gmp_randclear() to clear -inline const mpreal urandom (gmp_randstate_t& state,mp_rnd_t rnd_mode) +inline const mpreal urandom (gmp_randstate_t& state, mp_rnd_t rnd_mode) { mpreal x; mpfr_urandom(x.mp,state,rnd_mode); @@ -2852,6 +2345,34 @@ inline const mpreal random2 (mp_size_t size, mp_exp_t exp) } #endif +// Uniformly distributed random number generation +// a = random(seed); <- initialization & first random number generation +// a = random(); <- next random numbers generation +// seed != 0 +inline const mpreal random(unsigned int seed) +{ + +#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0)) + static gmp_randstate_t state; + static bool isFirstTime = true; + + if(isFirstTime) + { + gmp_randinit_default(state); + gmp_randseed_ui(state,0); + isFirstTime = false; + } + + if(seed != 0) gmp_randseed_ui(state,seed); + + return mpfr::urandom(state); +#else + if(seed != 0) std::srand(seed); + return mpfr::mpreal(std::rand()/(double)RAND_MAX); +#endif + +} + ////////////////////////////////////////////////////////////////////////// // Set/Get global properties inline void mpreal::set_default_prec(mp_prec_t prec) @@ -2862,7 +2383,7 @@ inline void mpreal::set_default_prec(mp_prec_t prec) inline mp_prec_t mpreal::get_default_prec() { - return mpfr_get_default_prec(); + return (mpfr_get_default_prec)(); } inline void mpreal::set_default_base(int base) @@ -2883,7 +2404,7 @@ inline void mpreal::set_default_rnd(mp_rnd_t rnd_mode) inline mp_rnd_t mpreal::get_default_rnd() { - return mpfr_get_default_rounding_mode(); + return static_cast((mpfr_get_default_rounding_mode)()); } inline void mpreal::set_double_bits(int dbits) @@ -3197,8 +2718,7 @@ inline const mpreal pow(const double a, const int b, mp_rnd_t rnd_mode) { return pow(mpreal(a),static_cast(b),rnd_mode); // mpfr_pow_si } - -} +} // End of mpfr namespace // Explicit specialization of std::swap for mpreal numbers // Thus standard algorithms will use efficient version of swap (due to Koenig lookup) @@ -3212,4 +2732,4 @@ namespace std } } -#endif /* __MP_REAL_H__ */ +#endif /* __MPREAL_H__ */ diff --git a/gtsam/3rdparty/Eigen/unsupported/test/mpreal_support.cpp b/gtsam/3rdparty/Eigen/unsupported/test/mpreal_support.cpp index 53d388821..551af9db8 100644 --- 
a/gtsam/3rdparty/Eigen/unsupported/test/mpreal_support.cpp +++ b/gtsam/3rdparty/Eigen/unsupported/test/mpreal_support.cpp @@ -2,6 +2,7 @@ #include #include #include +#include using namespace mpfr; using namespace std; @@ -24,6 +25,15 @@ void test_mpreal_support() MatrixXmp B = MatrixXmp::Random(s,s); MatrixXmp S = A.adjoint() * A; MatrixXmp X; + + // Basic stuffs + VERIFY_IS_APPROX(A.real(), A); + VERIFY(Eigen::internal::isApprox(A.array().abs2().sum(), A.squaredNorm())); + VERIFY_IS_APPROX(A.array().exp(), exp(A.array())); + VERIFY_IS_APPROX(A.array().abs2().sqrt(), A.array().abs()); + VERIFY_IS_APPROX(A.array().sin(), sin(A.array())); + VERIFY_IS_APPROX(A.array().cos(), cos(A.array())); + // Cholesky X = S.selfadjointView().llt().solve(B); @@ -39,6 +49,13 @@ void test_mpreal_support() VERIFY_IS_APPROX((S.selfadjointView() * eig.eigenvectors()), eig.eigenvectors() * eig.eigenvalues().asDiagonal()); } + + { + MatrixXmp A(8,3); A.setRandom(); + // test output (interesting things happen in this code) + std::stringstream stream; + stream << A; + } } extern "C" { diff --git a/gtsam/3rdparty/Eigen/unsupported/test/polynomialsolver.cpp b/gtsam/3rdparty/Eigen/unsupported/test/polynomialsolver.cpp index 54b6657c9..28e034179 100644 --- a/gtsam/3rdparty/Eigen/unsupported/test/polynomialsolver.cpp +++ b/gtsam/3rdparty/Eigen/unsupported/test/polynomialsolver.cpp @@ -27,10 +27,6 @@ #include #include -#ifdef HAS_GSL -#include "gsl_helper.h" -#endif - using namespace std; namespace Eigen { @@ -73,32 +69,6 @@ bool aux_evalSolver( const POLYNOMIAL& pols, SOLVER& psolve ) cerr << endl; } - #ifdef HAS_GSL - if (internal::is_same< Scalar, double>::value) - { - typedef GslTraits Gsl; - RootsType gslRoots(deg); - Gsl::eigen_poly_solve( pols, gslRoots ); - EvalRootsType gslEvr( deg ); - for( int i=0; i() ); - if( !evalToZero ) - { - if( !gslEvalToZero ){ - cerr << "GSL also failed" << endl; } - else{ - cerr << "GSL did NOT failed" << endl; } - cerr << "GSL roots found: " << gslRoots.transpose() << endl; - cerr << "Abs value of the polynomial at the GSL roots: " << gslEvr.transpose() << endl; - cerr << endl; - } - } - #endif //< HAS_GSL - - std::vector rootModuli( roots.size() ); Map< EvalRootsType > aux( &rootModuli[0], roots.size() ); aux = roots.array().abs(); diff --git a/gtsam/3rdparty/Eigen/unsupported/test/sparse_extra.cpp b/gtsam/3rdparty/Eigen/unsupported/test/sparse_extra.cpp index b1fd481e8..6c5c888ae 100644 --- a/gtsam/3rdparty/Eigen/unsupported/test/sparse_extra.cpp +++ b/gtsam/3rdparty/Eigen/unsupported/test/sparse_extra.cpp @@ -22,7 +22,11 @@ // License and a copy of the GNU General Public License along with // Eigen. If not, see . 
-#include "sparse.h" + +// import basic and product tests for deprectaed DynamicSparseMatrix +#define EIGEN_NO_DEPRECATED_WARNING +#include "sparse_basic.cpp" +#include "sparse_product.cpp" #include template @@ -145,10 +149,16 @@ template void sparse_extra(const SparseMatrixType& re void test_sparse_extra() { for(int i = 0; i < g_repeat; i++) { + int s = Eigen::internal::random(1,50); CALL_SUBTEST_1( sparse_extra(SparseMatrix(8, 8)) ); - CALL_SUBTEST_2( sparse_extra(SparseMatrix >(16, 16)) ); - CALL_SUBTEST_1( sparse_extra(SparseMatrix(33, 33)) ); + CALL_SUBTEST_2( sparse_extra(SparseMatrix >(s, s)) ); + CALL_SUBTEST_1( sparse_extra(SparseMatrix(s, s)) ); - CALL_SUBTEST_3( sparse_extra(DynamicSparseMatrix(8, 8)) ); + CALL_SUBTEST_3( sparse_extra(DynamicSparseMatrix(s, s)) ); +// CALL_SUBTEST_3(( sparse_basic(DynamicSparseMatrix(s, s)) )); +// CALL_SUBTEST_3(( sparse_basic(DynamicSparseMatrix(s, s)) )); + + CALL_SUBTEST_3( (sparse_product >()) ); + CALL_SUBTEST_3( (sparse_product >()) ); } } diff --git a/gtsam/3rdparty/Eigen/unsupported/test/sparse_ldlt.cpp b/gtsam/3rdparty/Eigen/unsupported/test/sparse_ldlt.cpp deleted file mode 100644 index 03a26bcd2..000000000 --- a/gtsam/3rdparty/Eigen/unsupported/test/sparse_ldlt.cpp +++ /dev/null @@ -1,175 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2008-2010 Gael Guennebaud -// -// Eigen is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 3 of the License, or (at your option) any later version. -// -// Alternatively, you can redistribute it and/or -// modify it under the terms of the GNU General Public License as -// published by the Free Software Foundation; either version 2 of -// the License, or (at your option) any later version. -// -// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY -// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public -// License and a copy of the GNU General Public License along with -// Eigen. If not, see . 
- -#include "sparse.h" -#include - -#ifdef EIGEN_CHOLMOD_SUPPORT -#include -#endif - -template void sparse_ldlt(int rows, int cols) -{ - static bool odd = true; - odd = !odd; - double density = (std::max)(8./(rows*cols), 0.01); - typedef Matrix DenseMatrix; - typedef Matrix DenseVector; - - SparseMatrix m2(rows, cols); - DenseMatrix refMat2(rows, cols); - - DenseVector b = DenseVector::Random(cols); - DenseVector refX(cols), x(cols); - - initSparse(density, refMat2, m2, ForceNonZeroDiag|MakeUpperTriangular, 0, 0); - - SparseMatrix m3 = m2 * m2.adjoint(), m3_lo(rows,rows), m3_up(rows,rows); - DenseMatrix refMat3 = refMat2 * refMat2.adjoint(); - - refX = refMat3.template selfadjointView().ldlt().solve(b); - typedef SparseMatrix SparseSelfAdjointMatrix; - x = b; - SparseLDLT ldlt(m3); - if (ldlt.succeeded()) - ldlt.solveInPlace(x); - else - std::cerr << "warning LDLT failed\n"; - - VERIFY_IS_APPROX(refMat3.template selfadjointView() * x, b); - VERIFY(refX.isApprox(x,test_precision()) && "LDLT: default"); - -#ifdef EIGEN_CHOLMOD_SUPPORT - { - x = b; - SparseLDLT ldlt2(m3); - if (ldlt2.succeeded()) - { - ldlt2.solveInPlace(x); - VERIFY_IS_APPROX(refMat3.template selfadjointView() * x, b); - VERIFY(refX.isApprox(x,test_precision()) && "LDLT: cholmod solveInPlace"); - - x = ldlt2.solve(b); - VERIFY_IS_APPROX(refMat3.template selfadjointView() * x, b); - VERIFY(refX.isApprox(x,test_precision()) && "LDLT: cholmod solve"); - } - else - std::cerr << "warning LDLT failed\n"; - } -#endif - - // new Simplicial LLT - - - // new API - { - SparseMatrix m2(rows, cols); - DenseMatrix refMat2(rows, cols); - - DenseVector b = DenseVector::Random(cols); - DenseVector ref_x(cols), x(cols); - DenseMatrix B = DenseMatrix::Random(rows,cols); - DenseMatrix ref_X(rows,cols), X(rows,cols); - - initSparse(density, refMat2, m2, ForceNonZeroDiag|MakeLowerTriangular, 0, 0); - - for(int i=0; i m3 = m2 * m2.adjoint(), m3_lo(rows,rows), m3_up(rows,rows); - DenseMatrix refMat3 = refMat2 * refMat2.adjoint(); - - m3_lo.template selfadjointView().rankUpdate(m2,0); - m3_up.template selfadjointView().rankUpdate(m2,0); - - // with a single vector as the rhs - ref_x = refMat3.template selfadjointView().llt().solve(b); - - x = SimplicialCholesky, Lower>().setMode(odd ? SimplicialCholeskyLLt : SimplicialCholeskyLDLt).compute(m3).solve(b); - VERIFY(ref_x.isApprox(x,test_precision()) && "SimplicialCholesky: solve, full storage, lower, single dense rhs"); - - x = SimplicialCholesky, Upper>().setMode(odd ? SimplicialCholeskyLLt : SimplicialCholeskyLDLt).compute(m3).solve(b); - VERIFY(ref_x.isApprox(x,test_precision()) && "SimplicialCholesky: solve, full storage, upper, single dense rhs"); - - x = SimplicialCholesky, Lower>(m3_lo).solve(b); - VERIFY(ref_x.isApprox(x,test_precision()) && "SimplicialCholesky: solve, lower only, single dense rhs"); - - x = SimplicialCholesky, Upper>(m3_up).solve(b); - VERIFY(ref_x.isApprox(x,test_precision()) && "SimplicialCholesky: solve, upper only, single dense rhs"); - - - // with multiple rhs - ref_X = refMat3.template selfadjointView().llt().solve(B); - - X = SimplicialCholesky, Lower>().setMode(odd ? SimplicialCholeskyLLt : SimplicialCholeskyLDLt).compute(m3).solve(B); - VERIFY(ref_X.isApprox(X,test_precision()) && "SimplicialCholesky: solve, full storage, lower, multiple dense rhs"); - - X = SimplicialCholesky, Upper>().setMode(odd ? 
SimplicialCholeskyLLt : SimplicialCholeskyLDLt).compute(m3).solve(B); - VERIFY(ref_X.isApprox(X,test_precision()) && "SimplicialCholesky: solve, full storage, upper, multiple dense rhs"); - - - // with a sparse rhs -// SparseMatrix spB(rows,cols), spX(rows,cols); -// B.diagonal().array() += 1; -// spB = B.sparseView(0.5,1); -// -// ref_X = refMat3.template selfadjointView().llt().solve(DenseMatrix(spB)); -// -// spX = SimplicialCholesky, Lower>(m3).solve(spB); -// VERIFY(ref_X.isApprox(spX.toDense(),test_precision()) && "LLT: cholmod solve, multiple sparse rhs"); -// -// spX = SimplicialCholesky, Upper>(m3).solve(spB); -// VERIFY(ref_X.isApprox(spX.toDense(),test_precision()) && "LLT: cholmod solve, multiple sparse rhs"); - } - - - -// for(int i=0; i().ldlt().solve(b); -// typedef SparseMatrix SparseSelfAdjointMatrix; -// x = b; -// SparseLDLT ldlt(m2); -// if (ldlt.succeeded()) -// ldlt.solveInPlace(x); -// else -// std::cerr << "warning LDLT failed\n"; -// -// VERIFY_IS_APPROX(refMat2.template selfadjointView() * x, b); -// VERIFY(refX.isApprox(x,test_precision()) && "LDLT: default"); - - -} - -void test_sparse_ldlt() -{ - for(int i = 0; i < g_repeat; i++) { - CALL_SUBTEST_1(sparse_ldlt(8, 8) ); - int s = internal::random(1,300); - CALL_SUBTEST_2(sparse_ldlt >(s,s) ); - CALL_SUBTEST_1(sparse_ldlt(s,s) ); - } -} diff --git a/gtsam/3rdparty/Eigen/unsupported/test/sparse_llt.cpp b/gtsam/3rdparty/Eigen/unsupported/test/sparse_llt.cpp deleted file mode 100644 index 5f8a7ce36..000000000 --- a/gtsam/3rdparty/Eigen/unsupported/test/sparse_llt.cpp +++ /dev/null @@ -1,140 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2008-2010 Gael Guennebaud -// -// Eigen is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 3 of the License, or (at your option) any later version. -// -// Alternatively, you can redistribute it and/or -// modify it under the terms of the GNU General Public License as -// published by the Free Software Foundation; either version 2 of -// the License, or (at your option) any later version. -// -// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY -// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public -// License and a copy of the GNU General Public License along with -// Eigen. If not, see . 
- -#include "sparse.h" -#include - -#ifdef EIGEN_CHOLMOD_SUPPORT -#include -#endif - -template void sparse_llt(int rows, int cols) -{ - double density = (std::max)(8./(rows*cols), 0.01); - typedef Matrix DenseMatrix; - typedef Matrix DenseVector; - - // TODO fix the issue with complex (see SparseLLT::solveInPlace) - SparseMatrix m2(rows, cols); - DenseMatrix refMat2(rows, cols); - - DenseVector b = DenseVector::Random(cols); - DenseVector ref_x(cols), x(cols); - DenseMatrix B = DenseMatrix::Random(rows,cols); - DenseMatrix ref_X(rows,cols), X(rows,cols); - - initSparse(density, refMat2, m2, ForceNonZeroDiag|MakeLowerTriangular, 0, 0); - - for(int i=0; i().llt().solve(b); - if (!NumTraits::IsComplex) - { - x = b; - SparseLLT > (m2).solveInPlace(x); - VERIFY(ref_x.isApprox(x,test_precision()) && "LLT: default"); - } - -#ifdef EIGEN_CHOLMOD_SUPPORT - // legacy API - { - // Cholmod, as configured in CholmodSupport.h, only supports self-adjoint matrices - SparseMatrix m3 = m2.adjoint()*m2; - DenseMatrix refMat3 = refMat2.adjoint()*refMat2; - - ref_x = refMat3.template selfadjointView().llt().solve(b); - - x = b; - SparseLLT, Cholmod>(m3).solveInPlace(x); - VERIFY((m3*x).isApprox(b,test_precision()) && "LLT legacy: cholmod solveInPlace"); - - x = SparseLLT, Cholmod>(m3).solve(b); - VERIFY(ref_x.isApprox(x,test_precision()) && "LLT legacy: cholmod solve"); - } - - // new API - { - // Cholmod, as configured in CholmodSupport.h, only supports self-adjoint matrices - SparseMatrix m3 = m2 * m2.adjoint(), m3_lo(rows,rows), m3_up(rows,rows); - DenseMatrix refMat3 = refMat2 * refMat2.adjoint(); - - m3_lo.template selfadjointView().rankUpdate(m2,0); - m3_up.template selfadjointView().rankUpdate(m2,0); - - // with a single vector as the rhs - ref_x = refMat3.template selfadjointView().llt().solve(b); - - x = CholmodDecomposition, Lower>(m3).solve(b); - VERIFY(ref_x.isApprox(x,test_precision()) && "LLT: cholmod solve, single dense rhs"); - - x = CholmodDecomposition, Upper>(m3).solve(b); - VERIFY(ref_x.isApprox(x,test_precision()) && "LLT: cholmod solve, single dense rhs"); - - x = CholmodDecomposition, Lower>(m3_lo).solve(b); - VERIFY(ref_x.isApprox(x,test_precision()) && "LLT: cholmod solve, single dense rhs"); - - x = CholmodDecomposition, Upper>(m3_up).solve(b); - VERIFY(ref_x.isApprox(x,test_precision()) && "LLT: cholmod solve, single dense rhs"); - - - // with multiple rhs - ref_X = refMat3.template selfadjointView().llt().solve(B); - - #ifndef EIGEN_DEFAULT_TO_ROW_MAJOR - // TODO make sure the API is properly documented about this fact - X = CholmodDecomposition, Lower>(m3).solve(B); - VERIFY(ref_X.isApprox(X,test_precision()) && "LLT: cholmod solve, multiple dense rhs"); - - X = CholmodDecomposition, Upper>(m3).solve(B); - VERIFY(ref_X.isApprox(X,test_precision()) && "LLT: cholmod solve, multiple dense rhs"); - #endif - - - // with a sparse rhs - SparseMatrix spB(rows,cols), spX(rows,cols); - B.diagonal().array() += 1; - spB = B.sparseView(0.5,1); - - ref_X = refMat3.template selfadjointView().llt().solve(DenseMatrix(spB)); - - spX = CholmodDecomposition, Lower>(m3).solve(spB); - VERIFY(ref_X.isApprox(spX.toDense(),test_precision()) && "LLT: cholmod solve, multiple sparse rhs"); - - spX = CholmodDecomposition, Upper>(m3).solve(spB); - VERIFY(ref_X.isApprox(spX.toDense(),test_precision()) && "LLT: cholmod solve, multiple sparse rhs"); - } -#endif - -} - -void test_sparse_llt() -{ - for(int i = 0; i < g_repeat; i++) { - CALL_SUBTEST_1(sparse_llt(8, 8) ); - int s = internal::random(1,300); - 
CALL_SUBTEST_2(sparse_llt >(s,s) ); - CALL_SUBTEST_1(sparse_llt(s,s) ); - } -} diff --git a/gtsam/3rdparty/Eigen/unsupported/test/sparse_lu.cpp b/gtsam/3rdparty/Eigen/unsupported/test/sparse_lu.cpp deleted file mode 100644 index d58e85a0a..000000000 --- a/gtsam/3rdparty/Eigen/unsupported/test/sparse_lu.cpp +++ /dev/null @@ -1,113 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2008-2010 Gael Guennebaud -// -// Eigen is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 3 of the License, or (at your option) any later version. -// -// Alternatively, you can redistribute it and/or -// modify it under the terms of the GNU General Public License as -// published by the Free Software Foundation; either version 2 of -// the License, or (at your option) any later version. -// -// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY -// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public -// License and a copy of the GNU General Public License along with -// Eigen. If not, see . - -#include "sparse.h" -#include - -#ifdef EIGEN_UMFPACK_SUPPORT -#include -#endif - -#ifdef EIGEN_SUPERLU_SUPPORT -#include -#endif - -template void sparse_lu(int rows, int cols) -{ - double density = (std::max)(8./(rows*cols), 0.01); - typedef Matrix DenseMatrix; - typedef Matrix DenseVector; - - DenseVector vec1 = DenseVector::Random(rows); - - std::vector zeroCoords; - std::vector nonzeroCoords; - - SparseMatrix m2(rows, cols); - DenseMatrix refMat2(rows, cols); - - DenseVector b = DenseVector::Random(cols); - DenseVector refX(cols), x(cols); - - initSparse(density, refMat2, m2, ForceNonZeroDiag, &zeroCoords, &nonzeroCoords); - - FullPivLU refLu(refMat2); - refX = refLu.solve(b); - #if defined(EIGEN_SUPERLU_SUPPORT) || defined(EIGEN_UMFPACK_SUPPORT) - Scalar refDet = refLu.determinant(); - #endif - x.setZero(); - // // SparseLU > (m2).solve(b,&x); - // // VERIFY(refX.isApprox(x,test_precision()) && "LU: default"); - - #ifdef EIGEN_UMFPACK_SUPPORT - { - // check solve - x.setZero(); - SparseLU,UmfPack> lu(m2); - VERIFY(lu.succeeded() && "umfpack LU decomposition failed"); - VERIFY(lu.solve(b,&x) && "umfpack LU solving failed"); - VERIFY(refX.isApprox(x,test_precision()) && "LU: umfpack"); - VERIFY_IS_APPROX(refDet,lu.determinant()); - // TODO check the extracted data - //std::cerr << slu.matrixL() << "\n"; - } - #endif - - #ifdef EIGEN_SUPERLU_SUPPORT - { - x.setZero(); - SparseLU,SuperLU> slu(m2); - if (slu.succeeded()) - { - if (slu.solve(b,&x)) { - VERIFY(refX.isApprox(x,test_precision()) && "LU: SuperLU"); - } - // std::cerr << refDet << " == " << slu.determinant() << "\n"; - if (slu.solve(b, &x, SvTranspose)) { - VERIFY(b.isApprox(m2.transpose() * x, test_precision())); - } - - if (slu.solve(b, &x, SvAdjoint)) { - VERIFY(b.isApprox(m2.adjoint() * x, test_precision())); - } - - if (!NumTraits::IsComplex) { - VERIFY_IS_APPROX(refDet,slu.determinant()); // FIXME det is not very stable for complex - } - } - } - #endif - -} - -void test_sparse_lu() -{ - for(int i = 0; i < g_repeat; i++) { - CALL_SUBTEST_1(sparse_lu(8, 8) ); - int s = internal::random(1,300); - 
CALL_SUBTEST_2(sparse_lu >(s,s) ); - CALL_SUBTEST_1(sparse_lu(s,s) ); - } -} diff --git a/gtsam/3rdparty/Eigen/unsupported/test/splines.cpp b/gtsam/3rdparty/Eigen/unsupported/test/splines.cpp new file mode 100644 index 000000000..fe98bf183 --- /dev/null +++ b/gtsam/3rdparty/Eigen/unsupported/test/splines.cpp @@ -0,0 +1,255 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010-2011 Hauke Heibel +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#include "main.h" + +#include + +// lets do some explicit instantiations and thus +// force the compilation of all spline functions... +template class Spline; +template class Spline; + +template class Spline; +template class Spline; +template class Spline; +template class Spline; + +template class Spline; +template class Spline; + +template class Spline; +template class Spline; +template class Spline; +template class Spline; + +Spline closed_spline2d() +{ + RowVectorXd knots(12); + knots << 0, + 0, + 0, + 0, + 0.867193179093898, + 1.660330955342408, + 2.605084834823134, + 3.484154586374428, + 4.252699478956276, + 4.252699478956276, + 4.252699478956276, + 4.252699478956276; + + MatrixXd ctrls(8,2); + ctrls << -0.370967741935484, 0.236842105263158, + -0.231401860693277, 0.442245185027632, + 0.344361228532831, 0.773369994120753, + 0.828990216203802, 0.106550882647595, + 0.407270163678382, -1.043452922172848, + -0.488467813584053, -0.390098582530090, + -0.494657189446427, 0.054804824897884, + -0.370967741935484, 0.236842105263158; + ctrls.transposeInPlace(); + + return Spline(knots, ctrls); +} + +/* create a reference spline */ +Spline spline3d() +{ + RowVectorXd knots(11); + knots << 0, + 0, + 0, + 0.118997681558377, + 0.162611735194631, + 0.498364051982143, + 0.655098003973841, + 0.679702676853675, + 1.000000000000000, + 1.000000000000000, + 1.000000000000000; + + MatrixXd ctrls(8,3); + ctrls << 0.959743958516081, 0.340385726666133, 0.585267750979777, + 0.223811939491137, 0.751267059305653, 0.255095115459269, + 0.505957051665142, 0.699076722656686, 0.890903252535799, + 0.959291425205444, 0.547215529963803, 0.138624442828679, + 0.149294005559057, 0.257508254123736, 0.840717255983663, + 0.254282178971531, 0.814284826068816, 0.243524968724989, + 0.929263623187228, 0.349983765984809, 0.196595250431208, + 0.251083857976031, 0.616044676146639, 0.473288848902729; + ctrls.transposeInPlace(); + + return Spline(knots, ctrls); +} + +/* compares evaluations against known results */ +void eval_spline3d() +{ + Spline3d spline = spline3d(); + + RowVectorXd u(10); + u << 0.351659507062997, + 0.830828627896291, 
+ 0.585264091152724, + 0.549723608291140, + 0.917193663829810, + 0.285839018820374, + 0.757200229110721, + 0.753729094278495, + 0.380445846975357, + 0.567821640725221; + + MatrixXd pts(10,3); + pts << 0.707620811535916, 0.510258911240815, 0.417485437023409, + 0.603422256426978, 0.529498282727551, 0.270351549348981, + 0.228364197569334, 0.423745615677815, 0.637687289287490, + 0.275556796335168, 0.350856706427970, 0.684295784598905, + 0.514519311047655, 0.525077224890754, 0.351628308305896, + 0.724152914315666, 0.574461155457304, 0.469860285484058, + 0.529365063753288, 0.613328702656816, 0.237837040141739, + 0.522469395136878, 0.619099658652895, 0.237139665242069, + 0.677357023849552, 0.480655768435853, 0.422227610314397, + 0.247046593173758, 0.380604672404750, 0.670065791405019; + pts.transposeInPlace(); + + for (int i=0; i::Interpolate(points,3); + + for (Eigen::DenseIndex i=0; i::Interpolate(points,3,chord_lengths); + + for (Eigen::DenseIndex i=0; i