From fe126f2e42294e19e2dbb958b0fd009d9512ae9a Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Mon, 2 May 2011 20:44:52 +0000 Subject: [PATCH] Upgraded to released version of Eigen 3.0 (was using beta before) --- gtsam/3rdparty/Eigen/Array | 13 +- gtsam/3rdparty/Eigen/CMakeLists.txt | 8 +- gtsam/3rdparty/Eigen/Cholesky | 4 +- gtsam/3rdparty/Eigen/Core | 95 ++- gtsam/3rdparty/Eigen/Eigen | 2 +- gtsam/3rdparty/Eigen/Eigen2Support | 8 +- gtsam/3rdparty/Eigen/Eigenvalues | 4 +- gtsam/3rdparty/Eigen/Geometry | 39 +- gtsam/3rdparty/Eigen/Householder | 4 +- gtsam/3rdparty/Eigen/Jacobi | 4 +- gtsam/3rdparty/Eigen/LU | 8 +- gtsam/3rdparty/Eigen/LeastSquares | 36 + gtsam/3rdparty/Eigen/QR | 10 +- gtsam/3rdparty/Eigen/QtAlignedMalloc | 12 +- gtsam/3rdparty/Eigen/SVD | 8 +- gtsam/3rdparty/Eigen/Sparse | 12 +- gtsam/3rdparty/Eigen/src/CMakeLists.txt | 3 +- gtsam/3rdparty/Eigen/src/Cholesky/LDLT.h | 90 +- gtsam/3rdparty/Eigen/src/Cholesky/LLT.h | 93 +- gtsam/3rdparty/Eigen/src/Core/Array.h | 25 +- gtsam/3rdparty/Eigen/src/Core/ArrayBase.h | 50 +- gtsam/3rdparty/Eigen/src/Core/ArrayWrapper.h | 37 +- gtsam/3rdparty/Eigen/src/Core/Assign.h | 165 ++-- gtsam/3rdparty/Eigen/src/Core/BandMatrix.h | 283 +++++-- gtsam/3rdparty/Eigen/src/Core/Block.h | 116 +-- gtsam/3rdparty/Eigen/src/Core/BooleanRedux.h | 24 +- .../Eigen/src/Core/CommaInitializer.h | 14 +- gtsam/3rdparty/Eigen/src/Core/CwiseBinaryOp.h | 77 +- .../3rdparty/Eigen/src/Core/CwiseNullaryOp.h | 77 +- gtsam/3rdparty/Eigen/src/Core/CwiseUnaryOp.h | 29 +- .../3rdparty/Eigen/src/Core/CwiseUnaryView.h | 41 +- gtsam/3rdparty/Eigen/src/Core/DenseBase.h | 211 +++-- .../3rdparty/Eigen/src/Core/DenseCoeffsBase.h | 223 +++-- .../Core/{MatrixStorage.h => DenseStorage.h} | 184 ++-- gtsam/3rdparty/Eigen/src/Core/Diagonal.h | 64 +- .../3rdparty/Eigen/src/Core/DiagonalMatrix.h | 57 +- .../3rdparty/Eigen/src/Core/DiagonalProduct.h | 28 +- gtsam/3rdparty/Eigen/src/Core/Dot.h | 104 ++- gtsam/3rdparty/Eigen/src/Core/EigenBase.h | 8 +- 
gtsam/3rdparty/Eigen/src/Core/Flagged.h | 32 +- .../Eigen/src/Core/ForceAlignedAccess.h | 13 +- gtsam/3rdparty/Eigen/src/Core/Functors.h | 623 ++++++++------ gtsam/3rdparty/Eigen/src/Core/Fuzzy.h | 210 ++--- .../Eigen/src/Core/GenericPacketMath.h | 137 +-- .../3rdparty/Eigen/src/Core/GlobalFunctions.h | 51 +- gtsam/3rdparty/Eigen/src/Core/IO.h | 27 +- gtsam/3rdparty/Eigen/src/Core/Map.h | 54 +- gtsam/3rdparty/Eigen/src/Core/MapBase.h | 182 ++-- gtsam/3rdparty/Eigen/src/Core/MathFunctions.h | 506 +++++------ gtsam/3rdparty/Eigen/src/Core/Matrix.h | 45 +- gtsam/3rdparty/Eigen/src/Core/MatrixBase.h | 197 +++-- gtsam/3rdparty/Eigen/src/Core/NestByValue.h | 9 +- gtsam/3rdparty/Eigen/src/Core/NoAlias.h | 18 +- gtsam/3rdparty/Eigen/src/Core/NumTraits.h | 20 +- .../Eigen/src/Core/PermutationMatrix.h | 630 +++++++++----- .../{DenseStorageBase.h => PlainObjectBase.h} | 249 ++++-- gtsam/3rdparty/Eigen/src/Core/Product.h | 299 ++++--- gtsam/3rdparty/Eigen/src/Core/ProductBase.h | 109 ++- gtsam/3rdparty/Eigen/src/Core/Random.h | 29 +- gtsam/3rdparty/Eigen/src/Core/Redux.h | 120 +-- gtsam/3rdparty/Eigen/src/Core/Replicate.h | 37 +- gtsam/3rdparty/Eigen/src/Core/ReturnByValue.h | 31 +- gtsam/3rdparty/Eigen/src/Core/Reverse.h | 46 +- gtsam/3rdparty/Eigen/src/Core/Select.h | 26 +- .../3rdparty/Eigen/src/Core/SelfAdjointView.h | 99 ++- .../Eigen/src/Core/SelfCwiseBinaryOp.h | 49 +- .../3rdparty/Eigen/src/Core/SolveTriangular.h | 293 +++---- gtsam/3rdparty/Eigen/src/Core/StableNorm.h | 48 +- gtsam/3rdparty/Eigen/src/Core/Stride.h | 10 +- gtsam/3rdparty/Eigen/src/Core/Swap.h | 42 +- gtsam/3rdparty/Eigen/src/Core/Transpose.h | 145 ++-- .../3rdparty/Eigen/src/Core/Transpositions.h | 330 ++++++-- .../Eigen/src/Core/TriangularMatrix.h | 301 +++++-- gtsam/3rdparty/Eigen/src/Core/VectorBlock.h | 75 +- gtsam/3rdparty/Eigen/src/Core/VectorwiseOp.h | 135 +-- gtsam/3rdparty/Eigen/src/Core/Visitor.h | 65 +- .../Eigen/src/Core/arch/AltiVec/Complex.h | 139 +-- 
.../Eigen/src/Core/arch/AltiVec/PacketMath.h | 209 ++--- .../Eigen/src/Core/arch/Default/Settings.h | 9 - .../Eigen/src/Core/arch/NEON/Complex.h | 95 ++- .../Eigen/src/Core/arch/NEON/PacketMath.h | 158 ++-- .../Eigen/src/Core/arch/SSE/Complex.h | 269 +++--- .../Eigen/src/Core/arch/SSE/MathFunctions.h | 224 ++--- .../Eigen/src/Core/arch/SSE/PacketMath.h | 373 ++++---- .../src/Core/products/CoeffBasedProduct.h | 153 ++-- .../Core/products/GeneralBlockPanelKernel.h | 451 +++++----- .../src/Core/products/GeneralMatrixMatrix.h | 123 +-- .../products/GeneralMatrixMatrixTriangular.h | 227 +++++ .../src/Core/products/GeneralMatrixVector.h | 220 ++--- .../Eigen/src/Core/products/Parallelizer.h | 22 +- .../Core/products/SelfadjointMatrixMatrix.h | 108 +-- .../Core/products/SelfadjointMatrixVector.h | 182 ++-- .../src/Core/products/SelfadjointProduct.h | 236 ++---- .../Core/products/SelfadjointRank2Update.h | 52 +- .../Core/products/TriangularMatrixMatrix.h | 67 +- .../Core/products/TriangularMatrixVector.h | 312 +++++-- .../Core/products/TriangularSolverMatrix.h | 42 +- .../Core/products/TriangularSolverVector.h | 150 ++++ gtsam/3rdparty/Eigen/src/Core/util/BlasUtil.h | 149 ++-- .../3rdparty/Eigen/src/Core/util/Constants.h | 49 +- .../Eigen/src/Core/util/DisableMSVCWarnings.h | 16 - .../src/Core/util/DisableStupidWarnings.h | 42 + .../Eigen/src/Core/util/EnableMSVCWarnings.h | 4 - .../Eigen/src/Core/util/ForwardDeclarations.h | 211 +++-- gtsam/3rdparty/Eigen/src/Core/util/Macros.h | 214 +++-- gtsam/3rdparty/Eigen/src/Core/util/Memory.h | 283 ++++--- gtsam/3rdparty/Eigen/src/Core/util/Meta.h | 176 ++-- .../src/Core/util/ReenableStupidWarnings.h | 14 + .../Eigen/src/Core/util/StaticAssert.h | 42 +- .../3rdparty/Eigen/src/Core/util/XprHelper.h | 334 ++++---- .../3rdparty/Eigen/src/Eigen2Support/Block.h | 8 +- .../Eigen/src/Eigen2Support/CMakeLists.txt | 2 + .../3rdparty/Eigen/src/Eigen2Support/Cwise.h | 45 +- .../Eigen/src/Eigen2Support/CwiseOperators.h | 38 +- 
.../src/Eigen2Support/Geometry/AlignedBox.h | 170 ++++ .../Eigen/src/Eigen2Support/Geometry/All.h | 115 +++ .../src/Eigen2Support/Geometry/AngleAxis.h | 226 +++++ .../src/Eigen2Support/Geometry/CMakeLists.txt | 6 + .../src/Eigen2Support/Geometry/Hyperplane.h | 265 ++++++ .../Eigen2Support/Geometry/ParametrizedLine.h | 153 ++++ .../src/Eigen2Support/Geometry/Quaternion.h | 506 +++++++++++ .../src/Eigen2Support/Geometry/Rotation2D.h | 157 ++++ .../src/Eigen2Support/Geometry/RotationBase.h | 134 +++ .../src/Eigen2Support/Geometry/Scaling.h | 179 ++++ .../src/Eigen2Support/Geometry/Transform.h | 798 ++++++++++++++++++ .../src/Eigen2Support/Geometry/Translation.h | 196 +++++ gtsam/3rdparty/Eigen/src/Eigen2Support/LU.h | 133 +++ .../Eigen/src/Eigen2Support/LeastSquares.h | 182 ++++ .../3rdparty/Eigen/src/Eigen2Support/Macros.h | 35 + .../Eigen/src/Eigen2Support/MathFunctions.h | 68 ++ .../3rdparty/Eigen/src/Eigen2Support/Memory.h | 58 ++ gtsam/3rdparty/Eigen/src/Eigen2Support/Meta.h | 86 ++ .../3rdparty/Eigen/src/Eigen2Support/Minor.h | 15 +- gtsam/3rdparty/Eigen/src/Eigen2Support/QR.h | 79 ++ gtsam/3rdparty/Eigen/src/Eigen2Support/SVD.h | 649 ++++++++++++++ .../src/Eigen2Support/TriangularSolver.h | 4 +- .../Eigen/src/Eigen2Support/VectorBlock.h | 22 +- .../src/Eigenvalues/ComplexEigenSolver.h | 15 +- .../Eigen/src/Eigenvalues/ComplexSchur.h | 63 +- .../Eigen/src/Eigenvalues/EigenSolver.h | 62 +- .../GeneralizedSelfAdjointEigenSolver.h | 17 +- .../src/Eigenvalues/HessenbergDecomposition.h | 31 +- .../src/Eigenvalues/MatrixBaseEigenvalues.h | 14 +- .../Eigen/src/Eigenvalues/RealSchur.h | 33 +- .../src/Eigenvalues/SelfAdjointEigenSolver.h | 130 ++- .../src/Eigenvalues/Tridiagonalization.h | 177 ++-- .../3rdparty/Eigen/src/Geometry/AlignedBox.h | 30 +- gtsam/3rdparty/Eigen/src/Geometry/AngleAxis.h | 23 +- .../3rdparty/Eigen/src/Geometry/EulerAngles.h | 16 +- .../3rdparty/Eigen/src/Geometry/Homogeneous.h | 142 ++-- .../3rdparty/Eigen/src/Geometry/Hyperplane.h | 43 +- 
.../Eigen/src/Geometry/OrthoMethods.h | 98 ++- .../Eigen/src/Geometry/ParametrizedLine.h | 44 +- .../3rdparty/Eigen/src/Geometry/Quaternion.h | 155 ++-- .../3rdparty/Eigen/src/Geometry/Rotation2D.h | 18 +- .../Eigen/src/Geometry/RotationBase.h | 36 +- gtsam/3rdparty/Eigen/src/Geometry/Scaling.h | 14 +- gtsam/3rdparty/Eigen/src/Geometry/Transform.h | 471 ++++++----- .../3rdparty/Eigen/src/Geometry/Translation.h | 12 +- gtsam/3rdparty/Eigen/src/Geometry/Umeyama.h | 55 +- .../Eigen/src/Geometry/arch/Geometry_SSE.h | 59 +- .../Eigen/src/Householder/BlockHouseholder.h | 11 +- .../Eigen/src/Householder/Householder.h | 22 +- .../src/Householder/HouseholderSequence.h | 290 +++++-- gtsam/3rdparty/Eigen/src/Jacobi/Jacobi.h | 185 ++-- gtsam/3rdparty/Eigen/src/LU/Determinant.h | 54 +- gtsam/3rdparty/Eigen/src/LU/FullPivLU.h | 91 +- gtsam/3rdparty/Eigen/src/LU/Inverse.h | 152 ++-- gtsam/3rdparty/Eigen/src/LU/PartialPivLU.h | 144 ++-- .../3rdparty/Eigen/src/LU/arch/Inverse_SSE.h | 8 +- .../Eigen/src/QR/ColPivHouseholderQR.h | 81 +- .../Eigen/src/QR/FullPivHouseholderQR.h | 194 +++-- gtsam/3rdparty/Eigen/src/QR/HouseholderQR.h | 80 +- gtsam/3rdparty/Eigen/src/SVD/JacobiSVD.h | 319 ++++--- .../Eigen/src/SVD/UpperBidiagonalization.h | 30 +- gtsam/3rdparty/Eigen/src/Sparse/AmbiVector.h | 16 +- .../Eigen/src/Sparse/CompressedStorage.h | 2 +- .../Eigen/src/Sparse/DynamicSparseMatrix.h | 45 +- .../Eigen/src/Sparse/MappedSparseMatrix.h | 10 +- gtsam/3rdparty/Eigen/src/Sparse/SparseBlock.h | 54 +- .../Eigen/src/Sparse/SparseCwiseBinaryOp.h | 85 +- .../Eigen/src/Sparse/SparseCwiseUnaryOp.h | 16 +- .../Eigen/src/Sparse/SparseDenseProduct.h | 50 +- .../Eigen/src/Sparse/SparseDiagonalProduct.h | 75 +- gtsam/3rdparty/Eigen/src/Sparse/SparseDot.h | 26 +- gtsam/3rdparty/Eigen/src/Sparse/SparseFuzzy.h | 4 +- .../3rdparty/Eigen/src/Sparse/SparseMatrix.h | 118 ++- .../Eigen/src/Sparse/SparseMatrixBase.h | 144 ++-- .../3rdparty/Eigen/src/Sparse/SparseProduct.h | 35 +- 
gtsam/3rdparty/Eigen/src/Sparse/SparseRedux.h | 12 +- .../Eigen/src/Sparse/SparseSelfAdjointView.h | 262 +++++- .../Eigen/src/Sparse/SparseSparseProduct.h | 125 +-- .../Eigen/src/Sparse/SparseTranspose.h | 2 +- .../Eigen/src/Sparse/SparseTriangularView.h | 24 +- gtsam/3rdparty/Eigen/src/Sparse/SparseUtil.h | 36 +- .../3rdparty/Eigen/src/Sparse/SparseVector.h | 63 +- gtsam/3rdparty/Eigen/src/Sparse/SparseView.h | 12 +- .../Eigen/src/Sparse/TriangularSolver.h | 76 +- gtsam/3rdparty/Eigen/src/StlSupport/details.h | 94 ++- gtsam/3rdparty/Eigen/src/misc/Image.h | 22 +- gtsam/3rdparty/Eigen/src/misc/Kernel.h | 22 +- gtsam/3rdparty/Eigen/src/misc/Solve.h | 24 +- .../Eigen/src/plugins/ArrayCwiseBinaryOps.h | 18 +- .../Eigen/src/plugins/ArrayCwiseUnaryOps.h | 73 +- .../3rdparty/Eigen/src/plugins/BlockMethods.h | 114 +-- .../Eigen/src/plugins/CommonCwiseBinaryOps.h | 8 +- .../Eigen/src/plugins/CommonCwiseUnaryOps.h | 70 +- .../Eigen/src/plugins/MatrixCwiseBinaryOps.h | 20 +- .../Eigen/src/plugins/MatrixCwiseUnaryOps.h | 12 +- gtsam/linear/HessianFactor.cpp | 13 +- 210 files changed, 15156 insertions(+), 7291 deletions(-) create mode 100644 gtsam/3rdparty/Eigen/LeastSquares rename gtsam/3rdparty/Eigen/src/Core/{MatrixStorage.h => DenseStorage.h} (50%) rename gtsam/3rdparty/Eigen/src/Core/{DenseStorageBase.h => PlainObjectBase.h} (65%) create mode 100644 gtsam/3rdparty/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h create mode 100644 gtsam/3rdparty/Eigen/src/Core/products/TriangularSolverVector.h delete mode 100644 gtsam/3rdparty/Eigen/src/Core/util/DisableMSVCWarnings.h create mode 100644 gtsam/3rdparty/Eigen/src/Core/util/DisableStupidWarnings.h delete mode 100644 gtsam/3rdparty/Eigen/src/Core/util/EnableMSVCWarnings.h create mode 100644 gtsam/3rdparty/Eigen/src/Core/util/ReenableStupidWarnings.h create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/AlignedBox.h create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/All.h create mode 100644 
gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/AngleAxis.h create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/CMakeLists.txt create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Hyperplane.h create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Quaternion.h create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Rotation2D.h create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/RotationBase.h create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Scaling.h create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Transform.h create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Translation.h create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/LU.h create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/LeastSquares.h create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/Macros.h create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/MathFunctions.h create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/Memory.h create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/Meta.h create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/QR.h create mode 100644 gtsam/3rdparty/Eigen/src/Eigen2Support/SVD.h diff --git a/gtsam/3rdparty/Eigen/Array b/gtsam/3rdparty/Eigen/Array index 6eaa0f978..3d004fb69 100644 --- a/gtsam/3rdparty/Eigen/Array +++ b/gtsam/3rdparty/Eigen/Array @@ -1,14 +1,11 @@ #ifndef EIGEN_ARRAY_MODULE_H #define EIGEN_ARRAY_MODULE_H -#ifdef _MSC_VER -#pragma message("The inclusion of Eigen/Array is deprecated. \ -The array module is available as soon as Eigen/Core is included.") -#elif __GNUC__ -#warning "The inclusion of Eigen/Array is deprecated. \ -The array module is available as soon as Eigen/Core is included." 
-#endif - +// include Core first to handle Eigen2 support macros #include "Core" +#ifndef EIGEN2_SUPPORT + #error The Eigen/Array header does no longer exist in Eigen3. All that functionality has moved to Eigen/Core. +#endif + #endif // EIGEN_ARRAY_MODULE_H diff --git a/gtsam/3rdparty/Eigen/CMakeLists.txt b/gtsam/3rdparty/Eigen/CMakeLists.txt index 2d96726d4..a92dd6f6c 100644 --- a/gtsam/3rdparty/Eigen/CMakeLists.txt +++ b/gtsam/3rdparty/Eigen/CMakeLists.txt @@ -1,6 +1,12 @@ +include(RegexUtils) +test_escape_string_as_regex() + file(GLOB Eigen_directory_files "*") + +escape_string_as_regex(ESCAPED_CMAKE_CURRENT_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}") + foreach(f ${Eigen_directory_files}) - if(NOT f MATCHES ".txt" AND NOT f MATCHES "${CMAKE_CURRENT_SOURCE_DIR}/src") + if(NOT f MATCHES "\\.txt" AND NOT f MATCHES "${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/[.].+" AND NOT f MATCHES "${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/src") list(APPEND Eigen_directory_files_to_install ${f}) endif() endforeach(f ${Eigen_directory_files}) diff --git a/gtsam/3rdparty/Eigen/Cholesky b/gtsam/3rdparty/Eigen/Cholesky index b6c83e0ef..53f7bf911 100644 --- a/gtsam/3rdparty/Eigen/Cholesky +++ b/gtsam/3rdparty/Eigen/Cholesky @@ -3,7 +3,7 @@ #include "Core" -#include "src/Core/util/DisableMSVCWarnings.h" +#include "src/Core/util/DisableStupidWarnings.h" namespace Eigen { @@ -27,7 +27,7 @@ namespace Eigen { } // namespace Eigen -#include "src/Core/util/EnableMSVCWarnings.h" +#include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_CHOLESKY_MODULE_H /* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/gtsam/3rdparty/Eigen/Core b/gtsam/3rdparty/Eigen/Core index 6951babd0..7f384662e 100644 --- a/gtsam/3rdparty/Eigen/Core +++ b/gtsam/3rdparty/Eigen/Core @@ -2,7 +2,7 @@ // for linear algebra. 
// // Copyright (C) 2008 Gael Guennebaud -// Copyright (C) 2007-2010 Benoit Jacob +// Copyright (C) 2007-2011 Benoit Jacob // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public @@ -26,21 +26,13 @@ #ifndef EIGEN_CORE_H #define EIGEN_CORE_H -#define EIGEN_NO_STATIC_ASSERT - -// first thing Eigen does: prevent MSVC from committing suicide -#include "src/Core/util/DisableMSVCWarnings.h" +// first thing Eigen does: stop the compiler from committing suicide +#include "src/Core/util/DisableStupidWarnings.h" // then include this file where all our macros are defined. It's really important to do it first because // it's where we do all the alignment settings (platform detection and honoring the user's will if he // defined e.g. EIGEN_DONT_ALIGN) so it needs to be done before we do anything with vectorization. -#ifndef EIGEN_PARSED_BY_DOXYGEN - #include "src/Core/util/Macros.h" -#else - namespace Eigen { // for some reason Doxygen needs this namespace - #include "src/Core/util/Macros.h" - } -#endif +#include "src/Core/util/Macros.h" // if alignment is disabled, then disable vectorization. Note: EIGEN_ALIGN is the proper check, it takes into // account both the user's will (EIGEN_DONT_ALIGN) and our own platform checks @@ -95,28 +87,30 @@ #endif // include files - #if (defined __GNUC__) && (defined __MINGW32__) - #include - //including intrin.h works around a MINGW bug http://sourceforge.net/tracker/?func=detail&atid=102435&aid=2962480&group_id=2435 - //in essence, intrin.h is included by windows.h and also declares intrinsics (just as emmintrin.h etc. below do). However, - //intrin.h uses an extern "C" declaration, and g++ thus complains of duplicate declarations with conflicting linkage. The linkage for intrinsics - //doesn't matter, but at that stage the compiler doesn't know; so, to avoid compile errors when windows.h is included after Eigen/Core, - //include intrin here. 
- #endif - #include - #include - #ifdef EIGEN_VECTORIZE_SSE3 + + // This extern "C" works around a MINGW-w64 compilation issue + // https://sourceforge.net/tracker/index.php?func=detail&aid=3018394&group_id=202880&atid=983354 + // In essence, intrin.h is included by windows.h and also declares intrinsics (just as emmintrin.h etc. below do). + // However, intrin.h uses an extern "C" declaration, and g++ thus complains of duplicate declarations + // with conflicting linkage. The linkage for intrinsics doesn't matter, but at that stage the compiler doesn't know; + // so, to avoid compile errors when windows.h is included after Eigen/Core, ensure intrinsics are extern "C" here too. + // notice that since these are C headers, the extern "C" is theoretically needed anyways. + extern "C" { + #include + #include + #ifdef EIGEN_VECTORIZE_SSE3 #include - #endif - #ifdef EIGEN_VECTORIZE_SSSE3 + #endif + #ifdef EIGEN_VECTORIZE_SSSE3 #include - #endif - #ifdef EIGEN_VECTORIZE_SSE4_1 + #endif + #ifdef EIGEN_VECTORIZE_SSE4_1 #include - #endif - #ifdef EIGEN_VECTORIZE_SSE4_2 + #endif + #ifdef EIGEN_VECTORIZE_SSE4_2 #include - #endif + #endif + } // end extern "C" #elif defined __ALTIVEC__ #define EIGEN_VECTORIZE #define EIGEN_VECTORIZE_ALTIVEC @@ -158,16 +152,17 @@ #include #include #include +#include // for CHAR_BIT // for min/max: #include // for outputting debug info #ifdef EIGEN_DEBUG_ASSIGN -#include +#include #endif // required for __cpuid, needs to be included after cmath -#if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_IX64)) +#if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_X64)) #include #endif @@ -211,6 +206,32 @@ inline static const char *SimdInstructionSetsInUse(void) { #endif } +#define STAGE10_FULL_EIGEN2_API 10 +#define STAGE20_RESOLVE_API_CONFLICTS 20 +#define STAGE30_FULL_EIGEN3_API 30 +#define STAGE40_FULL_EIGEN3_STRICTNESS 40 +#define STAGE99_NO_EIGEN2_SUPPORT 99 + +#if defined EIGEN2_SUPPORT_STAGE40_FULL_EIGEN3_STRICTNESS + #define EIGEN2_SUPPORT 
+ #define EIGEN2_SUPPORT_STAGE STAGE40_FULL_EIGEN3_STRICTNESS +#elif defined EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API + #define EIGEN2_SUPPORT + #define EIGEN2_SUPPORT_STAGE STAGE30_FULL_EIGEN3_API +#elif defined EIGEN2_SUPPORT_STAGE20_RESOLVE_API_CONFLICTS + #define EIGEN2_SUPPORT + #define EIGEN2_SUPPORT_STAGE STAGE20_RESOLVE_API_CONFLICTS +#elif defined EIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API + #define EIGEN2_SUPPORT + #define EIGEN2_SUPPORT_STAGE STAGE10_FULL_EIGEN2_API +#elif defined EIGEN2_SUPPORT + // default to stage 3, that's what it's always meant + #define EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API + #define EIGEN2_SUPPORT_STAGE STAGE30_FULL_EIGEN3_API +#else + #define EIGEN2_SUPPORT_STAGE STAGE99_NO_EIGEN2_SUPPORT +#endif + #ifdef EIGEN2_SUPPORT #undef minor #endif @@ -266,13 +287,14 @@ using std::size_t; #endif #include "src/Core/util/BlasUtil.h" -#include "src/Core/MatrixStorage.h" +#include "src/Core/DenseStorage.h" #include "src/Core/NestByValue.h" #include "src/Core/ForceAlignedAccess.h" #include "src/Core/ReturnByValue.h" #include "src/Core/NoAlias.h" -#include "src/Core/DenseStorageBase.h" +#include "src/Core/PlainObjectBase.h" #include "src/Core/Matrix.h" +#include "src/Core/Array.h" #include "src/Core/CwiseBinaryOp.h" #include "src/Core/CwiseUnaryOp.h" #include "src/Core/CwiseNullaryOp.h" @@ -308,6 +330,7 @@ using std::size_t; #include "src/Core/products/GeneralBlockPanelKernel.h" #include "src/Core/products/GeneralMatrixVector.h" #include "src/Core/products/GeneralMatrixMatrix.h" +#include "src/Core/products/GeneralMatrixMatrixTriangular.h" #include "src/Core/products/SelfadjointMatrixVector.h" #include "src/Core/products/SelfadjointMatrixMatrix.h" #include "src/Core/products/SelfadjointProduct.h" @@ -315,6 +338,7 @@ using std::size_t; #include "src/Core/products/TriangularMatrixVector.h" #include "src/Core/products/TriangularMatrixMatrix.h" #include "src/Core/products/TriangularSolverMatrix.h" +#include "src/Core/products/TriangularSolverVector.h" 
#include "src/Core/BandMatrix.h" #include "src/Core/BooleanRedux.h" @@ -325,13 +349,12 @@ using std::size_t; #include "src/Core/Reverse.h" #include "src/Core/ArrayBase.h" #include "src/Core/ArrayWrapper.h" -#include "src/Core/Array.h" } // namespace Eigen #include "src/Core/GlobalFunctions.h" -#include "src/Core/util/EnableMSVCWarnings.h" +#include "src/Core/util/ReenableStupidWarnings.h" #ifdef EIGEN2_SUPPORT #include "Eigen2Support" diff --git a/gtsam/3rdparty/Eigen/Eigen b/gtsam/3rdparty/Eigen/Eigen index 654c8dc63..19b40ea4e 100644 --- a/gtsam/3rdparty/Eigen/Eigen +++ b/gtsam/3rdparty/Eigen/Eigen @@ -1,2 +1,2 @@ #include "Dense" -#include "Sparse" +//#include "Sparse" diff --git a/gtsam/3rdparty/Eigen/Eigen2Support b/gtsam/3rdparty/Eigen/Eigen2Support index b612c2961..d96592a8d 100644 --- a/gtsam/3rdparty/Eigen/Eigen2Support +++ b/gtsam/3rdparty/Eigen/Eigen2Support @@ -29,7 +29,7 @@ #error Eigen2 support must be enabled by defining EIGEN2_SUPPORT before including any Eigen header #endif -#include "src/Core/util/DisableMSVCWarnings.h" +#include "src/Core/util/DisableStupidWarnings.h" namespace Eigen { @@ -43,6 +43,9 @@ namespace Eigen { * */ +#include "src/Eigen2Support/Macros.h" +#include "src/Eigen2Support/Memory.h" +#include "src/Eigen2Support/Meta.h" #include "src/Eigen2Support/Lazy.h" #include "src/Eigen2Support/Cwise.h" #include "src/Eigen2Support/CwiseOperators.h" @@ -50,11 +53,12 @@ namespace Eigen { #include "src/Eigen2Support/Block.h" #include "src/Eigen2Support/VectorBlock.h" #include "src/Eigen2Support/Minor.h" +#include "src/Eigen2Support/MathFunctions.h" } // namespace Eigen -#include "src/Core/util/EnableMSVCWarnings.h" +#include "src/Core/util/ReenableStupidWarnings.h" // Eigen2 used to include iostream #include diff --git a/gtsam/3rdparty/Eigen/Eigenvalues b/gtsam/3rdparty/Eigen/Eigenvalues index de85fc5b9..250c0f466 100644 --- a/gtsam/3rdparty/Eigen/Eigenvalues +++ b/gtsam/3rdparty/Eigen/Eigenvalues @@ -3,7 +3,7 @@ #include "Core" -#include 
"src/Core/util/DisableMSVCWarnings.h" +#include "src/Core/util/DisableStupidWarnings.h" #include "Cholesky" #include "Jacobi" @@ -38,7 +38,7 @@ namespace Eigen { } // namespace Eigen -#include "src/Core/util/EnableMSVCWarnings.h" +#include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_EIGENVALUES_MODULE_H /* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/gtsam/3rdparty/Eigen/Geometry b/gtsam/3rdparty/Eigen/Geometry index 76e64aad0..78277c0c5 100644 --- a/gtsam/3rdparty/Eigen/Geometry +++ b/gtsam/3rdparty/Eigen/Geometry @@ -3,7 +3,7 @@ #include "Core" -#include "src/Core/util/DisableMSVCWarnings.h" +#include "src/Core/util/DisableStupidWarnings.h" #include "SVD" #include "LU" @@ -33,27 +33,34 @@ namespace Eigen { */ #include "src/Geometry/OrthoMethods.h" -#include "src/Geometry/Homogeneous.h" -#include "src/Geometry/RotationBase.h" -#include "src/Geometry/Rotation2D.h" -#include "src/Geometry/Quaternion.h" -#include "src/Geometry/AngleAxis.h" #include "src/Geometry/EulerAngles.h" -#include "src/Geometry/Transform.h" -#include "src/Geometry/Translation.h" -#include "src/Geometry/Scaling.h" -#include "src/Geometry/Hyperplane.h" -#include "src/Geometry/ParametrizedLine.h" -#include "src/Geometry/AlignedBox.h" -#include "src/Geometry/Umeyama.h" -#if defined EIGEN_VECTORIZE_SSE - #include "src/Geometry/arch/Geometry_SSE.h" +#if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS + #include "src/Geometry/Homogeneous.h" + #include "src/Geometry/RotationBase.h" + #include "src/Geometry/Rotation2D.h" + #include "src/Geometry/Quaternion.h" + #include "src/Geometry/AngleAxis.h" + #include "src/Geometry/Transform.h" + #include "src/Geometry/Translation.h" + #include "src/Geometry/Scaling.h" + #include "src/Geometry/Hyperplane.h" + #include "src/Geometry/ParametrizedLine.h" + #include "src/Geometry/AlignedBox.h" + #include "src/Geometry/Umeyama.h" + + #if defined EIGEN_VECTORIZE_SSE + #include "src/Geometry/arch/Geometry_SSE.h" + #endif +#endif + +#ifdef 
EIGEN2_SUPPORT +#include "src/Eigen2Support/Geometry/All.h" #endif } // namespace Eigen -#include "src/Core/util/EnableMSVCWarnings.h" +#include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_GEOMETRY_MODULE_H /* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/gtsam/3rdparty/Eigen/Householder b/gtsam/3rdparty/Eigen/Householder index 11a6eefd1..6b86cf65c 100644 --- a/gtsam/3rdparty/Eigen/Householder +++ b/gtsam/3rdparty/Eigen/Householder @@ -3,7 +3,7 @@ #include "Core" -#include "src/Core/util/DisableMSVCWarnings.h" +#include "src/Core/util/DisableStupidWarnings.h" namespace Eigen { @@ -21,7 +21,7 @@ namespace Eigen { } // namespace Eigen -#include "src/Core/util/EnableMSVCWarnings.h" +#include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_HOUSEHOLDER_MODULE_H /* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/gtsam/3rdparty/Eigen/Jacobi b/gtsam/3rdparty/Eigen/Jacobi index ce6ac1bff..afa676813 100644 --- a/gtsam/3rdparty/Eigen/Jacobi +++ b/gtsam/3rdparty/Eigen/Jacobi @@ -3,7 +3,7 @@ #include "Core" -#include "src/Core/util/DisableMSVCWarnings.h" +#include "src/Core/util/DisableStupidWarnings.h" namespace Eigen { @@ -23,7 +23,7 @@ namespace Eigen { } // namespace Eigen -#include "src/Core/util/EnableMSVCWarnings.h" +#include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_JACOBI_MODULE_H /* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/gtsam/3rdparty/Eigen/LU b/gtsam/3rdparty/Eigen/LU index 9ef9c97ec..226f88ca3 100644 --- a/gtsam/3rdparty/Eigen/LU +++ b/gtsam/3rdparty/Eigen/LU @@ -3,7 +3,7 @@ #include "Core" -#include "src/Core/util/DisableMSVCWarnings.h" +#include "src/Core/util/DisableStupidWarnings.h" namespace Eigen { @@ -30,9 +30,13 @@ namespace Eigen { #include "src/LU/arch/Inverse_SSE.h" #endif +#ifdef EIGEN2_SUPPORT + #include "src/Eigen2Support/LU.h" +#endif + } // namespace Eigen -#include "src/Core/util/EnableMSVCWarnings.h" +#include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_LU_MODULE_H 
/* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/gtsam/3rdparty/Eigen/LeastSquares b/gtsam/3rdparty/Eigen/LeastSquares new file mode 100644 index 000000000..93a6302dc --- /dev/null +++ b/gtsam/3rdparty/Eigen/LeastSquares @@ -0,0 +1,36 @@ +#ifndef EIGEN_REGRESSION_MODULE_H +#define EIGEN_REGRESSION_MODULE_H + +#ifndef EIGEN2_SUPPORT +#error LeastSquares is only available in Eigen2 support mode (define EIGEN2_SUPPORT) +#endif + +// exclude from normal eigen3-only documentation +#ifdef EIGEN2_SUPPORT + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +#include "Eigenvalues" +#include "Geometry" + +namespace Eigen { + +/** \defgroup LeastSquares_Module LeastSquares module + * This module provides linear regression and related features. + * + * \code + * #include + * \endcode + */ + +#include "src/Eigen2Support/LeastSquares.h" + +} // namespace Eigen + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN2_SUPPORT + +#endif // EIGEN_REGRESSION_MODULE_H diff --git a/gtsam/3rdparty/Eigen/QR b/gtsam/3rdparty/Eigen/QR index fc3937114..97c1788ee 100644 --- a/gtsam/3rdparty/Eigen/QR +++ b/gtsam/3rdparty/Eigen/QR @@ -3,7 +3,7 @@ #include "Core" -#include "src/Core/util/DisableMSVCWarnings.h" +#include "src/Core/util/DisableStupidWarnings.h" #include "Cholesky" #include "Jacobi" @@ -29,13 +29,17 @@ namespace Eigen { #include "src/QR/FullPivHouseholderQR.h" #include "src/QR/ColPivHouseholderQR.h" +#ifdef EIGEN2_SUPPORT +#include "src/Eigen2Support/QR.h" +#endif } // namespace Eigen -#include "src/Core/util/EnableMSVCWarnings.h" +#include "src/Core/util/ReenableStupidWarnings.h" -// FIXME for compatibility we include Eigenvalues here: +#ifdef EIGEN2_SUPPORT #include "Eigenvalues" +#endif #endif // EIGEN_QR_MODULE_H /* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/gtsam/3rdparty/Eigen/QtAlignedMalloc b/gtsam/3rdparty/Eigen/QtAlignedMalloc index 698607faa..46f7d83b7 100644 --- a/gtsam/3rdparty/Eigen/QtAlignedMalloc +++ 
b/gtsam/3rdparty/Eigen/QtAlignedMalloc @@ -6,27 +6,27 @@ #if (!EIGEN_MALLOC_ALREADY_ALIGNED) -#include "src/Core/util/DisableMSVCWarnings.h" +#include "src/Core/util/DisableStupidWarnings.h" void *qMalloc(size_t size) { - return Eigen::ei_aligned_malloc(size); + return Eigen::internal::aligned_malloc(size); } void qFree(void *ptr) { - Eigen::ei_aligned_free(ptr); + Eigen::internal::aligned_free(ptr); } void *qRealloc(void *ptr, size_t size) { - void* newPtr = Eigen::ei_aligned_malloc(size); + void* newPtr = Eigen::internal::aligned_malloc(size); memcpy(newPtr, ptr, size); - Eigen::ei_aligned_free(ptr); + Eigen::internal::aligned_free(ptr); return newPtr; } -#include "src/Core/util/EnableMSVCWarnings.h" +#include "src/Core/util/ReenableStupidWarnings.h" #endif diff --git a/gtsam/3rdparty/Eigen/SVD b/gtsam/3rdparty/Eigen/SVD index 93277dbc3..d24471fd7 100644 --- a/gtsam/3rdparty/Eigen/SVD +++ b/gtsam/3rdparty/Eigen/SVD @@ -5,7 +5,7 @@ #include "Householder" #include "Jacobi" -#include "src/Core/util/DisableMSVCWarnings.h" +#include "src/Core/util/DisableStupidWarnings.h" namespace Eigen { @@ -26,9 +26,13 @@ namespace Eigen { #include "src/SVD/JacobiSVD.h" #include "src/SVD/UpperBidiagonalization.h" +#ifdef EIGEN2_SUPPORT +#include "src/Eigen2Support/SVD.h" +#endif + } // namespace Eigen -#include "src/Core/util/EnableMSVCWarnings.h" +#include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SVD_MODULE_H /* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/gtsam/3rdparty/Eigen/Sparse b/gtsam/3rdparty/Eigen/Sparse index ce6ef2f3c..7425b3a41 100644 --- a/gtsam/3rdparty/Eigen/Sparse +++ b/gtsam/3rdparty/Eigen/Sparse @@ -3,7 +3,7 @@ #include "Core" -#include "src/Core/util/DisableMSVCWarnings.h" +#include "src/Core/util/DisableStupidWarnings.h" #include #include @@ -11,6 +11,14 @@ #include #include +#ifdef EIGEN2_SUPPORT +#define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET +#endif + +#ifndef EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET +#error The 
sparse module API is not stable yet. To use it anyway, please define the EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET preprocessor token. +#endif + namespace Eigen { /** \defgroup Sparse_Module Sparse module @@ -55,7 +63,7 @@ struct Sparse {}; } // namespace Eigen -#include "src/Core/util/EnableMSVCWarnings.h" +#include "src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_SPARSE_MODULE_H diff --git a/gtsam/3rdparty/Eigen/src/CMakeLists.txt b/gtsam/3rdparty/Eigen/src/CMakeLists.txt index 52696a803..c326f374d 100644 --- a/gtsam/3rdparty/Eigen/src/CMakeLists.txt +++ b/gtsam/3rdparty/Eigen/src/CMakeLists.txt @@ -1,6 +1,7 @@ file(GLOB Eigen_src_subdirectories "*") +escape_string_as_regex(ESCAPED_CMAKE_CURRENT_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}") foreach(f ${Eigen_src_subdirectories}) - if(NOT f MATCHES ".txt") + if(NOT f MATCHES "\\.txt" AND NOT f MATCHES "${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/[.].+" ) add_subdirectory(${f}) endif() endforeach() diff --git a/gtsam/3rdparty/Eigen/src/Cholesky/LDLT.h b/gtsam/3rdparty/Eigen/src/Cholesky/LDLT.h index 15a92f8fe..5e2352caa 100644 --- a/gtsam/3rdparty/Eigen/src/Cholesky/LDLT.h +++ b/gtsam/3rdparty/Eigen/src/Cholesky/LDLT.h @@ -27,7 +27,9 @@ #ifndef EIGEN_LDLT_H #define EIGEN_LDLT_H +namespace internal { template struct LDLT_Traits; +} /** \ingroup cholesky_Module * @@ -74,7 +76,7 @@ template class LDLT typedef Transpositions TranspositionType; typedef PermutationMatrix PermutationType; - typedef LDLT_Traits Traits; + typedef internal::LDLT_Traits Traits; /** \brief Default Constructor. 
* @@ -108,14 +110,14 @@ template class LDLT /** \returns a view of the upper triangular matrix U */ inline typename Traits::MatrixU matrixU() const { - ei_assert(m_isInitialized && "LDLT is not initialized."); + eigen_assert(m_isInitialized && "LDLT is not initialized."); return Traits::getU(m_matrix); } /** \returns a view of the lower triangular matrix L */ inline typename Traits::MatrixL matrixL() const { - ei_assert(m_isInitialized && "LDLT is not initialized."); + eigen_assert(m_isInitialized && "LDLT is not initialized."); return Traits::getL(m_matrix); } @@ -123,28 +125,35 @@ template class LDLT */ inline const TranspositionType& transpositionsP() const { - ei_assert(m_isInitialized && "LDLT is not initialized."); + eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_transpositions; } /** \returns the coefficients of the diagonal matrix D */ - inline Diagonal vectorD(void) const + inline Diagonal vectorD(void) const { - ei_assert(m_isInitialized && "LDLT is not initialized."); + eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_matrix.diagonal(); } /** \returns true if the matrix is positive (semidefinite) */ inline bool isPositive(void) const { - ei_assert(m_isInitialized && "LDLT is not initialized."); + eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_sign == 1; } + + #ifdef EIGEN2_SUPPORT + inline bool isPositiveDefinite() const + { + return isPositive(); + } + #endif /** \returns true if the matrix is negative (semidefinite) */ inline bool isNegative(void) const { - ei_assert(m_isInitialized && "LDLT is not initialized."); + eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_sign == -1; } @@ -155,15 +164,24 @@ template class LDLT * \sa solveInPlace(), MatrixBase::ldlt() */ template - inline const ei_solve_retval + inline const internal::solve_retval solve(const MatrixBase& b) const { - ei_assert(m_isInitialized && "LDLT is not initialized."); - 
ei_assert(m_matrix.rows()==b.rows() + eigen_assert(m_isInitialized && "LDLT is not initialized."); + eigen_assert(m_matrix.rows()==b.rows() && "LDLT::solve(): invalid number of rows of the right hand side matrix b"); - return ei_solve_retval(*this, b.derived()); + return internal::solve_retval(*this, b.derived()); } + #ifdef EIGEN2_SUPPORT + template + bool solve(const MatrixBase& b, ResultType *result) const + { + *result = this->solve(b); + return true; + } + #endif + template bool solveInPlace(MatrixBase &bAndX) const; @@ -175,7 +193,7 @@ template class LDLT */ inline const MatrixType& matrixLDLT() const { - ei_assert(m_isInitialized && "LDLT is not initialized."); + eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_matrix; } @@ -199,9 +217,11 @@ template class LDLT bool m_isInitialized; }; -template struct ei_ldlt_inplace; +namespace internal { -template<> struct ei_ldlt_inplace +template struct ldlt_inplace; + +template<> struct ldlt_inplace { template static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0) @@ -209,14 +229,14 @@ template<> struct ei_ldlt_inplace typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; - ei_assert(mat.rows()==mat.cols()); + eigen_assert(mat.rows()==mat.cols()); const Index size = mat.rows(); if (size <= 1) { transpositions.setIdentity(); if(sign) - *sign = ei_real(mat.coeff(0,0))>0 ? 1:-1; + *sign = real(mat.coeff(0,0))>0 ? 1:-1; return true; } @@ -234,10 +254,10 @@ template<> struct ei_ldlt_inplace // The biggest overall is the point of reference to which further diagonals // are compared; if any diagonal is negligible compared // to the largest overall, the algorithm bails. - cutoff = ei_abs(NumTraits::epsilon() * biggest_in_corner); + cutoff = abs(NumTraits::epsilon() * biggest_in_corner); if(sign) - *sign = ei_real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0 ? 
1 : -1; + *sign = real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0 ? 1 : -1; } // Finish early if the matrix is not full rank. @@ -259,11 +279,11 @@ template<> struct ei_ldlt_inplace for(int i=k+1;i::IsComplex) - mat.coeffRef(index_of_biggest_in_corner,k) = ei_conj(mat.coeff(index_of_biggest_in_corner,k)); + mat.coeffRef(index_of_biggest_in_corner,k) = conj(mat.coeff(index_of_biggest_in_corner,k)); } // partition the matrix: @@ -282,7 +302,7 @@ template<> struct ei_ldlt_inplace if(rs>0) A21.noalias() -= A20 * temp.head(k); } - if((rs>0) && (ei_abs(mat.coeffRef(k,k)) > cutoff)) + if((rs>0) && (abs(mat.coeffRef(k,k)) > cutoff)) A21 /= mat.coeffRef(k,k); } @@ -290,13 +310,13 @@ template<> struct ei_ldlt_inplace } }; -template<> struct ei_ldlt_inplace +template<> struct ldlt_inplace { template static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0) { Transpose matt(mat); - return ei_ldlt_inplace::unblocked(matt, transpositions, temp, sign); + return ldlt_inplace::unblocked(matt, transpositions, temp, sign); } }; @@ -316,12 +336,14 @@ template struct LDLT_Traits inline static MatrixU getU(const MatrixType& m) { return m; } }; +} // end namespace internal + /** Compute / recompute the LDLT decomposition A = L D L^* = U^* D U of \a matrix */ template LDLT& LDLT::compute(const MatrixType& a) { - ei_assert(a.rows()==a.cols()); + eigen_assert(a.rows()==a.cols()); const Index size = a.rows(); m_matrix = a; @@ -330,22 +352,23 @@ LDLT& LDLT::compute(const MatrixType& a) m_isInitialized = false; m_temporary.resize(size); - ei_ldlt_inplace::unblocked(m_matrix, m_transpositions, m_temporary, &m_sign); + internal::ldlt_inplace::unblocked(m_matrix, m_transpositions, m_temporary, &m_sign); m_isInitialized = true; return *this; } +namespace internal { template -struct ei_solve_retval, Rhs> - : ei_solve_retval_base, Rhs> +struct solve_retval, Rhs> + : solve_retval_base, Rhs> { typedef LDLT<_MatrixType,_UpLo> 
LDLTType; EIGEN_MAKE_SOLVE_HELPERS(LDLTType,Rhs) template void evalTo(Dest& dst) const { - ei_assert(rhs().rows() == dec().matrixLDLT().rows()); + eigen_assert(rhs().rows() == dec().matrixLDLT().rows()); // dst = P b dst = dec().transpositionsP() * rhs(); @@ -362,6 +385,7 @@ struct ei_solve_retval, Rhs> dst = dec().transpositionsP().transpose() * dst; } }; +} /** \internal use x = ldlt_object.solve(x); * @@ -380,9 +404,9 @@ template template bool LDLT::solveInPlace(MatrixBase &bAndX) const { - ei_assert(m_isInitialized && "LDLT is not initialized."); + eigen_assert(m_isInitialized && "LDLT is not initialized."); const Index size = m_matrix.rows(); - ei_assert(size == bAndX.rows()); + eigen_assert(size == bAndX.rows()); bAndX = this->solve(bAndX); @@ -395,7 +419,7 @@ bool LDLT::solveInPlace(MatrixBase &bAndX) const template MatrixType LDLT::reconstructedMatrix() const { - ei_assert(m_isInitialized && "LDLT is not initialized."); + eigen_assert(m_isInitialized && "LDLT is not initialized."); const Index size = m_matrix.rows(); MatrixType res(size,size); diff --git a/gtsam/3rdparty/Eigen/src/Cholesky/LLT.h b/gtsam/3rdparty/Eigen/src/Cholesky/LLT.h index df135fce3..a8fc525e8 100644 --- a/gtsam/3rdparty/Eigen/src/Cholesky/LLT.h +++ b/gtsam/3rdparty/Eigen/src/Cholesky/LLT.h @@ -25,7 +25,9 @@ #ifndef EIGEN_LLT_H #define EIGEN_LLT_H +namespace internal{ template struct LLT_Traits; +} /** \ingroup cholesky_Module * @@ -68,12 +70,12 @@ template class LLT typedef typename MatrixType::Index Index; enum { - PacketSize = ei_packet_traits::size, + PacketSize = internal::packet_traits::size, AlignmentMask = int(PacketSize)-1, UpLo = _UpLo }; - typedef LLT_Traits Traits; + typedef internal::LLT_Traits Traits; /** * \brief Default Constructor. 
@@ -102,14 +104,14 @@ template class LLT /** \returns a view of the upper triangular matrix U */ inline typename Traits::MatrixU matrixU() const { - ei_assert(m_isInitialized && "LLT is not initialized."); + eigen_assert(m_isInitialized && "LLT is not initialized."); return Traits::getU(m_matrix); } /** \returns a view of the lower triangular matrix L */ inline typename Traits::MatrixL matrixL() const { - ei_assert(m_isInitialized && "LLT is not initialized."); + eigen_assert(m_isInitialized && "LLT is not initialized."); return Traits::getL(m_matrix); } @@ -124,15 +126,26 @@ template class LLT * \sa solveInPlace(), MatrixBase::llt() */ template - inline const ei_solve_retval + inline const internal::solve_retval solve(const MatrixBase& b) const { - ei_assert(m_isInitialized && "LLT is not initialized."); - ei_assert(m_matrix.rows()==b.rows() + eigen_assert(m_isInitialized && "LLT is not initialized."); + eigen_assert(m_matrix.rows()==b.rows() && "LLT::solve(): invalid number of rows of the right hand side matrix b"); - return ei_solve_retval(*this, b.derived()); + return internal::solve_retval(*this, b.derived()); } + #ifdef EIGEN2_SUPPORT + template + bool solve(const MatrixBase& b, ResultType *result) const + { + *result = this->solve(b); + return true; + } + + bool isPositiveDefinite() const { return true; } + #endif + template void solveInPlace(MatrixBase &bAndX) const; @@ -144,7 +157,7 @@ template class LLT */ inline const MatrixType& matrixLLT() const { - ei_assert(m_isInitialized && "LLT is not initialized."); + eigen_assert(m_isInitialized && "LLT is not initialized."); return m_matrix; } @@ -158,7 +171,7 @@ template class LLT */ ComputationInfo info() const { - ei_assert(m_isInitialized && "LLT is not initialized."); + eigen_assert(m_isInitialized && "LLT is not initialized."); return m_info; } @@ -175,17 +188,20 @@ template class LLT ComputationInfo m_info; }; -template struct ei_llt_inplace; +namespace internal { -template<> struct ei_llt_inplace 
+template struct llt_inplace; + +template<> struct llt_inplace { template - static bool unblocked(MatrixType& mat) + static typename MatrixType::Index unblocked(MatrixType& mat) { + typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; - ei_assert(mat.rows()==mat.cols()); + + eigen_assert(mat.rows()==mat.cols()); const Index size = mat.rows(); for(Index k = 0; k < size; ++k) { @@ -195,22 +211,22 @@ template<> struct ei_llt_inplace Block A10(mat,k,0,1,k); Block A20(mat,k+1,0,rs,k); - RealScalar x = ei_real(mat.coeff(k,k)); - if (k>0) x -= mat.row(k).head(k).squaredNorm(); + RealScalar x = real(mat.coeff(k,k)); + if (k>0) x -= A10.squaredNorm(); if (x<=RealScalar(0)) - return false; - mat.coeffRef(k,k) = x = ei_sqrt(x); + return k; + mat.coeffRef(k,k) = x = sqrt(x); if (k>0 && rs>0) A21.noalias() -= A20 * A10.adjoint(); if (rs>0) A21 *= RealScalar(1)/x; } - return true; + return -1; } template - static bool blocked(MatrixType& m) + static typename MatrixType::Index blocked(MatrixType& m) { typedef typename MatrixType::Index Index; - ei_assert(m.rows()==m.cols()); + eigen_assert(m.rows()==m.cols()); Index size = m.rows(); if(size<32) return unblocked(m); @@ -231,27 +247,28 @@ template<> struct ei_llt_inplace Block A21(m,k+bs,k, rs,bs); Block A22(m,k+bs,k+bs,rs,rs); - if(!unblocked(A11)) return false; + Index ret; + if((ret=unblocked(A11))>=0) return k+ret; if(rs>0) A11.adjoint().template triangularView().template solveInPlace(A21); if(rs>0) A22.template selfadjointView().rankUpdate(A21,-1); // bottleneck } - return true; + return -1; } }; -template<> struct ei_llt_inplace +template<> struct llt_inplace { template - static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat) + static EIGEN_STRONG_INLINE typename MatrixType::Index unblocked(MatrixType& mat) { Transpose matt(mat); - return ei_llt_inplace::unblocked(matt); + return 
llt_inplace::unblocked(matt); } template - static EIGEN_STRONG_INLINE bool blocked(MatrixType& mat) + static EIGEN_STRONG_INLINE typename MatrixType::Index blocked(MatrixType& mat) { Transpose matt(mat); - return ei_llt_inplace::blocked(matt); + return llt_inplace::blocked(matt); } }; @@ -262,7 +279,7 @@ template struct LLT_Traits inline static MatrixL getL(const MatrixType& m) { return m; } inline static MatrixU getU(const MatrixType& m) { return m.adjoint(); } static bool inplace_decomposition(MatrixType& m) - { return ei_llt_inplace::blocked(m); } + { return llt_inplace::blocked(m)==-1; } }; template struct LLT_Traits @@ -272,9 +289,11 @@ template struct LLT_Traits inline static MatrixL getL(const MatrixType& m) { return m.adjoint(); } inline static MatrixU getU(const MatrixType& m) { return m; } static bool inplace_decomposition(MatrixType& m) - { return ei_llt_inplace::blocked(m); } + { return llt_inplace::blocked(m)==-1; } }; +} // end namespace internal + /** Computes / recomputes the Cholesky decomposition A = LL^* = U^*U of \a matrix * * @@ -295,9 +314,10 @@ LLT& LLT::compute(const MatrixType& a) return *this; } +namespace internal { template -struct ei_solve_retval, Rhs> - : ei_solve_retval_base, Rhs> +struct solve_retval, Rhs> + : solve_retval_base, Rhs> { typedef LLT<_MatrixType,UpLo> LLTType; EIGEN_MAKE_SOLVE_HELPERS(LLTType,Rhs) @@ -308,6 +328,7 @@ struct ei_solve_retval, Rhs> dec().solveInPlace(dst); } }; +} /** \internal use x = llt_object.solve(x); * @@ -326,8 +347,8 @@ template template void LLT::solveInPlace(MatrixBase &bAndX) const { - ei_assert(m_isInitialized && "LLT is not initialized."); - ei_assert(m_matrix.rows()==bAndX.rows()); + eigen_assert(m_isInitialized && "LLT is not initialized."); + eigen_assert(m_matrix.rows()==bAndX.rows()); matrixL().solveInPlace(bAndX); matrixU().solveInPlace(bAndX); } @@ -338,7 +359,7 @@ void LLT::solveInPlace(MatrixBase &bAndX) const template MatrixType LLT::reconstructedMatrix() const { - 
ei_assert(m_isInitialized && "LLT is not initialized."); + eigen_assert(m_isInitialized && "LLT is not initialized."); return matrixL() * matrixL().adjoint().toDenseMatrix(); } diff --git a/gtsam/3rdparty/Eigen/src/Core/Array.h b/gtsam/3rdparty/Eigen/src/Core/Array.h index 2e97f18ee..a3a2167ad 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Array.h +++ b/gtsam/3rdparty/Eigen/src/Core/Array.h @@ -37,22 +37,27 @@ * API for the %Matrix class provides easy access to linear-algebra * operations. * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_ARRAY_PLUGIN. + * * \sa \ref TutorialArrayClass, \ref TopicClassHierarchy */ +namespace internal { template -struct ei_traits > : ei_traits > +struct traits > : traits > { typedef ArrayXpr XprKind; typedef ArrayBase > XprBase; }; +} template class Array - : public DenseStorageBase > + : public PlainObjectBase > { public: - typedef DenseStorageBase Base; + typedef PlainObjectBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(Array) enum { Options = _Options }; @@ -60,7 +65,7 @@ class Array protected: template - friend struct ei_conservative_resize_like_impl; + friend struct internal::conservative_resize_like_impl; using Base::m_storage; public: @@ -126,8 +131,8 @@ class Array #ifndef EIGEN_PARSED_BY_DOXYGEN // FIXME is it still needed ?? 
/** \internal */ - Array(ei_constructor_without_unaligned_array_assert) - : Base(ei_constructor_without_unaligned_array_assert()) + Array(internal::constructor_without_unaligned_array_assert) + : Base(internal::constructor_without_unaligned_array_assert()) { Base::_check_template_params(); EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED @@ -145,8 +150,8 @@ class Array { Base::_check_template_params(); EIGEN_STATIC_ASSERT_VECTOR_ONLY(Array) - ei_assert(dim > 0); - ei_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim); + eigen_assert(dim >= 0); + eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim); EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED } @@ -228,7 +233,7 @@ class Array * data pointers. */ template - void swap(ArrayBase EIGEN_REF_TO_TEMPORARY other) + void swap(ArrayBase const & other) { this->_swap(other.derived()); } inline Index innerStride() const { return 1; } @@ -241,7 +246,7 @@ class Array private: template - friend struct ei_matrix_swap_impl; + friend struct internal::matrix_swap_impl; }; /** \defgroup arraytypedefs Global array typedefs diff --git a/gtsam/3rdparty/Eigen/src/Core/ArrayBase.h b/gtsam/3rdparty/Eigen/src/Core/ArrayBase.h index f0fbcd4ff..9399ac3d1 100644 --- a/gtsam/3rdparty/Eigen/src/Core/ArrayBase.h +++ b/gtsam/3rdparty/Eigen/src/Core/ArrayBase.h @@ -42,7 +42,10 @@ template class MatrixWrapper; * * This class is the base that is inherited by all array expression types. * - * \param Derived is the derived type, e.g., an array or an expression type. + * \tparam Derived is the derived type, e.g., an array or an expression type. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_ARRAYBASE_PLUGIN. * * \sa class MatrixBase, \ref TopicClassHierarchy */ @@ -53,16 +56,16 @@ template class ArrayBase #ifndef EIGEN_PARSED_BY_DOXYGEN /** The base class for a given storage type. 
*/ typedef ArrayBase StorageBaseType; - + typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl; - using ei_special_scalar_op_base::Scalar, - typename NumTraits::Scalar>::Real>::operator*; + using internal::special_scalar_op_base::Scalar, + typename NumTraits::Scalar>::Real>::operator*; - typedef typename ei_traits::StorageKind StorageKind; - typedef typename ei_traits::Index Index; - typedef typename ei_traits::Scalar Scalar; - typedef typename ei_packet_traits::type PacketScalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; typedef DenseBase Base; @@ -91,6 +94,7 @@ template class ArrayBase using Base::operator/=; typedef typename Base::CoeffReturnType CoeffReturnType; + #endif // not EIGEN_PARSED_BY_DOXYGEN #ifndef EIGEN_PARSED_BY_DOXYGEN @@ -99,17 +103,17 @@ template class ArrayBase * reference to a matrix, not a matrix! It is however guaranteed that the return type of eval() is either * PlainObject or const PlainObject&. */ - typedef Array::Scalar, - ei_traits::RowsAtCompileTime, - ei_traits::ColsAtCompileTime, - AutoAlign | (ei_traits::Flags&RowMajorBit ? RowMajor : ColMajor), - ei_traits::MaxRowsAtCompileTime, - ei_traits::MaxColsAtCompileTime + typedef Array::Scalar, + internal::traits::RowsAtCompileTime, + internal::traits::ColsAtCompileTime, + AutoAlign | (internal::traits::Flags&RowMajorBit ? 
RowMajor : ColMajor), + internal::traits::MaxRowsAtCompileTime, + internal::traits::MaxColsAtCompileTime > PlainObject; /** \internal Represents a matrix with all coefficients equal to one another*/ - typedef CwiseNullaryOp,Derived> ConstantReturnType; + typedef CwiseNullaryOp,Derived> ConstantReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::ArrayBase @@ -129,7 +133,7 @@ template class ArrayBase */ Derived& operator=(const ArrayBase& other) { - return ei_assign_selector::run(derived(), other.derived()); + return internal::assign_selector::run(derived(), other.derived()); } Derived& operator+=(const Scalar& scalar) @@ -169,10 +173,10 @@ template class ArrayBase template explicit ArrayBase(const ArrayBase&); protected: // mixing arrays and matrices is not legal - template Derived& operator+=(const MatrixBase& mat) + template Derived& operator+=(const MatrixBase& ) {EIGEN_STATIC_ASSERT(sizeof(typename OtherDerived::Scalar)==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);} // mixing arrays and matrices is not legal - template Derived& operator-=(const MatrixBase& mat) + template Derived& operator-=(const MatrixBase& ) {EIGEN_STATIC_ASSERT(sizeof(typename OtherDerived::Scalar)==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);} }; @@ -185,8 +189,8 @@ template EIGEN_STRONG_INLINE Derived & ArrayBase::operator-=(const ArrayBase &other) { - SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); - tmp = other; + SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); + tmp = other.derived(); return derived(); } @@ -199,7 +203,7 @@ template EIGEN_STRONG_INLINE Derived & ArrayBase::operator+=(const ArrayBase& other) { - SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); + SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); tmp = other.derived(); return derived(); } @@ -213,7 +217,7 @@ template EIGEN_STRONG_INLINE Derived & ArrayBase::operator*=(const ArrayBase& other) { - SelfCwiseBinaryOp, Derived, OtherDerived> 
tmp(derived()); + SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); tmp = other.derived(); return derived(); } @@ -227,7 +231,7 @@ template EIGEN_STRONG_INLINE Derived & ArrayBase::operator/=(const ArrayBase& other) { - SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); + SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); tmp = other.derived(); return derived(); } diff --git a/gtsam/3rdparty/Eigen/src/Core/ArrayWrapper.h b/gtsam/3rdparty/Eigen/src/Core/ArrayWrapper.h index dc5bba443..7ba01de36 100644 --- a/gtsam/3rdparty/Eigen/src/Core/ArrayWrapper.h +++ b/gtsam/3rdparty/Eigen/src/Core/ArrayWrapper.h @@ -35,12 +35,15 @@ * * \sa MatrixBase::array(), class MatrixWrapper */ + +namespace internal { template -struct ei_traits > - : public ei_traits::type > +struct traits > + : public traits::type > { typedef ArrayXpr XprKind; }; +} template class ArrayWrapper : public ArrayBase > @@ -50,7 +53,7 @@ class ArrayWrapper : public ArrayBase > EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ArrayWrapper) - typedef typename ei_nested::type NestedExpressionType; + typedef typename internal::nested::type NestedExpressionType; inline ArrayWrapper(const ExpressionType& matrix) : m_expression(matrix) {} @@ -69,6 +72,11 @@ class ArrayWrapper : public ArrayBase > return m_expression.const_cast_derived().coeffRef(row, col); } + inline const Scalar& coeffRef(Index row, Index col) const + { + return m_expression.const_cast_derived().coeffRef(row, col); + } + inline const CoeffReturnType coeff(Index index) const { return m_expression.coeff(index); @@ -79,6 +87,11 @@ class ArrayWrapper : public ArrayBase > return m_expression.const_cast_derived().coeffRef(index); } + inline const Scalar& coeffRef(Index index) const + { + return m_expression.const_cast_derived().coeffRef(index); + } + template inline const PacketScalar packet(Index row, Index col) const { @@ -121,12 +134,14 @@ class ArrayWrapper : public ArrayBase > * \sa 
MatrixBase::matrix(), class ArrayWrapper */ +namespace internal { template -struct ei_traits > - : public ei_traits::type > +struct traits > + : public traits::type > { typedef MatrixXpr XprKind; }; +} template class MatrixWrapper : public MatrixBase > @@ -136,7 +151,7 @@ class MatrixWrapper : public MatrixBase > EIGEN_DENSE_PUBLIC_INTERFACE(MatrixWrapper) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MatrixWrapper) - typedef typename ei_nested::type NestedExpressionType; + typedef typename internal::nested::type NestedExpressionType; inline MatrixWrapper(const ExpressionType& matrix) : m_expression(matrix) {} @@ -155,6 +170,11 @@ class MatrixWrapper : public MatrixBase > return m_expression.const_cast_derived().coeffRef(row, col); } + inline const Scalar& coeffRef(Index row, Index col) const + { + return m_expression.derived().coeffRef(row, col); + } + inline const CoeffReturnType coeff(Index index) const { return m_expression.coeff(index); @@ -165,6 +185,11 @@ class MatrixWrapper : public MatrixBase > return m_expression.const_cast_derived().coeffRef(index); } + inline const Scalar& coeffRef(Index index) const + { + return m_expression.const_cast_derived().coeffRef(index); + } + template inline const PacketScalar packet(Index row, Index col) const { diff --git a/gtsam/3rdparty/Eigen/src/Core/Assign.h b/gtsam/3rdparty/Eigen/src/Core/Assign.h index 335a888f6..3a17152f0 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Assign.h +++ b/gtsam/3rdparty/Eigen/src/Core/Assign.h @@ -27,19 +27,21 @@ #ifndef EIGEN_ASSIGN_H #define EIGEN_ASSIGN_H +namespace internal { + /*************************************************************************** * Part 1 : the logic deciding a strategy for traversal and unrolling * ***************************************************************************/ template -struct ei_assign_traits +struct assign_traits { public: enum { DstIsAligned = Derived::Flags & AlignedBit, DstHasDirectAccess = Derived::Flags & DirectAccessBit, SrcIsAligned = OtherDerived::Flags 
& AlignedBit, - JointAlignment = DstIsAligned && SrcIsAligned ? Aligned : Unaligned + JointAlignment = bool(DstIsAligned) && bool(SrcIsAligned) ? Aligned : Unaligned }; private: @@ -51,7 +53,7 @@ private: : int(Derived::Flags)&RowMajorBit ? int(Derived::MaxColsAtCompileTime) : int(Derived::MaxRowsAtCompileTime), MaxSizeAtCompileTime = Derived::SizeAtCompileTime, - PacketSize = ei_packet_traits::size + PacketSize = packet_traits::size }; enum { @@ -104,9 +106,9 @@ public: : int(NoUnrolling) ) : int(Traversal) == int(LinearVectorizedTraversal) - ? ( int(MayUnrollCompletely) && int(DstIsAligned) ? int(CompleteUnrolling) : int(NoUnrolling) ) + ? ( bool(MayUnrollCompletely) && bool(DstIsAligned) ? int(CompleteUnrolling) : int(NoUnrolling) ) : int(Traversal) == int(LinearTraversal) - ? ( int(MayUnrollCompletely) ? int(CompleteUnrolling) : int(NoUnrolling) ) + ? ( bool(MayUnrollCompletely) ? int(CompleteUnrolling) : int(NoUnrolling) ) : int(NoUnrolling) }; @@ -143,7 +145,7 @@ public: ************************/ template -struct ei_assign_DefaultTraversal_CompleteUnrolling +struct assign_DefaultTraversal_CompleteUnrolling { enum { outer = Index / Derived1::InnerSizeAtCompileTime, @@ -153,28 +155,28 @@ struct ei_assign_DefaultTraversal_CompleteUnrolling EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) { dst.copyCoeffByOuterInner(outer, inner, src); - ei_assign_DefaultTraversal_CompleteUnrolling::run(dst, src); + assign_DefaultTraversal_CompleteUnrolling::run(dst, src); } }; template -struct ei_assign_DefaultTraversal_CompleteUnrolling +struct assign_DefaultTraversal_CompleteUnrolling { EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &) {} }; template -struct ei_assign_DefaultTraversal_InnerUnrolling +struct assign_DefaultTraversal_InnerUnrolling { EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src, int outer) { dst.copyCoeffByOuterInner(outer, Index, src); - ei_assign_DefaultTraversal_InnerUnrolling::run(dst, src, 
outer); + assign_DefaultTraversal_InnerUnrolling::run(dst, src, outer); } }; template -struct ei_assign_DefaultTraversal_InnerUnrolling +struct assign_DefaultTraversal_InnerUnrolling { EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &, int) {} }; @@ -184,17 +186,17 @@ struct ei_assign_DefaultTraversal_InnerUnrolling ***********************/ template -struct ei_assign_LinearTraversal_CompleteUnrolling +struct assign_LinearTraversal_CompleteUnrolling { EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) { dst.copyCoeff(Index, src); - ei_assign_LinearTraversal_CompleteUnrolling::run(dst, src); + assign_LinearTraversal_CompleteUnrolling::run(dst, src); } }; template -struct ei_assign_LinearTraversal_CompleteUnrolling +struct assign_LinearTraversal_CompleteUnrolling { EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &) {} }; @@ -204,41 +206,41 @@ struct ei_assign_LinearTraversal_CompleteUnrolling -struct ei_assign_innervec_CompleteUnrolling +struct assign_innervec_CompleteUnrolling { enum { outer = Index / Derived1::InnerSizeAtCompileTime, inner = Index % Derived1::InnerSizeAtCompileTime, - JointAlignment = ei_assign_traits::JointAlignment + JointAlignment = assign_traits::JointAlignment }; EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) { dst.template copyPacketByOuterInner(outer, inner, src); - ei_assign_innervec_CompleteUnrolling::size, Stop>::run(dst, src); + assign_innervec_CompleteUnrolling::size, Stop>::run(dst, src); } }; template -struct ei_assign_innervec_CompleteUnrolling +struct assign_innervec_CompleteUnrolling { EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &) {} }; template -struct ei_assign_innervec_InnerUnrolling +struct assign_innervec_InnerUnrolling { EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src, int outer) { dst.template copyPacketByOuterInner(outer, Index, src); - ei_assign_innervec_InnerUnrolling::size, Stop>::run(dst, src, outer); + 
assign_innervec_InnerUnrolling::size, Stop>::run(dst, src, outer); } }; template -struct ei_assign_innervec_InnerUnrolling +struct assign_innervec_InnerUnrolling { EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &, int) {} }; @@ -248,22 +250,22 @@ struct ei_assign_innervec_InnerUnrolling ***************************************************************************/ template::Traversal, - int Unrolling = ei_assign_traits::Unrolling> -struct ei_assign_impl; + int Traversal = assign_traits::Traversal, + int Unrolling = assign_traits::Unrolling> +struct assign_impl; /************************ *** Default traversal *** ************************/ template -struct ei_assign_impl +struct assign_impl { inline static void run(Derived1 &, const Derived2 &) { } }; template -struct ei_assign_impl +struct assign_impl { typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) @@ -277,24 +279,24 @@ struct ei_assign_impl }; template -struct ei_assign_impl +struct assign_impl { EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) { - ei_assign_DefaultTraversal_CompleteUnrolling + assign_DefaultTraversal_CompleteUnrolling ::run(dst, src); } }; template -struct ei_assign_impl +struct assign_impl { typedef typename Derived1::Index Index; EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) { const Index outerSize = dst.outerSize(); for(Index outer = 0; outer < outerSize; ++outer) - ei_assign_DefaultTraversal_InnerUnrolling + assign_DefaultTraversal_InnerUnrolling ::run(dst, src, outer); } }; @@ -304,7 +306,7 @@ struct ei_assign_impl ***********************/ template -struct ei_assign_impl +struct assign_impl { typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) @@ -316,11 +318,11 @@ struct ei_assign_impl }; template -struct ei_assign_impl +struct assign_impl { EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) { - 
ei_assign_LinearTraversal_CompleteUnrolling + assign_LinearTraversal_CompleteUnrolling ::run(dst, src); } }; @@ -330,14 +332,14 @@ struct ei_assign_impl **************************/ template -struct ei_assign_impl +struct assign_impl { typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) { const Index innerSize = dst.innerSize(); const Index outerSize = dst.outerSize(); - const Index packetSize = ei_packet_traits::size; + const Index packetSize = packet_traits::size; for(Index outer = 0; outer < outerSize; ++outer) for(Index inner = 0; inner < innerSize; inner+=packetSize) dst.template copyPacketByOuterInner(outer, inner, src); @@ -345,24 +347,24 @@ struct ei_assign_impl }; template -struct ei_assign_impl +struct assign_impl { EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) { - ei_assign_innervec_CompleteUnrolling + assign_innervec_CompleteUnrolling ::run(dst, src); } }; template -struct ei_assign_impl +struct assign_impl { typedef typename Derived1::Index Index; EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) { const Index outerSize = dst.outerSize(); for(Index outer = 0; outer < outerSize; ++outer) - ei_assign_innervec_InnerUnrolling + assign_innervec_InnerUnrolling ::run(dst, src, outer); } }; @@ -372,14 +374,14 @@ struct ei_assign_impl -struct ei_unaligned_assign_impl +struct unaligned_assign_impl { template static EIGEN_STRONG_INLINE void run(const Derived&, OtherDerived&, typename Derived::Index, typename Derived::Index) {} }; template <> -struct ei_unaligned_assign_impl +struct unaligned_assign_impl { // MSVC must not inline this functions. If it does, it fails to optimize the // packet access path. 
@@ -397,45 +399,45 @@ struct ei_unaligned_assign_impl }; template -struct ei_assign_impl +struct assign_impl { typedef typename Derived1::Index Index; EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) { const Index size = dst.size(); - typedef ei_packet_traits PacketTraits; + typedef packet_traits PacketTraits; enum { packetSize = PacketTraits::size, - dstAlignment = PacketTraits::AlignedOnScalar ? Aligned : int(ei_assign_traits::DstIsAligned) , - srcAlignment = ei_assign_traits::JointAlignment + dstAlignment = PacketTraits::AlignedOnScalar ? Aligned : int(assign_traits::DstIsAligned) , + srcAlignment = assign_traits::JointAlignment }; - const Index alignedStart = ei_assign_traits::DstIsAligned ? 0 - : ei_first_aligned(&dst.coeffRef(0), size); + const Index alignedStart = assign_traits::DstIsAligned ? 0 + : first_aligned(&dst.coeffRef(0), size); const Index alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize; - ei_unaligned_assign_impl::DstIsAligned!=0>::run(src,dst,0,alignedStart); + unaligned_assign_impl::DstIsAligned!=0>::run(src,dst,0,alignedStart); for(Index index = alignedStart; index < alignedEnd; index += packetSize) { dst.template copyPacket(index, src); } - ei_unaligned_assign_impl<>::run(src,dst,alignedEnd,size); + unaligned_assign_impl<>::run(src,dst,alignedEnd,size); } }; template -struct ei_assign_impl +struct assign_impl { typedef typename Derived1::Index Index; EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) { enum { size = Derived1::SizeAtCompileTime, - packetSize = ei_packet_traits::size, + packetSize = packet_traits::size, alignedSize = (size/packetSize)*packetSize }; - ei_assign_innervec_CompleteUnrolling::run(dst, src); - ei_assign_DefaultTraversal_CompleteUnrolling::run(dst, src); + assign_innervec_CompleteUnrolling::run(dst, src); + assign_DefaultTraversal_CompleteUnrolling::run(dst, src); } }; @@ -444,24 +446,24 @@ struct ei_assign_impl -struct ei_assign_impl +struct 
assign_impl { typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) { - typedef ei_packet_traits PacketTraits; + typedef packet_traits PacketTraits; enum { packetSize = PacketTraits::size, alignable = PacketTraits::AlignedOnScalar, - dstAlignment = alignable ? Aligned : int(ei_assign_traits::DstIsAligned) , - srcAlignment = ei_assign_traits::JointAlignment + dstAlignment = alignable ? Aligned : int(assign_traits::DstIsAligned) , + srcAlignment = assign_traits::JointAlignment }; const Index packetAlignedMask = packetSize - 1; const Index innerSize = dst.innerSize(); const Index outerSize = dst.outerSize(); const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0; - Index alignedStart = ((!alignable) || ei_assign_traits::DstIsAligned) ? 0 - : ei_first_aligned(&dst.coeffRef(0,0), innerSize); + Index alignedStart = ((!alignable) || assign_traits::DstIsAligned) ? 0 + : first_aligned(&dst.coeffRef(0,0), innerSize); for(Index outer = 0; outer < outerSize; ++outer) { @@ -472,7 +474,7 @@ struct ei_assign_impl // do the vectorizable part of the assignment for(Index inner = alignedStart; inner(outer, inner, src); + dst.template copyPacketByOuterInner(outer, inner, src); // do the non-vectorizable part of the assignment for(Index inner = alignedEnd; inner } }; +} // end namespace internal + /*************************************************************************** * Part 4 : implementation of DenseBase methods ***************************************************************************/ @@ -493,26 +497,27 @@ EIGEN_STRONG_INLINE Derived& DenseBase ::lazyAssign(const DenseBase& other) { enum{ - SameType = ei_is_same_type::ret + SameType = internal::is_same::value }; - + + EIGEN_STATIC_ASSERT_LVALUE(Derived) EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived) 
EIGEN_STATIC_ASSERT(SameType,YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) - - #ifdef EIGEN_DEBUG_ASSIGN - ei_assign_traits::debug(); + internal::assign_traits::debug(); #endif - ei_assert(rows() == other.rows() && cols() == other.cols()); - ei_assign_impl::Traversal) - : int(InvalidTraversal)>::run(derived(),other.derived()); + eigen_assert(rows() == other.rows() && cols() == other.cols()); + internal::assign_impl::Traversal) + : int(InvalidTraversal)>::run(derived(),other.derived()); #ifndef EIGEN_NO_DEBUG checkTransposeAliasing(other.derived()); #endif return derived(); } +namespace internal { + template -struct ei_assign_selector; +struct assign_selector; template -struct ei_assign_selector { +struct assign_selector { EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.derived()); } }; template -struct ei_assign_selector { +struct assign_selector { EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.eval()); } }; template -struct ei_assign_selector { +struct assign_selector { EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose()); } }; template -struct ei_assign_selector { +struct assign_selector { EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose().eval()); } }; +} // end namespace internal + template template EIGEN_STRONG_INLINE Derived& DenseBase::operator=(const DenseBase& other) { - return ei_assign_selector::run(derived(), other.derived()); + return internal::assign_selector::run(derived(), other.derived()); } template EIGEN_STRONG_INLINE Derived& DenseBase::operator=(const DenseBase& other) { - return ei_assign_selector::run(derived(), other.derived()); + return internal::assign_selector::run(derived(), other.derived()); } template 
EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const MatrixBase& other) { - return ei_assign_selector::run(derived(), other.derived()); + return internal::assign_selector::run(derived(), other.derived()); } template template EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const DenseBase& other) { - return ei_assign_selector::run(derived(), other.derived()); + return internal::assign_selector::run(derived(), other.derived()); } template diff --git a/gtsam/3rdparty/Eigen/src/Core/BandMatrix.h b/gtsam/3rdparty/Eigen/src/Core/BandMatrix.h index 2f94d12dc..a1f71d5f6 100644 --- a/gtsam/3rdparty/Eigen/src/Core/BandMatrix.h +++ b/gtsam/3rdparty/Eigen/src/Core/BandMatrix.h @@ -25,112 +25,82 @@ #ifndef EIGEN_BANDMATRIX_H #define EIGEN_BANDMATRIX_H -/** - * \class BandMatrix - * \ingroup Core_Module - * - * \brief Represents a rectangular matrix with a banded storage - * - * \param _Scalar Numeric type, i.e. float, double, int - * \param Rows Number of rows, or \b Dynamic - * \param Cols Number of columns, or \b Dynamic - * \param Supers Number of super diagonal - * \param Subs Number of sub diagonal - * \param _Options A combination of either \b RowMajor or \b ColMajor, and of \b SelfAdjoint - * The former controls storage order, and defaults to column-major. The latter controls - * whether the matrix represent a selfadjoint matrix in which case either Supers of Subs - * have to be null. 
- * - * \sa class TridiagonalMatrix - */ -template -struct ei_traits > -{ - typedef _Scalar Scalar; - typedef Dense StorageKind; - typedef DenseIndex Index; - enum { - CoeffReadCost = NumTraits::ReadCost, - RowsAtCompileTime = Rows, - ColsAtCompileTime = Cols, - MaxRowsAtCompileTime = Rows, - MaxColsAtCompileTime = Cols, - Flags = LvalueBit - }; -}; +namespace internal { -template -class BandMatrix : public EigenBase > + +template +class BandMatrixBase : public EigenBase { public: enum { - Flags = ei_traits::Flags, - CoeffReadCost = ei_traits::CoeffReadCost, - RowsAtCompileTime = ei_traits::RowsAtCompileTime, - ColsAtCompileTime = ei_traits::ColsAtCompileTime, - MaxRowsAtCompileTime = ei_traits::MaxRowsAtCompileTime, - MaxColsAtCompileTime = ei_traits::MaxColsAtCompileTime + Flags = internal::traits::Flags, + CoeffReadCost = internal::traits::CoeffReadCost, + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + ColsAtCompileTime = internal::traits::ColsAtCompileTime, + MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime, + Supers = internal::traits::Supers, + Subs = internal::traits::Subs, + Options = internal::traits::Options }; - typedef typename ei_traits::Scalar Scalar; + typedef typename internal::traits::Scalar Scalar; typedef Matrix DenseMatrixType; typedef typename DenseMatrixType::Index Index; + typedef typename internal::traits::CoefficientsType CoefficientsType; + typedef EigenBase Base; protected: enum { DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 
1 + Supers + Subs : Dynamic, - SizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(Rows,Cols) + SizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime) }; - typedef Matrix DataType; public: - - inline BandMatrix(Index rows=Rows, Index cols=Cols, Index supers=Supers, Index subs=Subs) - : m_data(1+supers+subs,cols), - m_rows(rows), m_supers(supers), m_subs(subs) - { - //m_data.setConstant(666); - } - - /** \returns the number of columns */ - inline Index rows() const { return m_rows.value(); } - - /** \returns the number of rows */ - inline Index cols() const { return m_data.cols(); } + + using Base::derived; + using Base::rows; + using Base::cols; /** \returns the number of super diagonals */ - inline Index supers() const { return m_supers.value(); } + inline Index supers() const { return derived().supers(); } /** \returns the number of sub diagonals */ - inline Index subs() const { return m_subs.value(); } + inline Index subs() const { return derived().subs(); } + + /** \returns an expression of the underlying coefficient matrix */ + inline const CoefficientsType& coeffs() const { return derived().coeffs(); } + + /** \returns an expression of the underlying coefficient matrix */ + inline CoefficientsType& coeffs() { return derived().coeffs(); } /** \returns a vector expression of the \a i -th column, * only the meaningful part is returned. * \warning the internal storage must be column major. 
*/ - inline Block col(Index i) + inline Block col(Index i) { EIGEN_STATIC_ASSERT((Options&RowMajor)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); Index start = 0; - Index len = m_data.rows(); + Index len = coeffs().rows(); if (i<=supers()) { start = supers()-i; - len = std::min(rows(),std::max(0,m_data.rows() - (supers()-i))); + len = std::min(rows(),std::max(0,coeffs().rows() - (supers()-i))); } else if (i>=rows()-subs()) - len = std::max(0,m_data.rows() - (i + 1 - rows() + subs())); - return Block(m_data, start, i, len, 1); + len = std::max(0,coeffs().rows() - (i + 1 - rows() + subs())); + return Block(coeffs(), start, i, len, 1); } /** \returns a vector expression of the main diagonal */ - inline Block diagonal() - { return Block(m_data,supers(),0,1,std::min(rows(),cols())); } + inline Block diagonal() + { return Block(coeffs(),supers(),0,1,std::min(rows(),cols())); } /** \returns a vector expression of the main diagonal (const version) */ - inline const Block diagonal() const - { return Block(m_data,supers(),0,1,std::min(rows(),cols())); } + inline const Block diagonal() const + { return Block(coeffs(),supers(),0,1,std::min(rows(),cols())); } template struct DiagonalIntReturnType { enum { @@ -143,38 +113,38 @@ class BandMatrix : public EigenBase BuildType; - typedef typename ei_meta_if,BuildType >, - BuildType>::ret Type; + typedef Block BuildType; + typedef typename internal::conditional,BuildType >, + BuildType>::type Type; }; /** \returns a vector expression of the \a N -th sub or super diagonal */ template inline typename DiagonalIntReturnType::Type diagonal() { - return typename DiagonalIntReturnType::BuildType(m_data, supers()-N, std::max(0,N), 1, diagonalLength(N)); + return typename DiagonalIntReturnType::BuildType(coeffs(), supers()-N, std::max(0,N), 1, diagonalLength(N)); } /** \returns a vector expression of the \a N -th sub or super diagonal */ template inline const typename DiagonalIntReturnType::Type diagonal() const { - return typename 
DiagonalIntReturnType::BuildType(m_data, supers()-N, std::max(0,N), 1, diagonalLength(N)); + return typename DiagonalIntReturnType::BuildType(coeffs(), supers()-N, std::max(0,N), 1, diagonalLength(N)); } /** \returns a vector expression of the \a i -th sub or super diagonal */ - inline Block diagonal(Index i) + inline Block diagonal(Index i) { - ei_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers())); - return Block(m_data, supers()-i, std::max(0,i), 1, diagonalLength(i)); + eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers())); + return Block(coeffs(), supers()-i, std::max(0,i), 1, diagonalLength(i)); } /** \returns a vector expression of the \a i -th sub or super diagonal */ - inline const Block diagonal(Index i) const + inline const Block diagonal(Index i) const { - ei_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers())); - return Block(m_data, supers()-i, std::max(0,i), 1, diagonalLength(i)); + eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers())); + return Block(coeffs(), supers()-i, std::max(0,i), 1, diagonalLength(i)); } - + template inline void evalTo(Dest& dst) const { dst.resize(rows(),cols()); @@ -197,18 +167,153 @@ class BandMatrix : public EigenBase m_rows; - ei_variable_if_dynamic m_supers; - ei_variable_if_dynamic m_subs; +/** + * \class BandMatrix + * \ingroup Core_Module + * + * \brief Represents a rectangular matrix with a banded storage + * + * \param _Scalar Numeric type, i.e. float, double, int + * \param Rows Number of rows, or \b Dynamic + * \param Cols Number of columns, or \b Dynamic + * \param Supers Number of super diagonal + * \param Subs Number of sub diagonal + * \param _Options A combination of either \b RowMajor or \b ColMajor, and of \b SelfAdjoint + * The former controls \ref TopicStorageOrders "storage order", and defaults to + * column-major. The latter controls whether the matrix represents a selfadjoint + * matrix in which case either Supers of Subs have to be null. 
+ * + * \sa class TridiagonalMatrix + */ + +template +struct traits > +{ + typedef _Scalar Scalar; + typedef Dense StorageKind; + typedef DenseIndex Index; + enum { + CoeffReadCost = NumTraits::ReadCost, + RowsAtCompileTime = _Rows, + ColsAtCompileTime = _Cols, + MaxRowsAtCompileTime = _Rows, + MaxColsAtCompileTime = _Cols, + Flags = LvalueBit, + Supers = _Supers, + Subs = _Subs, + Options = _Options, + DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 1 + Supers + Subs : Dynamic + }; + typedef Matrix CoefficientsType; +}; + +template +class BandMatrix : public BandMatrixBase > +{ + public: + + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::CoefficientsType CoefficientsType; + + inline BandMatrix(Index rows=Rows, Index cols=Cols, Index supers=Supers, Index subs=Subs) + : m_coeffs(1+supers+subs,cols), + m_rows(rows), m_supers(supers), m_subs(subs) + { + } + + /** \returns the number of columns */ + inline Index rows() const { return m_rows.value(); } + + /** \returns the number of rows */ + inline Index cols() const { return m_coeffs.cols(); } + + /** \returns the number of super diagonals */ + inline Index supers() const { return m_supers.value(); } + + /** \returns the number of sub diagonals */ + inline Index subs() const { return m_subs.value(); } + + inline const CoefficientsType& coeffs() const { return m_coeffs; } + inline CoefficientsType& coeffs() { return m_coeffs; } + + protected: + + CoefficientsType m_coeffs; + internal::variable_if_dynamic m_rows; + internal::variable_if_dynamic m_supers; + internal::variable_if_dynamic m_subs; +}; + +template +class BandMatrixWrapper; + +template +struct traits > +{ + typedef typename _CoefficientsType::Scalar Scalar; + typedef typename _CoefficientsType::StorageKind StorageKind; + typedef typename _CoefficientsType::Index Index; + enum { + CoeffReadCost = internal::traits<_CoefficientsType>::CoeffReadCost, + 
RowsAtCompileTime = _Rows, + ColsAtCompileTime = _Cols, + MaxRowsAtCompileTime = _Rows, + MaxColsAtCompileTime = _Cols, + Flags = LvalueBit, + Supers = _Supers, + Subs = _Subs, + Options = _Options, + DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 1 + Supers + Subs : Dynamic + }; + typedef _CoefficientsType CoefficientsType; +}; + +template +class BandMatrixWrapper : public BandMatrixBase > +{ + public: + + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::CoefficientsType CoefficientsType; + typedef typename internal::traits::Index Index; + + inline BandMatrixWrapper(const CoefficientsType& coeffs, Index rows=_Rows, Index cols=_Cols, Index supers=_Supers, Index subs=_Subs) + : m_coeffs(coeffs), + m_rows(rows), m_supers(supers), m_subs(subs) + { + //internal::assert(coeffs.cols()==cols() && (supers()+subs()+1)==coeffs.rows()); + } + + /** \returns the number of columns */ + inline Index rows() const { return m_rows.value(); } + + /** \returns the number of rows */ + inline Index cols() const { return m_coeffs.cols(); } + + /** \returns the number of super diagonals */ + inline Index supers() const { return m_supers.value(); } + + /** \returns the number of sub diagonals */ + inline Index subs() const { return m_subs.value(); } + + inline const CoefficientsType& coeffs() const { return m_coeffs; } + + protected: + + const CoefficientsType& m_coeffs; + internal::variable_if_dynamic m_rows; + internal::variable_if_dynamic m_supers; + internal::variable_if_dynamic m_subs; }; /** * \class TridiagonalMatrix * \ingroup Core_Module * - * \brief Represents a tridiagonal matrix + * \brief Represents a tridiagonal matrix with a compact banded storage * * \param _Scalar Numeric type, i.e. 
float, double, int * \param Size Number of rows and cols, or \b Dynamic @@ -219,10 +324,10 @@ class BandMatrix : public EigenBase class TridiagonalMatrix : public BandMatrix { - typedef BandMatrix Base; + typedef BandMatrix Base; typedef typename Base::Index Index; public: - TridiagonalMatrix(Index size = Size) : Base(size,size,1,1) {} + TridiagonalMatrix(Index size = Size) : Base(size,size,Options&SelfAdjoint?0:1,1) {} inline typename Base::template DiagonalIntReturnType<1>::Type super() { return Base::template diagonal<1>(); } @@ -235,4 +340,6 @@ class TridiagonalMatrix : public BandMatrix -struct ei_traits > : ei_traits +struct traits > : traits { - typedef typename ei_traits::Scalar Scalar; - typedef typename ei_traits::StorageKind StorageKind; - typedef typename ei_traits::XprKind XprKind; - typedef typename ei_nested::type XprTypeNested; - typedef typename ei_unref::type _XprTypeNested; + typedef typename traits::Scalar Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::XprKind XprKind; + typedef typename nested::type XprTypeNested; + typedef typename remove_reference::type _XprTypeNested; enum{ - MatrixRows = ei_traits::RowsAtCompileTime, - MatrixCols = ei_traits::ColsAtCompileTime, + MatrixRows = traits::RowsAtCompileTime, + MatrixCols = traits::ColsAtCompileTime, RowsAtCompileTime = MatrixRows == 0 ? 0 : BlockRows, ColsAtCompileTime = MatrixCols == 0 ? 0 : BlockCols, MaxRowsAtCompileTime = BlockRows==0 ? 0 : RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime) - : int(ei_traits::MaxRowsAtCompileTime), + : int(traits::MaxRowsAtCompileTime), MaxColsAtCompileTime = BlockCols==0 ? 0 : ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime) - : int(ei_traits::MaxColsAtCompileTime), - XprTypeIsRowMajor = (int(ei_traits::Flags)&RowMajorBit) != 0, + : int(traits::MaxColsAtCompileTime), + XprTypeIsRowMajor = (int(traits::Flags)&RowMajorBit) != 0, IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 
1 : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0 : XprTypeIsRowMajor, HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor), InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime), InnerStrideAtCompileTime = HasSameStorageOrderAsXprType - ? int(ei_inner_stride_at_compile_time::ret) - : int(ei_outer_stride_at_compile_time::ret), + ? int(inner_stride_at_compile_time::ret) + : int(outer_stride_at_compile_time::ret), OuterStrideAtCompileTime = HasSameStorageOrderAsXprType - ? int(ei_outer_stride_at_compile_time::ret) - : int(ei_inner_stride_at_compile_time::ret), - MaskPacketAccessBit = (InnerSize == Dynamic || (InnerSize % ei_packet_traits::size) == 0) + ? int(outer_stride_at_compile_time::ret) + : int(inner_stride_at_compile_time::ret), + MaskPacketAccessBit = (InnerSize == Dynamic || (InnerSize % packet_traits::size) == 0) && (InnerStrideAtCompileTime == 1) ? PacketAccessBit : 0, - MaskAlignedBit = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && ((OuterStrideAtCompileTime % ei_packet_traits::size) == 0)) ? AlignedBit : 0, + MaskAlignedBit = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && ((OuterStrideAtCompileTime % packet_traits::size) == 0)) ? AlignedBit : 0, FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0, - Flags0 = ei_traits::Flags & (HereditaryBits | MaskPacketAccessBit | LvalueBit | DirectAccessBit | MaskAlignedBit), - Flags1 = Flags0 | FlagsLinearAccessBit, - Flags = (Flags1 & ~RowMajorBit) | (IsRowMajor ? RowMajorBit : 0) + FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, + FlagsRowMajorBit = IsRowMajor ? 
RowMajorBit : 0, + Flags0 = traits::Flags & ( (HereditaryBits & ~RowMajorBit) | + DirectAccessBit | + MaskPacketAccessBit | + MaskAlignedBit), + Flags = Flags0 | FlagsLinearAccessBit | FlagsLvalueBit | FlagsRowMajorBit }; }; +} template class Block - : public ei_dense_xpr_base >::type + : public internal::dense_xpr_base >::type { public: - typedef typename ei_dense_xpr_base::type Base; + typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Block) class InnerIterator; /** Column or Row constructor */ - inline Block(const XprType& xpr, Index i) + inline Block(XprType& xpr, Index i) : m_xpr(xpr), // It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime, // and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1, @@ -123,33 +130,33 @@ template=0) && ( + eigen_assert( (i>=0) && ( ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows() + eigen_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows() && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= xpr.cols()); } /** Dynamic-size constructor */ - inline Block(const XprType& xpr, + inline Block(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols) { - ei_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows) + eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows) && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols)); - ei_assert(startRow >= 0 && blockRows >= 0 && startRow + blockRows <= xpr.rows() + eigen_assert(startRow >= 0 && blockRows >= 0 && startRow + blockRows <= xpr.rows() && startCol >= 0 && blockCols >= 0 && startCol + blockCols <= xpr.cols()); } @@ -160,16 +167,31 @@ template m_startRow; - const ei_variable_if_dynamic m_startCol; - const 
ei_variable_if_dynamic m_blockRows; - const ei_variable_if_dynamic m_blockCols; + const internal::variable_if_dynamic m_startRow; + const internal::variable_if_dynamic m_startCol; + const internal::variable_if_dynamic m_blockRows; + const internal::variable_if_dynamic m_blockCols; }; /** \internal */ @@ -243,15 +265,15 @@ class Block /** Column or Row constructor */ - inline Block(const XprType& xpr, Index i) - : Base(&xpr.const_cast_derived().coeffRef( + inline Block(XprType& xpr, Index i) + : Base(internal::const_cast_ptr(&xpr.coeffRef( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0, - (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0), + (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0)), BlockRows==1 ? 1 : xpr.rows(), BlockCols==1 ? 1 : xpr.cols()), m_xpr(xpr) { - ei_assert( (i>=0) && ( + eigen_assert( (i>=0) && ( ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i /** Fixed-size constructor */ - inline Block(const XprType& xpr, Index startRow, Index startCol) - : Base(&xpr.const_cast_derived().coeffRef(startRow,startCol)), m_xpr(xpr) + inline Block(XprType& xpr, Index startRow, Index startCol) + : Base(internal::const_cast_ptr(&xpr.coeffRef(startRow,startCol))), m_xpr(xpr) { - ei_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows() + eigen_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows() && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= xpr.cols()); init(); } /** Dynamic-size constructor */ - inline Block(const XprType& xpr, + inline Block(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) - : Base(&xpr.const_cast_derived().coeffRef(startRow,startCol), blockRows, blockCols), + : Base(internal::const_cast_ptr(&xpr.coeffRef(startRow,startCol)), blockRows, blockCols), m_xpr(xpr) { - ei_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows) + eigen_assert((RowsAtCompileTime==Dynamic || 
RowsAtCompileTime==blockRows) && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols)); - ei_assert(startRow >= 0 && blockRows >= 0 && startRow + blockRows <= xpr.rows() + eigen_assert(startRow >= 0 && blockRows >= 0 && startRow + blockRows <= xpr.rows() && startCol >= 0 && blockCols >= 0 && startCol + blockCols <= xpr.cols()); init(); } @@ -285,7 +307,7 @@ class Block /** \sa MapBase::innerStride() */ inline Index innerStride() const { - return ei_traits::HasSameStorageOrderAsXprType + return internal::traits::HasSameStorageOrderAsXprType ? m_xpr.innerStride() : m_xpr.outerStride(); } @@ -304,7 +326,7 @@ class Block #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal used by allowAligned() */ - inline Block(const XprType& xpr, const Scalar* data, Index blockRows, Index blockCols) + inline Block(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols) : Base(data, blockRows, blockCols), m_xpr(xpr) { init(); @@ -314,7 +336,7 @@ class Block protected: void init() { - m_outerStride = ei_traits::HasSameStorageOrderAsXprType + m_outerStride = internal::traits::HasSameStorageOrderAsXprType ? 
m_xpr.outerStride() : m_xpr.innerStride(); } diff --git a/gtsam/3rdparty/Eigen/src/Core/BooleanRedux.h b/gtsam/3rdparty/Eigen/src/Core/BooleanRedux.h index 9f9d1b594..5c3444a57 100644 --- a/gtsam/3rdparty/Eigen/src/Core/BooleanRedux.h +++ b/gtsam/3rdparty/Eigen/src/Core/BooleanRedux.h @@ -25,8 +25,10 @@ #ifndef EIGEN_ALLANDANY_H #define EIGEN_ALLANDANY_H +namespace internal { + template -struct ei_all_unroller +struct all_unroller { enum { col = (UnrollCount-1) / Derived::RowsAtCompileTime, @@ -35,24 +37,24 @@ struct ei_all_unroller inline static bool run(const Derived &mat) { - return ei_all_unroller::run(mat) && mat.coeff(row, col); + return all_unroller::run(mat) && mat.coeff(row, col); } }; template -struct ei_all_unroller +struct all_unroller { inline static bool run(const Derived &mat) { return mat.coeff(0, 0); } }; template -struct ei_all_unroller +struct all_unroller { inline static bool run(const Derived &) { return false; } }; template -struct ei_any_unroller +struct any_unroller { enum { col = (UnrollCount-1) / Derived::RowsAtCompileTime, @@ -61,22 +63,24 @@ struct ei_any_unroller inline static bool run(const Derived &mat) { - return ei_any_unroller::run(mat) || mat.coeff(row, col); + return any_unroller::run(mat) || mat.coeff(row, col); } }; template -struct ei_any_unroller +struct any_unroller { inline static bool run(const Derived &mat) { return mat.coeff(0, 0); } }; template -struct ei_any_unroller +struct any_unroller { inline static bool run(const Derived &) { return false; } }; +} // end namespace internal + /** \returns true if all coefficients are true * * Example: \include MatrixBase_all.cpp @@ -94,7 +98,7 @@ inline bool DenseBase::all() const && SizeAtCompileTime * (CoeffReadCost + NumTraits::AddCost) <= EIGEN_UNROLLING_LIMIT }; if(unroll) - return ei_all_unroller::run(derived()); else @@ -120,7 +124,7 @@ inline bool DenseBase::any() const && SizeAtCompileTime * (CoeffReadCost + NumTraits::AddCost) <= EIGEN_UNROLLING_LIMIT }; if(unroll) - 
return ei_any_unroller::run(derived()); else diff --git a/gtsam/3rdparty/Eigen/src/Core/CommaInitializer.h b/gtsam/3rdparty/Eigen/src/Core/CommaInitializer.h index da8df5592..92422bf2f 100644 --- a/gtsam/3rdparty/Eigen/src/Core/CommaInitializer.h +++ b/gtsam/3rdparty/Eigen/src/Core/CommaInitializer.h @@ -64,12 +64,12 @@ struct CommaInitializer m_row+=m_currentBlockRows; m_col = 0; m_currentBlockRows = 1; - ei_assert(m_row @@ -101,7 +101,7 @@ struct CommaInitializer inline ~CommaInitializer() { - ei_assert((m_row+m_currentBlockRows) == m_xpr.rows() + eigen_assert((m_row+m_currentBlockRows) == m_xpr.rows() && m_col == m_xpr.cols() && "Too few coefficients passed to comma initializer (operator<<)"); } diff --git a/gtsam/3rdparty/Eigen/src/Core/CwiseBinaryOp.h b/gtsam/3rdparty/Eigen/src/Core/CwiseBinaryOp.h index 5def0db2a..7386b2e18 100644 --- a/gtsam/3rdparty/Eigen/src/Core/CwiseBinaryOp.h +++ b/gtsam/3rdparty/Eigen/src/Core/CwiseBinaryOp.h @@ -45,56 +45,59 @@ * * \sa MatrixBase::binaryExpr(const MatrixBase &,const CustomBinaryOp &) const, class CwiseUnaryOp, class CwiseNullaryOp */ + +namespace internal { template -struct ei_traits > +struct traits > { - // we must not inherit from ei_traits since it has + // we must not inherit from traits since it has // the potential to cause problems with MSVC - typedef typename ei_cleantype::type Ancestor; - typedef typename ei_traits::XprKind XprKind; + typedef typename remove_all::type Ancestor; + typedef typename traits::XprKind XprKind; enum { - RowsAtCompileTime = ei_traits::RowsAtCompileTime, - ColsAtCompileTime = ei_traits::ColsAtCompileTime, - MaxRowsAtCompileTime = ei_traits::MaxRowsAtCompileTime, - MaxColsAtCompileTime = ei_traits::MaxColsAtCompileTime + RowsAtCompileTime = traits::RowsAtCompileTime, + ColsAtCompileTime = traits::ColsAtCompileTime, + MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = traits::MaxColsAtCompileTime }; // even though we require Lhs and Rhs to have the same 
scalar type (see CwiseBinaryOp constructor), // we still want to handle the case when the result type is different. - typedef typename ei_result_of< + typedef typename result_of< BinaryOp( typename Lhs::Scalar, typename Rhs::Scalar ) >::type Scalar; - typedef typename ei_promote_storage_type::StorageKind, - typename ei_traits::StorageKind>::ret StorageKind; - typedef typename ei_promote_index_type::Index, - typename ei_traits::Index>::type Index; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; typedef typename Lhs::Nested LhsNested; typedef typename Rhs::Nested RhsNested; - typedef typename ei_unref::type _LhsNested; - typedef typename ei_unref::type _RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; enum { LhsCoeffReadCost = _LhsNested::CoeffReadCost, RhsCoeffReadCost = _RhsNested::CoeffReadCost, LhsFlags = _LhsNested::Flags, RhsFlags = _RhsNested::Flags, - SameType = ei_is_same_type::ret, + SameType = is_same::value, StorageOrdersAgree = (int(Lhs::Flags)&RowMajorBit)==(int(Rhs::Flags)&RowMajorBit), Flags0 = (int(LhsFlags) | int(RhsFlags)) & ( HereditaryBits | (int(LhsFlags) & int(RhsFlags) & ( AlignedBit | (StorageOrdersAgree ? LinearAccessBit : 0) - | (ei_functor_traits::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0) + | (functor_traits::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0) ) ) ), Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit), - CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + ei_functor_traits::Cost + CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + functor_traits::Cost }; }; +} // end namespace internal // we require Lhs and Rhs to have the same scalar type. Currently there is no example of a binary functor // that would take two operands of different types. 
If there were such an example, then this check should be @@ -104,33 +107,33 @@ struct ei_traits > // So allowing mixing different types gives very unexpected errors when enabling vectorization, when the user tries to // add together a float matrix and a double matrix. #define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \ - EIGEN_STATIC_ASSERT((ei_functor_allows_mixing_real_and_complex::ret \ - ? int(ei_is_same_type::Real, typename NumTraits::Real>::ret) \ - : int(ei_is_same_type::ret)), \ + EIGEN_STATIC_ASSERT((internal::functor_allows_mixing_real_and_complex::ret \ + ? int(internal::is_same::Real, typename NumTraits::Real>::value) \ + : int(internal::is_same::value)), \ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) template class CwiseBinaryOpImpl; template -class CwiseBinaryOp : ei_no_assignment_operator, +class CwiseBinaryOp : internal::no_assignment_operator, public CwiseBinaryOpImpl< BinaryOp, Lhs, Rhs, - typename ei_promote_storage_type::StorageKind, - typename ei_traits::StorageKind>::ret> + typename internal::promote_storage_type::StorageKind, + typename internal::traits::StorageKind>::ret> { public: typedef typename CwiseBinaryOpImpl< BinaryOp, Lhs, Rhs, - typename ei_promote_storage_type::StorageKind, - typename ei_traits::StorageKind>::ret>::Base Base; + typename internal::promote_storage_type::StorageKind, + typename internal::traits::StorageKind>::ret>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp) - typedef typename ei_nested::type LhsNested; - typedef typename ei_nested::type RhsNested; - typedef typename ei_unref::type _LhsNested; - typedef typename ei_unref::type _RhsNested; + typedef typename internal::nested::type LhsNested; + typedef typename internal::nested::type RhsNested; + typedef typename internal::remove_reference::type _LhsNested; + typedef typename internal::remove_reference::type _RhsNested; EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& lhs, const Rhs& 
rhs, const BinaryOp& func = BinaryOp()) : m_lhs(lhs), m_rhs(rhs), m_functor(func) @@ -138,19 +141,19 @@ class CwiseBinaryOp : ei_no_assignment_operator, EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename Rhs::Scalar); // require the sizes to match EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs) - ei_assert(lhs.rows() == rhs.rows() && lhs.cols() == rhs.cols()); + eigen_assert(lhs.rows() == rhs.rows() && lhs.cols() == rhs.cols()); } EIGEN_STRONG_INLINE Index rows() const { // return the fixed size type if available to enable compile time optimizations - if (ei_traits::type>::RowsAtCompileTime==Dynamic) + if (internal::traits::type>::RowsAtCompileTime==Dynamic) return m_rhs.rows(); else return m_lhs.rows(); } EIGEN_STRONG_INLINE Index cols() const { // return the fixed size type if available to enable compile time optimizations - if (ei_traits::type>::ColsAtCompileTime==Dynamic) + if (internal::traits::type>::ColsAtCompileTime==Dynamic) return m_rhs.cols(); else return m_lhs.cols(); @@ -171,12 +174,12 @@ class CwiseBinaryOp : ei_no_assignment_operator, template class CwiseBinaryOpImpl - : public ei_dense_xpr_base >::type + : public internal::dense_xpr_base >::type { typedef CwiseBinaryOp Derived; public: - typedef typename ei_dense_xpr_base >::type Base; + typedef typename internal::dense_xpr_base >::type Base; EIGEN_DENSE_PUBLIC_INTERFACE( Derived ) EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const @@ -215,7 +218,7 @@ template EIGEN_STRONG_INLINE Derived & MatrixBase::operator-=(const MatrixBase &other) { - SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); + SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); tmp = other.derived(); return derived(); } @@ -229,7 +232,7 @@ template EIGEN_STRONG_INLINE Derived & MatrixBase::operator+=(const MatrixBase& other) { - SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); + SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); tmp = other.derived(); return derived(); } 
diff --git a/gtsam/3rdparty/Eigen/src/Core/CwiseNullaryOp.h b/gtsam/3rdparty/Eigen/src/Core/CwiseNullaryOp.h index a7e6b1b6d..a2f504985 100644 --- a/gtsam/3rdparty/Eigen/src/Core/CwiseNullaryOp.h +++ b/gtsam/3rdparty/Eigen/src/Core/CwiseNullaryOp.h @@ -42,32 +42,35 @@ * * \sa class CwiseUnaryOp, class CwiseBinaryOp, DenseBase::NullaryExpr() */ + +namespace internal { template -struct ei_traits > : ei_traits +struct traits > : traits { enum { - Flags = (ei_traits::Flags + Flags = (traits::Flags & ( HereditaryBits - | (ei_functor_has_linear_access::ret ? LinearAccessBit : 0) - | (ei_functor_traits::PacketAccess ? PacketAccessBit : 0))) - | (ei_functor_traits::IsRepeatable ? 0 : EvalBeforeNestingBit), - CoeffReadCost = ei_functor_traits::Cost + | (functor_has_linear_access::ret ? LinearAccessBit : 0) + | (functor_traits::PacketAccess ? PacketAccessBit : 0))) + | (functor_traits::IsRepeatable ? 0 : EvalBeforeNestingBit), + CoeffReadCost = functor_traits::Cost }; }; +} template -class CwiseNullaryOp : ei_no_assignment_operator, - public ei_dense_xpr_base< CwiseNullaryOp >::type +class CwiseNullaryOp : internal::no_assignment_operator, + public internal::dense_xpr_base< CwiseNullaryOp >::type { public: - typedef typename ei_dense_xpr_base::type Base; + typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp) CwiseNullaryOp(Index rows, Index cols, const NullaryOp& func = NullaryOp()) : m_rows(rows), m_cols(cols), m_functor(func) { - ei_assert(rows >= 0 + eigen_assert(rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)); @@ -99,8 +102,8 @@ class CwiseNullaryOp : ei_no_assignment_operator, } protected: - const ei_variable_if_dynamic m_rows; - const ei_variable_if_dynamic m_cols; + const internal::variable_if_dynamic m_rows; + const internal::variable_if_dynamic m_cols; const NullaryOp m_functor; }; @@ -185,7 +188,7 @@ template 
EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Constant(Index rows, Index cols, const Scalar& value) { - return DenseBase::NullaryExpr(rows, cols, ei_scalar_constant_op(value)); + return DenseBase::NullaryExpr(rows, cols, internal::scalar_constant_op(value)); } /** \returns an expression of a constant matrix of value \a value @@ -207,7 +210,7 @@ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Constant(Index size, const Scalar& value) { - return DenseBase::NullaryExpr(size, ei_scalar_constant_op(value)); + return DenseBase::NullaryExpr(size, internal::scalar_constant_op(value)); } /** \returns an expression of a constant matrix of value \a value @@ -224,7 +227,7 @@ EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType DenseBase::Constant(const Scalar& value) { EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) - return DenseBase::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, ei_scalar_constant_op(value)); + return DenseBase::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_constant_op(value)); } /** @@ -247,7 +250,7 @@ EIGEN_STRONG_INLINE const typename DenseBase::SequentialLinSpacedReturn DenseBase::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return DenseBase::NullaryExpr(size, ei_linspaced_op(low,high,size)); + return DenseBase::NullaryExpr(size, internal::linspaced_op(low,high,size)); } /** @@ -260,7 +263,7 @@ DenseBase::LinSpaced(Sequential_t, const Scalar& low, const Scalar& hig { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) - return DenseBase::NullaryExpr(Derived::SizeAtCompileTime, ei_linspaced_op(low,high,Derived::SizeAtCompileTime)); + return DenseBase::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op(low,high,Derived::SizeAtCompileTime)); } /** @@ -280,7 +283,7 @@ EIGEN_STRONG_INLINE const typename DenseBase::RandomAccessLinSpacedRetu 
DenseBase::LinSpaced(Index size, const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return DenseBase::NullaryExpr(size, ei_linspaced_op(low,high,size)); + return DenseBase::NullaryExpr(size, internal::linspaced_op(low,high,size)); } /** @@ -293,7 +296,7 @@ DenseBase::LinSpaced(const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) - return DenseBase::NullaryExpr(Derived::SizeAtCompileTime, ei_linspaced_op(low,high,Derived::SizeAtCompileTime)); + return DenseBase::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op(low,high,Derived::SizeAtCompileTime)); } /** \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */ @@ -303,7 +306,7 @@ bool DenseBase::isApproxToConstant { for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < rows(); ++i) - if(!ei_isApprox(this->coeff(i, j), value, prec)) + if(!internal::isApprox(this->coeff(i, j), value, prec)) return false; return true; } @@ -349,7 +352,7 @@ EIGEN_STRONG_INLINE Derived& DenseBase::setConstant(const Scalar& value */ template EIGEN_STRONG_INLINE Derived& -DenseStorageBase::setConstant(Index size, const Scalar& value) +PlainObjectBase::setConstant(Index size, const Scalar& value) { resize(size); return setConstant(value); @@ -368,7 +371,7 @@ DenseStorageBase::setConstant(Index size, const Scalar& value) */ template EIGEN_STRONG_INLINE Derived& -DenseStorageBase::setConstant(Index rows, Index cols, const Scalar& value) +PlainObjectBase::setConstant(Index rows, Index cols, const Scalar& value) { resize(rows, cols); return setConstant(value); @@ -390,7 +393,7 @@ template EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(Index size, const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return derived() = Derived::NullaryExpr(size, ei_linspaced_op(low,high,size)); + return derived() = Derived::NullaryExpr(size, 
internal::linspaced_op(low,high,size)); } // zero: @@ -469,7 +472,7 @@ bool DenseBase::isZero(RealScalar prec) const { for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < rows(); ++i) - if(!ei_isMuchSmallerThan(this->coeff(i, j), static_cast(1), prec)) + if(!internal::isMuchSmallerThan(this->coeff(i, j), static_cast(1), prec)) return false; return true; } @@ -498,7 +501,7 @@ EIGEN_STRONG_INLINE Derived& DenseBase::setZero() */ template EIGEN_STRONG_INLINE Derived& -DenseStorageBase::setZero(Index size) +PlainObjectBase::setZero(Index size) { resize(size); return setConstant(Scalar(0)); @@ -516,7 +519,7 @@ DenseStorageBase::setZero(Index size) */ template EIGEN_STRONG_INLINE Derived& -DenseStorageBase::setZero(Index rows, Index cols) +PlainObjectBase::setZero(Index rows, Index cols) { resize(rows, cols); return setConstant(Scalar(0)); @@ -624,7 +627,7 @@ EIGEN_STRONG_INLINE Derived& DenseBase::setOnes() */ template EIGEN_STRONG_INLINE Derived& -DenseStorageBase::setOnes(Index size) +PlainObjectBase::setOnes(Index size) { resize(size); return setConstant(Scalar(1)); @@ -642,7 +645,7 @@ DenseStorageBase::setOnes(Index size) */ template EIGEN_STRONG_INLINE Derived& -DenseStorageBase::setOnes(Index rows, Index cols) +PlainObjectBase::setOnes(Index rows, Index cols) { resize(rows, cols); return setConstant(Scalar(1)); @@ -668,7 +671,7 @@ template EIGEN_STRONG_INLINE const typename MatrixBase::IdentityReturnType MatrixBase::Identity(Index rows, Index cols) { - return DenseBase::NullaryExpr(rows, cols, ei_scalar_identity_op()); + return DenseBase::NullaryExpr(rows, cols, internal::scalar_identity_op()); } /** \returns an expression of the identity matrix (not necessarily square). 
@@ -686,7 +689,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase::IdentityReturnType MatrixBase::Identity() { EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) - return MatrixBase::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, ei_scalar_identity_op()); + return MatrixBase::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_identity_op()); } /** \returns true if *this is approximately equal to the identity matrix @@ -708,12 +711,12 @@ bool MatrixBase::isIdentity { if(i == j) { - if(!ei_isApprox(this->coeff(i, j), static_cast(1), prec)) + if(!internal::isApprox(this->coeff(i, j), static_cast(1), prec)) return false; } else { - if(!ei_isMuchSmallerThan(this->coeff(i, j), static_cast(1), prec)) + if(!internal::isMuchSmallerThan(this->coeff(i, j), static_cast(1), prec)) return false; } } @@ -721,8 +724,10 @@ bool MatrixBase::isIdentity return true; } +namespace internal { + template=16)> -struct ei_setIdentity_impl +struct setIdentity_impl { static EIGEN_STRONG_INLINE Derived& run(Derived& m) { @@ -731,7 +736,7 @@ struct ei_setIdentity_impl }; template -struct ei_setIdentity_impl +struct setIdentity_impl { typedef typename Derived::Index Index; static EIGEN_STRONG_INLINE Derived& run(Derived& m) @@ -743,6 +748,8 @@ struct ei_setIdentity_impl } }; +} // end namespace internal + /** Writes the identity expression (not necessarily square) into *this. * * Example: \include MatrixBase_setIdentity.cpp @@ -753,7 +760,7 @@ struct ei_setIdentity_impl template EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity() { - return ei_setIdentity_impl::run(derived()); + return internal::setIdentity_impl::run(derived()); } /** \brief Resizes to the given size, and writes the identity expression (not necessarily square) into *this. 
diff --git a/gtsam/3rdparty/Eigen/src/Core/CwiseUnaryOp.h b/gtsam/3rdparty/Eigen/src/Core/CwiseUnaryOp.h index 4c92f36bb..958571d64 100644 --- a/gtsam/3rdparty/Eigen/src/Core/CwiseUnaryOp.h +++ b/gtsam/3rdparty/Eigen/src/Core/CwiseUnaryOp.h @@ -45,33 +45,36 @@ * * \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp */ + +namespace internal { template -struct ei_traits > - : ei_traits +struct traits > + : traits { - typedef typename ei_result_of< + typedef typename result_of< UnaryOp(typename XprType::Scalar) >::type Scalar; typedef typename XprType::Nested XprTypeNested; - typedef typename ei_unref::type _XprTypeNested; + typedef typename remove_reference::type _XprTypeNested; enum { Flags = _XprTypeNested::Flags & ( HereditaryBits | LinearAccessBit | AlignedBit - | (ei_functor_traits::PacketAccess ? PacketAccessBit : 0)), - CoeffReadCost = _XprTypeNested::CoeffReadCost + ei_functor_traits::Cost + | (functor_traits::PacketAccess ? PacketAccessBit : 0)), + CoeffReadCost = _XprTypeNested::CoeffReadCost + functor_traits::Cost }; }; +} template class CwiseUnaryOpImpl; template -class CwiseUnaryOp : ei_no_assignment_operator, - public CwiseUnaryOpImpl::StorageKind> +class CwiseUnaryOp : internal::no_assignment_operator, + public CwiseUnaryOpImpl::StorageKind> { public: - typedef typename CwiseUnaryOpImpl::StorageKind>::Base Base; + typedef typename CwiseUnaryOpImpl::StorageKind>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp) inline CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp()) @@ -84,11 +87,11 @@ class CwiseUnaryOp : ei_no_assignment_operator, const UnaryOp& functor() const { return m_functor; } /** \returns the nested expression */ - const typename ei_cleantype::type& + const typename internal::remove_all::type& nestedExpression() const { return m_xpr; } /** \returns the nested expression */ - typename ei_cleantype::type& + typename internal::remove_all::type& nestedExpression() { return 
m_xpr.const_cast_derived(); } protected: @@ -100,12 +103,12 @@ class CwiseUnaryOp : ei_no_assignment_operator, // It can be used for any expression types implementing the dense concept. template class CwiseUnaryOpImpl - : public ei_dense_xpr_base >::type + : public internal::dense_xpr_base >::type { public: typedef CwiseUnaryOp Derived; - typedef typename ei_dense_xpr_base >::type Base; + typedef typename internal::dense_xpr_base >::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Derived) EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const diff --git a/gtsam/3rdparty/Eigen/src/Core/CwiseUnaryView.h b/gtsam/3rdparty/Eigen/src/Core/CwiseUnaryView.h index 37c58223e..d24ef0373 100644 --- a/gtsam/3rdparty/Eigen/src/Core/CwiseUnaryView.h +++ b/gtsam/3rdparty/Eigen/src/Core/CwiseUnaryView.h @@ -38,39 +38,42 @@ * * \sa MatrixBase::unaryViewExpr(const CustomUnaryOp &) const, class CwiseUnaryOp */ + +namespace internal { template -struct ei_traits > - : ei_traits +struct traits > + : traits { - typedef typename ei_result_of< - ViewOp(typename ei_traits::Scalar) + typedef typename result_of< + ViewOp(typename traits::Scalar) >::type Scalar; typedef typename MatrixType::Nested MatrixTypeNested; - typedef typename ei_cleantype::type _MatrixTypeNested; + typedef typename remove_all::type _MatrixTypeNested; enum { - Flags = (ei_traits<_MatrixTypeNested>::Flags & (HereditaryBits | LvalueBit | LinearAccessBit | DirectAccessBit)), - CoeffReadCost = ei_traits<_MatrixTypeNested>::CoeffReadCost + ei_functor_traits::Cost, - MatrixTypeInnerStride = ei_inner_stride_at_compile_time::ret, + Flags = (traits<_MatrixTypeNested>::Flags & (HereditaryBits | LvalueBit | LinearAccessBit | DirectAccessBit)), + CoeffReadCost = traits<_MatrixTypeNested>::CoeffReadCost + functor_traits::Cost, + MatrixTypeInnerStride = inner_stride_at_compile_time::ret, // need to cast the sizeof's from size_t to int explicitly, otherwise: // "error: no integral type can represent all of the enumerator values 
InnerStrideAtCompileTime = MatrixTypeInnerStride == Dynamic ? int(Dynamic) : int(MatrixTypeInnerStride) - * int(sizeof(typename ei_traits::Scalar) / sizeof(Scalar)), - OuterStrideAtCompileTime = ei_outer_stride_at_compile_time::ret + * int(sizeof(typename traits::Scalar) / sizeof(Scalar)), + OuterStrideAtCompileTime = outer_stride_at_compile_time::ret }; }; +} template class CwiseUnaryViewImpl; template -class CwiseUnaryView : ei_no_assignment_operator, - public CwiseUnaryViewImpl::StorageKind> +class CwiseUnaryView : internal::no_assignment_operator, + public CwiseUnaryViewImpl::StorageKind> { public: - typedef typename CwiseUnaryViewImpl::StorageKind>::Base Base; + typedef typename CwiseUnaryViewImpl::StorageKind>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryView) inline CwiseUnaryView(const MatrixType& mat, const ViewOp& func = ViewOp()) @@ -85,33 +88,33 @@ class CwiseUnaryView : ei_no_assignment_operator, const ViewOp& functor() const { return m_functor; } /** \returns the nested expression */ - const typename ei_cleantype::type& + const typename internal::remove_all::type& nestedExpression() const { return m_matrix; } /** \returns the nested expression */ - typename ei_cleantype::type& + typename internal::remove_all::type& nestedExpression() { return m_matrix.const_cast_derived(); } protected: // FIXME changed from MatrixType::Nested because of a weird compilation error with sun CC - const typename ei_nested::type m_matrix; + const typename internal::nested::type m_matrix; ViewOp m_functor; }; template class CwiseUnaryViewImpl - : public ei_dense_xpr_base< CwiseUnaryView >::type + : public internal::dense_xpr_base< CwiseUnaryView >::type { public: typedef CwiseUnaryView Derived; - typedef typename ei_dense_xpr_base< CwiseUnaryView >::type Base; + typedef typename internal::dense_xpr_base< CwiseUnaryView >::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Derived) inline Index innerStride() const { - return derived().nestedExpression().innerStride() * 
sizeof(typename ei_traits::Scalar) / sizeof(Scalar); + return derived().nestedExpression().innerStride() * sizeof(typename internal::traits::Scalar) / sizeof(Scalar); } inline Index outerStride() const diff --git a/gtsam/3rdparty/Eigen/src/Core/DenseBase.h b/gtsam/3rdparty/Eigen/src/Core/DenseBase.h index 3ef58ab03..838fa4030 100644 --- a/gtsam/3rdparty/Eigen/src/Core/DenseBase.h +++ b/gtsam/3rdparty/Eigen/src/Core/DenseBase.h @@ -34,28 +34,37 @@ * This class is the base that is inherited by all dense objects (matrix, vector, arrays, * and related expression types). The common Eigen API for dense objects is contained in this class. * - * \param Derived is the derived type, e.g., a matrix type or an expression. + * \tparam Derived is the derived type, e.g., a matrix type or an expression. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_DENSEBASE_PLUGIN. * * \sa \ref TopicClassHierarchy */ template class DenseBase #ifndef EIGEN_PARSED_BY_DOXYGEN - : public ei_special_scalar_op_base::Scalar, - typename NumTraits::Scalar>::Real> + : public internal::special_scalar_op_base::Scalar, + typename NumTraits::Scalar>::Real> #else : public DenseCoeffsBase #endif // not EIGEN_PARSED_BY_DOXYGEN { public: - using ei_special_scalar_op_base::Scalar, - typename NumTraits::Scalar>::Real>::operator*; + using internal::special_scalar_op_base::Scalar, + typename NumTraits::Scalar>::Real>::operator*; class InnerIterator; - typedef typename ei_traits::StorageKind StorageKind; - typedef typename ei_traits::Index Index; /**< The type of indices */ - typedef typename ei_traits::Scalar Scalar; - typedef typename ei_packet_traits::type PacketScalar; + typedef typename internal::traits::StorageKind StorageKind; + + /** \brief The type of indices + * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE. 
+ * \sa \ref TopicPreprocessorDirectives. + */ + typedef typename internal::traits::Index Index; + + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; typedef DenseCoeffsBase Base; @@ -93,26 +102,26 @@ template class DenseBase enum { - RowsAtCompileTime = ei_traits::RowsAtCompileTime, + RowsAtCompileTime = internal::traits::RowsAtCompileTime, /**< The number of rows at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */ - ColsAtCompileTime = ei_traits::ColsAtCompileTime, + ColsAtCompileTime = internal::traits::ColsAtCompileTime, /**< The number of columns at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */ - SizeAtCompileTime = (ei_size_at_compile_time::RowsAtCompileTime, - ei_traits::ColsAtCompileTime>::ret), + SizeAtCompileTime = (internal::size_at_compile_time::RowsAtCompileTime, + internal::traits::ColsAtCompileTime>::ret), /**< This is equal to the number of coefficients, i.e. the number of * rows times the number of columns, or to \a Dynamic if this is not * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */ - MaxRowsAtCompileTime = ei_traits::MaxRowsAtCompileTime, + MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, /**< This value is equal to the maximum possible number of rows that this expression * might have. If this expression might have an arbitrarily high number of rows, * this value is set to \a Dynamic. 
@@ -123,7 +132,7 @@ template class DenseBase * \sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime */ - MaxColsAtCompileTime = ei_traits::MaxColsAtCompileTime, + MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime, /**< This value is equal to the maximum possible number of columns that this expression * might have. If this expression might have an arbitrarily high number of columns, * this value is set to \a Dynamic. @@ -134,8 +143,8 @@ template class DenseBase * \sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime */ - MaxSizeAtCompileTime = (ei_size_at_compile_time::MaxRowsAtCompileTime, - ei_traits::MaxColsAtCompileTime>::ret), + MaxSizeAtCompileTime = (internal::size_at_compile_time::MaxRowsAtCompileTime, + internal::traits::MaxColsAtCompileTime>::ret), /**< This value is equal to the maximum possible number of coefficients that this expression * might have. If this expression might have an arbitrarily high number of coefficients, * this value is set to \a Dynamic. @@ -146,14 +155,14 @@ template class DenseBase * \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime */ - IsVectorAtCompileTime = ei_traits::MaxRowsAtCompileTime == 1 - || ei_traits::MaxColsAtCompileTime == 1, + IsVectorAtCompileTime = internal::traits::MaxRowsAtCompileTime == 1 + || internal::traits::MaxColsAtCompileTime == 1, /**< This is set to true if either the number of rows or the number of * columns is known at compile-time to be equal to 1. Indeed, in that case, * we are dealing with a column-vector (if there is only one column) or with * a row-vector (if there is only one row). */ - Flags = ei_traits::Flags, + Flags = internal::traits::Flags, /**< This stores expression \ref flags flags which may or may not be inherited by new expressions * constructed from this one. See the \ref flags "list of flags". */ @@ -163,15 +172,17 @@ template class DenseBase InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? 
SizeAtCompileTime : int(IsRowMajor) ? ColsAtCompileTime : RowsAtCompileTime, - CoeffReadCost = ei_traits::CoeffReadCost, + CoeffReadCost = internal::traits::CoeffReadCost, /**< This is a rough measure of how expensive it is to read one coefficient from * this expression. */ - InnerStrideAtCompileTime = ei_inner_stride_at_compile_time::ret, - OuterStrideAtCompileTime = ei_outer_stride_at_compile_time::ret + InnerStrideAtCompileTime = internal::inner_stride_at_compile_time::ret, + OuterStrideAtCompileTime = internal::outer_stride_at_compile_time::ret }; + enum { ThisConstantIsPrivateInPlainObjectBase }; + /** \returns the number of nonzero coefficients which is in practice the number * of stored coefficients. */ inline Index nonZeros() const { return size(); } @@ -183,8 +194,8 @@ template class DenseBase /** \returns the outer size. * * \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension - * with respect to the storage order, i.e., the number of columns for a column-major matrix, - * and the number of rows for a row-major matrix. */ + * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of columns for a + * column-major matrix, and the number of rows for a row-major matrix. */ Index outerSize() const { return IsVectorAtCompileTime ? 1 @@ -194,8 +205,8 @@ template class DenseBase /** \returns the inner size. * * \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension - * with respect to the storage order, i.e., the number of rows for a column-major matrix, - * and the number of columns for a row-major matrix. */ + * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a + * column-major matrix, and the number of columns for a row-major matrix. */ Index innerSize() const { return IsVectorAtCompileTime ? 
this->size() @@ -209,7 +220,7 @@ template class DenseBase void resize(Index size) { EIGEN_ONLY_USED_FOR_DEBUG(size); - ei_assert(size == this->size() + eigen_assert(size == this->size() && "DenseBase::resize() does not actually allow to resize."); } /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are @@ -220,20 +231,20 @@ template class DenseBase { EIGEN_ONLY_USED_FOR_DEBUG(rows); EIGEN_ONLY_USED_FOR_DEBUG(cols); - ei_assert(rows == this->rows() && cols == this->cols() + eigen_assert(rows == this->rows() && cols == this->cols() && "DenseBase::resize() does not actually allow to resize."); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal Represents a matrix with all coefficients equal to one another*/ - typedef CwiseNullaryOp,Derived> ConstantReturnType; + typedef CwiseNullaryOp,Derived> ConstantReturnType; /** \internal Represents a vector with linearly spaced coefficients that allows sequential access only. */ - typedef CwiseNullaryOp,Derived> SequentialLinSpacedReturnType; + typedef CwiseNullaryOp,Derived> SequentialLinSpacedReturnType; /** \internal Represents a vector with linearly spaced coefficients that allows random access. 
*/ - typedef CwiseNullaryOp,Derived> RandomAccessLinSpacedReturnType; + typedef CwiseNullaryOp,Derived> RandomAccessLinSpacedReturnType; /** \internal the return type of MatrixBase::eigenvalues() */ - typedef Matrix::Scalar>::Real, ei_traits::ColsAtCompileTime, 1> EigenvaluesReturnType; + typedef Matrix::Scalar>::Real, internal::traits::ColsAtCompileTime, 1> EigenvaluesReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN @@ -273,7 +284,8 @@ template class DenseBase CommaInitializer operator<< (const DenseBase& other); Eigen::Transpose transpose(); - const Eigen::Transpose transpose() const; + typedef const Transpose ConstTransposeReturnType; + ConstTransposeReturnType transpose() const; void transposeInPlace(); #ifndef EIGEN_NO_DEBUG protected: @@ -282,41 +294,29 @@ template class DenseBase public: #endif - VectorBlock segment(Index start, Index size); - const VectorBlock segment(Index start, Index size) const; + typedef VectorBlock SegmentReturnType; + typedef const VectorBlock ConstSegmentReturnType; + template struct FixedSegmentReturnType { typedef VectorBlock Type; }; + template struct ConstFixedSegmentReturnType { typedef const VectorBlock Type; }; + + // Note: The "DenseBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations. 
+ SegmentReturnType segment(Index start, Index size); + typename DenseBase::ConstSegmentReturnType segment(Index start, Index size) const; - VectorBlock head(Index size); - const VectorBlock head(Index size) const; + SegmentReturnType head(Index size); + typename DenseBase::ConstSegmentReturnType head(Index size) const; - VectorBlock tail(Index size); - const VectorBlock tail(Index size) const; + SegmentReturnType tail(Index size); + typename DenseBase::ConstSegmentReturnType tail(Index size) const; - template VectorBlock head(void); - template const VectorBlock head() const; + template typename FixedSegmentReturnType::Type head(); + template typename ConstFixedSegmentReturnType::Type head() const; - template VectorBlock tail(); - template const VectorBlock tail() const; + template typename FixedSegmentReturnType::Type tail(); + template typename ConstFixedSegmentReturnType::Type tail() const; - template VectorBlock segment(Index start); - template const VectorBlock segment(Index start) const; - - Diagonal diagonal(); - const Diagonal diagonal() const; - - template Diagonal diagonal(); - template const Diagonal diagonal() const; - - Diagonal diagonal(Index index); - const Diagonal diagonal(Index index) const; - - template TriangularView part(); - template const TriangularView part() const; - - template TriangularView triangularView(); - template const TriangularView triangularView() const; - - template SelfAdjointView selfadjointView(); - template const SelfAdjointView selfadjointView() const; + template typename FixedSegmentReturnType::Type segment(Index start); + template typename ConstFixedSegmentReturnType::Type segment(Index start) const; static const ConstantReturnType Constant(Index rows, Index cols, const Scalar& value); @@ -381,22 +381,39 @@ template class DenseBase * Notice that in the case of a plain matrix or vector (not an expression) this function just returns * a const reference, in order to avoid a useless copy. 
*/ - EIGEN_STRONG_INLINE const typename ei_eval::type eval() const + EIGEN_STRONG_INLINE const typename internal::eval::type eval() const { // Even though MSVC does not honor strong inlining when the return type // is a dynamic matrix, we desperately need strong inlining for fixed // size types on MSVC. - return typename ei_eval::type(derived()); + return typename internal::eval::type(derived()); } + /** swaps *this with the expression \a other. + * + */ template - void swap(DenseBase EIGEN_REF_TO_TEMPORARY other); + void swap(const DenseBase& other, + int = OtherDerived::ThisConstantIsPrivateInPlainObjectBase) + { + SwapWrapper(derived()).lazyAssign(other.derived()); + } + + /** swaps *this with the matrix or array \a other. + * + */ + template + void swap(PlainObjectBase& other) + { + SwapWrapper(derived()).lazyAssign(other.derived()); + } + inline const NestByValue nestByValue() const; inline const ForceAlignedAccess forceAlignedAccess() const; inline ForceAlignedAccess forceAlignedAccess(); - template inline const typename ei_meta_if,Derived&>::ret forceAlignedAccessIf() const; - template inline typename ei_meta_if,Derived&>::ret forceAlignedAccessIf(); + template inline const typename internal::conditional,Derived&>::type forceAlignedAccessIf() const; + template inline typename internal::conditional,Derived&>::type forceAlignedAccessIf(); Scalar sum() const; Scalar mean() const; @@ -404,17 +421,20 @@ template class DenseBase Scalar prod() const; - typename ei_traits::Scalar minCoeff() const; - typename ei_traits::Scalar maxCoeff() const; + typename internal::traits::Scalar minCoeff() const; + typename internal::traits::Scalar maxCoeff() const; - typename ei_traits::Scalar minCoeff(Index* row, Index* col) const; - typename ei_traits::Scalar maxCoeff(Index* row, Index* col) const; - - typename ei_traits::Scalar minCoeff(Index* index) const; - typename ei_traits::Scalar maxCoeff(Index* index) const; + template + typename internal::traits::Scalar 
minCoeff(IndexType* row, IndexType* col) const; + template + typename internal::traits::Scalar maxCoeff(IndexType* row, IndexType* col) const; + template + typename internal::traits::Scalar minCoeff(IndexType* index) const; + template + typename internal::traits::Scalar maxCoeff(IndexType* index) const; template - typename ei_result_of::Scalar)>::type + typename internal::result_of::Scalar)>::type redux(const BinaryOp& func) const; template @@ -422,20 +442,33 @@ template class DenseBase inline const WithFormat format(const IOFormat& fmt) const; + /** \returns the unique coefficient of a 1x1 expression */ + CoeffReturnType value() const + { + EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) + eigen_assert(this->rows() == 1 && this->cols() == 1); + return derived().coeff(0,0); + } + /////////// Array module /////////// bool all(void) const; bool any(void) const; Index count() const; - const VectorwiseOp rowwise() const; - VectorwiseOp rowwise(); - const VectorwiseOp colwise() const; - VectorwiseOp colwise(); + typedef VectorwiseOp RowwiseReturnType; + typedef const VectorwiseOp ConstRowwiseReturnType; + typedef VectorwiseOp ColwiseReturnType; + typedef const VectorwiseOp ConstColwiseReturnType; - static const CwiseNullaryOp,Derived> Random(Index rows, Index cols); - static const CwiseNullaryOp,Derived> Random(Index size); - static const CwiseNullaryOp,Derived> Random(); + ConstRowwiseReturnType rowwise() const; + RowwiseReturnType rowwise(); + ConstColwiseReturnType colwise() const; + ColwiseReturnType colwise(); + + static const CwiseNullaryOp,Derived> Random(Index rows, Index cols); + static const CwiseNullaryOp,Derived> Random(Index size); + static const CwiseNullaryOp,Derived> Random(); template const Select @@ -456,8 +489,10 @@ template class DenseBase const Replicate replicate() const; const Replicate replicate(Index rowFacor,Index colFactor) const; - Eigen::Reverse reverse(); - const Eigen::Reverse reverse() const; + typedef Reverse ReverseReturnType; + typedef const 
Reverse ConstReverseReturnType; + ReverseReturnType reverse(); + ConstReverseReturnType reverse() const; void reverseInPlace(); #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase @@ -482,7 +517,7 @@ template class DenseBase // disable the use of evalTo for dense objects with a nice compilation error template inline void evalTo(Dest& ) const { - EIGEN_STATIC_ASSERT((ei_is_same_type::ret),THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS); + EIGEN_STATIC_ASSERT((internal::is_same::value),THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS); } protected: @@ -493,8 +528,6 @@ template class DenseBase * Only do it when debugging Eigen, as this borders on paranoiac and could slow compilation down */ #ifdef EIGEN_INTERNAL_DEBUGGING - EIGEN_STATIC_ASSERT(ei_are_flags_consistent::ret, - INVALID_MATRIXBASE_TEMPLATE_PARAMETERS) EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, int(IsRowMajor)) && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, int(!IsRowMajor))), INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION) diff --git a/gtsam/3rdparty/Eigen/src/Core/DenseCoeffsBase.h b/gtsam/3rdparty/Eigen/src/Core/DenseCoeffsBase.h index 918b246a8..7838a1cfb 100644 --- a/gtsam/3rdparty/Eigen/src/Core/DenseCoeffsBase.h +++ b/gtsam/3rdparty/Eigen/src/Core/DenseCoeffsBase.h @@ -25,6 +25,13 @@ #ifndef EIGEN_DENSECOEFFSBASE_H #define EIGEN_DENSECOEFFSBASE_H +namespace internal { +template struct add_const_on_value_type_if_arithmetic +{ + typedef typename conditional::value, T, typename add_const_on_value_type::type>::type type; +}; +} + /** \brief Base class providing read-only coefficient access to matrices and arrays. 
* \ingroup Core_Module * \tparam Derived Type of the derived class @@ -40,15 +47,26 @@ template class DenseCoeffsBase : public EigenBase { public: - typedef typename ei_traits::StorageKind StorageKind; - typedef typename ei_traits::Index Index; - typedef typename ei_traits::Scalar Scalar; - typedef typename ei_packet_traits::type PacketScalar; - typedef typename ei_meta_if::Flags&LvalueBit), - const Scalar&, - typename ei_meta_if::ret, Scalar, const Scalar>::ret - >::ret CoeffReturnType; - typedef typename ei_makeconst_return_type::type>::type PacketReturnType; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + + // Explanation for this CoeffReturnType typedef. + // - This is the return type of the coeff() method. + // - The LvalueBit means exactly that we can offer a coeffRef() method, which means exactly that we can get references + // to coeffs, which means exactly that we can have coeff() return a const reference (as opposed to returning a value). + // - The is_artihmetic check is required since "const int", "const double", etc. will cause warnings on some systems + // while the declaration of "const T", where T is a non arithmetic type does not. Always returning "const Scalar&" is + // not possible, since the underlying expressions might not offer a valid address the reference could be referring to. 
+ typedef typename internal::conditional::Flags&LvalueBit), + const Scalar&, + typename internal::conditional::value, Scalar, const Scalar>::type + >::type CoeffReturnType; + + typedef typename internal::add_const_on_value_type_if_arithmetic< + typename internal::packet_traits::type + >::type PacketReturnType; typedef EigenBase Base; using Base::rows; @@ -88,7 +106,7 @@ class DenseCoeffsBase : public EigenBase */ EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { - ei_internal_assert(row >= 0 && row < rows() + eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); return derived().coeff(row, col); } @@ -105,7 +123,7 @@ class DenseCoeffsBase : public EigenBase */ EIGEN_STRONG_INLINE CoeffReturnType operator()(Index row, Index col) const { - ei_assert(row >= 0 && row < rows() + eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); return derived().coeff(row, col); } @@ -128,7 +146,7 @@ class DenseCoeffsBase : public EigenBase EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { - ei_internal_assert(index >= 0 && index < size()); + eigen_internal_assert(index >= 0 && index < size()); return derived().coeff(index); } @@ -144,9 +162,11 @@ class DenseCoeffsBase : public EigenBase EIGEN_STRONG_INLINE CoeffReturnType operator[](Index index) const { + #ifndef EIGEN2_SUPPORT EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD) - ei_assert(index >= 0 && index < size()); + #endif + eigen_assert(index >= 0 && index < size()); return derived().coeff(index); } @@ -163,7 +183,7 @@ class DenseCoeffsBase : public EigenBase EIGEN_STRONG_INLINE CoeffReturnType operator()(Index index) const { - ei_assert(index >= 0 && index < size()); + eigen_assert(index >= 0 && index < size()); return derived().coeff(index); } @@ -187,7 +207,8 @@ class DenseCoeffsBase : public EigenBase EIGEN_STRONG_INLINE CoeffReturnType w() const { return (*this)[3]; } - 
/** \returns the packet of coefficients starting at the given row and column. It is your responsibility + /** \internal + * \returns the packet of coefficients starting at the given row and column. It is your responsibility * to ensure that a packet really starts there. This method is only available on expressions having the * PacketAccessBit. * @@ -199,12 +220,13 @@ class DenseCoeffsBase : public EigenBase template EIGEN_STRONG_INLINE PacketReturnType packet(Index row, Index col) const { - ei_internal_assert(row >= 0 && row < rows() + eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); return derived().template packet(row,col); } + /** \internal */ template EIGEN_STRONG_INLINE PacketReturnType packetByOuterInner(Index outer, Index inner) const { @@ -212,7 +234,8 @@ class DenseCoeffsBase : public EigenBase colIndexByOuterInner(outer, inner)); } - /** \returns the packet of coefficients starting at the given index. It is your responsibility + /** \internal + * \returns the packet of coefficients starting at the given index. It is your responsibility * to ensure that a packet really starts there. This method is only available on expressions having the * PacketAccessBit and the LinearAccessBit. * @@ -224,13 +247,13 @@ class DenseCoeffsBase : public EigenBase template EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { - ei_internal_assert(index >= 0 && index < size()); + eigen_internal_assert(index >= 0 && index < size()); return derived().template packet(index); } protected: // explanation: DenseBase is doing "using ..." on the methods from DenseCoeffsBase. - // But some methods are only available in the EnableDirectAccessAPI case. + // But some methods are only available in the DirectAccess case. // So we add dummy methods here with these names, so that "using... " doesn't fail. 
// It's not private so that the child class DenseBase can access them, and it's not public // either since it's an implementation detail, so has to be protected. @@ -267,10 +290,10 @@ class DenseCoeffsBase : public DenseCoeffsBase Base; - typedef typename ei_traits::StorageKind StorageKind; - typedef typename ei_traits::Index Index; - typedef typename ei_traits::Scalar Scalar; - typedef typename ei_packet_traits::type PacketScalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; using Base::coeff; @@ -303,7 +326,7 @@ class DenseCoeffsBase : public DenseCoeffsBase= 0 && row < rows() + eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); return derived().coeffRef(row, col); } @@ -323,7 +346,7 @@ class DenseCoeffsBase : public DenseCoeffsBase= 0 && row < rows() + eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); return derived().coeffRef(row, col); } @@ -347,7 +370,7 @@ class DenseCoeffsBase : public DenseCoeffsBase= 0 && index < size()); + eigen_internal_assert(index >= 0 && index < size()); return derived().coeffRef(index); } @@ -361,9 +384,11 @@ class DenseCoeffsBase : public DenseCoeffsBase= 0 && index < size()); + #endif + eigen_assert(index >= 0 && index < size()); return derived().coeffRef(index); } @@ -379,7 +404,7 @@ class DenseCoeffsBase : public DenseCoeffsBase= 0 && index < size()); + eigen_assert(index >= 0 && index < size()); return derived().coeffRef(index); } @@ -403,7 +428,8 @@ class DenseCoeffsBase : public DenseCoeffsBase : public DenseCoeffsBase EIGEN_STRONG_INLINE void writePacket - (Index row, Index col, const typename ei_packet_traits::type& x) + (Index row, Index col, const typename internal::packet_traits::type& x) { - ei_internal_assert(row >= 0 && row < rows() + 
eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); derived().template writePacket(row,col,x); } + /** \internal */ template EIGEN_STRONG_INLINE void writePacketByOuterInner - (Index outer, Index inner, const typename ei_packet_traits::type& x) + (Index outer, Index inner, const typename internal::packet_traits::type& x) { writePacket(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner), x); } - /** Stores the given packet of coefficients, at the given index in this expression. It is your responsibility + /** \internal + * Stores the given packet of coefficients, at the given index in this expression. It is your responsibility * to ensure that a packet really starts there. This method is only available on expressions having the * PacketAccessBit and the LinearAccessBit. * @@ -439,12 +467,11 @@ class DenseCoeffsBase : public DenseCoeffsBase EIGEN_STRONG_INLINE void writePacket - (Index index, const typename ei_packet_traits::type& x) + (Index index, const typename internal::packet_traits::type& x) { - ei_internal_assert(index >= 0 && index < size()); + eigen_internal_assert(index >= 0 && index < size()); derived().template writePacket(index,x); } @@ -461,7 +488,7 @@ class DenseCoeffsBase : public DenseCoeffsBase EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, const DenseBase& other) { - ei_internal_assert(row >= 0 && row < rows() + eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); derived().coeffRef(row, col) = other.derived().coeff(row, col); } @@ -477,7 +504,7 @@ class DenseCoeffsBase : public DenseCoeffsBase EIGEN_STRONG_INLINE void copyCoeff(Index index, const DenseBase& other) { - ei_internal_assert(index >= 0 && index < size()); + eigen_internal_assert(index >= 0 && index < size()); derived().coeffRef(index) = other.derived().coeff(index); } @@ -502,7 +529,7 @@ class DenseCoeffsBase : public DenseCoeffsBase EIGEN_STRONG_INLINE void copyPacket(Index row, Index col, const DenseBase& 
other) { - ei_internal_assert(row >= 0 && row < rows() + eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); derived().template writePacket(row, col, other.derived().template packet(row, col)); @@ -519,11 +546,12 @@ class DenseCoeffsBase : public DenseCoeffsBase EIGEN_STRONG_INLINE void copyPacket(Index index, const DenseBase& other) { - ei_internal_assert(index >= 0 && index < size()); + eigen_internal_assert(index >= 0 && index < size()); derived().template writePacket(index, other.derived().template packet(index)); } + /** \internal */ template EIGEN_STRONG_INLINE void copyPacketByOuterInner(Index outer, Index inner, const DenseBase& other) { @@ -536,25 +564,25 @@ class DenseCoeffsBase : public DenseCoeffsBase which defines functions to access entries using + * inherits DenseCoeffsBase which defines functions to access entries read-only using * \c operator() . * * \sa \ref TopicClassHierarchy */ template -class DenseCoeffsBase : public DenseCoeffsBase +class DenseCoeffsBase : public DenseCoeffsBase { public: - typedef DenseCoeffsBase Base; - typedef typename ei_traits::Index Index; - typedef typename ei_traits::Scalar Scalar; + typedef DenseCoeffsBase Base; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; using Base::rows; @@ -606,57 +634,132 @@ class DenseCoeffsBase : public DenseCoeffsBase which defines functions to access entries read/write using + * \c operator(). 
+ * + * \sa \ref TopicClassHierarchy + */ +template +class DenseCoeffsBase + : public DenseCoeffsBase +{ + public: + + typedef DenseCoeffsBase Base; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + + using Base::rows; + using Base::cols; + using Base::size; + using Base::derived; + + /** \returns the pointer increment between two consecutive elements within a slice in the inner direction. + * + * \sa outerStride(), rowStride(), colStride() + */ + inline Index innerStride() const + { + return derived().innerStride(); + } + + /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns + * in a column-major matrix). + * + * \sa innerStride(), rowStride(), colStride() + */ + inline Index outerStride() const + { + return derived().outerStride(); + } + + // FIXME shall we remove it ? + inline Index stride() const + { + return Derived::IsVectorAtCompileTime ? innerStride() : outerStride(); + } + + /** \returns the pointer increment between two consecutive rows. + * + * \sa innerStride(), outerStride(), colStride() + */ + inline Index rowStride() const + { + return Derived::IsRowMajor ? outerStride() : innerStride(); + } + + /** \returns the pointer increment between two consecutive columns. + * + * \sa innerStride(), outerStride(), rowStride() + */ + inline Index colStride() const + { + return Derived::IsRowMajor ? 
innerStride() : outerStride(); + } +}; + +namespace internal { + template -struct ei_first_aligned_impl +struct first_aligned_impl { inline static typename Derived::Index run(const Derived&) { return 0; } }; template -struct ei_first_aligned_impl +struct first_aligned_impl { inline static typename Derived::Index run(const Derived& m) { - return ei_first_aligned(&m.const_cast_derived().coeffRef(0,0), m.size()); + return first_aligned(&m.const_cast_derived().coeffRef(0,0), m.size()); } }; /** \internal \returns the index of the first element of the array that is well aligned for vectorization. * - * There is also the variant ei_first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more + * There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more * documentation. */ template -inline static typename Derived::Index ei_first_aligned(const Derived& m) +inline static typename Derived::Index first_aligned(const Derived& m) { - return ei_first_aligned_impl + return first_aligned_impl ::run(m); } -template::ret> -struct ei_inner_stride_at_compile_time +template::ret> +struct inner_stride_at_compile_time { - enum { ret = ei_traits::InnerStrideAtCompileTime }; + enum { ret = traits::InnerStrideAtCompileTime }; }; template -struct ei_inner_stride_at_compile_time +struct inner_stride_at_compile_time { enum { ret = 0 }; }; -template::ret> -struct ei_outer_stride_at_compile_time +template::ret> +struct outer_stride_at_compile_time { - enum { ret = ei_traits::OuterStrideAtCompileTime }; + enum { ret = traits::OuterStrideAtCompileTime }; }; template -struct ei_outer_stride_at_compile_time +struct outer_stride_at_compile_time { enum { ret = 0 }; }; +} // end namespace internal + #endif // EIGEN_DENSECOEFFSBASE_H diff --git a/gtsam/3rdparty/Eigen/src/Core/MatrixStorage.h b/gtsam/3rdparty/Eigen/src/Core/DenseStorage.h similarity index 50% rename from gtsam/3rdparty/Eigen/src/Core/MatrixStorage.h rename to 
gtsam/3rdparty/Eigen/src/Core/DenseStorage.h index c6e7e20b2..1bcaf4c56 100644 --- a/gtsam/3rdparty/Eigen/src/Core/MatrixStorage.h +++ b/gtsam/3rdparty/Eigen/src/Core/DenseStorage.h @@ -27,58 +27,62 @@ #ifndef EIGEN_MATRIXSTORAGE_H #define EIGEN_MATRIXSTORAGE_H -#ifdef EIGEN_DEBUG_MATRIX_CTOR - #define EIGEN_INT_DEBUG_MATRIX_CTOR EIGEN_DEBUG_MATRIX_CTOR; +#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN EIGEN_DENSE_STORAGE_CTOR_PLUGIN; #else - #define EIGEN_INT_DEBUG_MATRIX_CTOR + #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN #endif -struct ei_constructor_without_unaligned_array_assert {}; +namespace internal { + +struct constructor_without_unaligned_array_assert {}; /** \internal - * Static array. If the MatrixOptions require auto-alignment, the array will be automatically aligned: + * Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned: * to 16 bytes boundary if the total size is a multiple of 16 bytes. */ -template -struct ei_matrix_array +struct plain_array { T array[Size]; - ei_matrix_array() {} - ei_matrix_array(ei_constructor_without_unaligned_array_assert) {} + plain_array() {} + plain_array(constructor_without_unaligned_array_assert) {} }; #ifdef EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) #else #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \ - ei_assert((reinterpret_cast(array) & sizemask) == 0 \ + eigen_assert((reinterpret_cast(array) & sizemask) == 0 \ && "this assertion is explained here: " \ "http://eigen.tuxfamily.org/dox/UnalignedArrayAssert.html" \ " **** READ THIS WEB PAGE !!! 
****"); #endif -template -struct ei_matrix_array +template +struct plain_array { - EIGEN_ALIGN16 T array[Size]; - ei_matrix_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf) } - ei_matrix_array(ei_constructor_without_unaligned_array_assert) {} + EIGEN_USER_ALIGN16 T array[Size]; + plain_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf) } + plain_array(constructor_without_unaligned_array_assert) {} }; -template -struct ei_matrix_array +template +struct plain_array { - EIGEN_ALIGN16 T array[1]; - ei_matrix_array() {} - ei_matrix_array(ei_constructor_without_unaligned_array_assert) {} + EIGEN_USER_ALIGN16 T array[1]; + plain_array() {} + plain_array(constructor_without_unaligned_array_assert) {} }; +} // end namespace internal + /** \internal * - * \class ei_matrix_storage + * \class DenseStorage * \ingroup Core_Module * * \brief Stores the data of a matrix @@ -88,18 +92,18 @@ struct ei_matrix_array * * \sa Matrix */ -template class ei_matrix_storage; +template class DenseStorage; // purely fixed-size matrix -template class ei_matrix_storage +template class DenseStorage { - ei_matrix_array m_data; + internal::plain_array m_data; public: - inline explicit ei_matrix_storage() {} - inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) - : m_data(ei_constructor_without_unaligned_array_assert()) {} - inline ei_matrix_storage(DenseIndex,DenseIndex,DenseIndex) {} - inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); } + inline explicit DenseStorage() {} + inline DenseStorage(internal::constructor_without_unaligned_array_assert) + : m_data(internal::constructor_without_unaligned_array_assert()) {} + inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {} + inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); } inline static DenseIndex rows(void) {return _Rows;} inline static DenseIndex cols(void) {return _Cols;} inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {} @@ -109,13 +113,13 @@ template class 
ei_matr }; // null matrix -template class ei_matrix_storage +template class DenseStorage { public: - inline explicit ei_matrix_storage() {} - inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) {} - inline ei_matrix_storage(DenseIndex,DenseIndex,DenseIndex) {} - inline void swap(ei_matrix_storage& ) {} + inline explicit DenseStorage() {} + inline DenseStorage(internal::constructor_without_unaligned_array_assert) {} + inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {} + inline void swap(DenseStorage& ) {} inline static DenseIndex rows(void) {return _Rows;} inline static DenseIndex cols(void) {return _Cols;} inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {} @@ -125,17 +129,17 @@ template class ei_matrix_storage }; // dynamic-size matrix with fixed-size storage -template class ei_matrix_storage +template class DenseStorage { - ei_matrix_array m_data; + internal::plain_array m_data; DenseIndex m_rows; DenseIndex m_cols; public: - inline explicit ei_matrix_storage() : m_rows(0), m_cols(0) {} - inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) - : m_data(ei_constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {} - inline ei_matrix_storage(DenseIndex, DenseIndex rows, DenseIndex cols) : m_rows(rows), m_cols(cols) {} - inline void swap(ei_matrix_storage& other) + inline explicit DenseStorage() : m_rows(0), m_cols(0) {} + inline DenseStorage(internal::constructor_without_unaligned_array_assert) + : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {} + inline DenseStorage(DenseIndex, DenseIndex rows, DenseIndex cols) : m_rows(rows), m_cols(cols) {} + inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); } inline DenseIndex rows(void) const {return m_rows;} inline DenseIndex cols(void) const {return m_cols;} @@ -146,16 +150,16 @@ template class ei_matrix_storage class 
ei_matrix_storage +template class DenseStorage { - ei_matrix_array m_data; + internal::plain_array m_data; DenseIndex m_rows; public: - inline explicit ei_matrix_storage() : m_rows(0) {} - inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) - : m_data(ei_constructor_without_unaligned_array_assert()), m_rows(0) {} - inline ei_matrix_storage(DenseIndex, DenseIndex rows, DenseIndex) : m_rows(rows) {} - inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } + inline explicit DenseStorage() : m_rows(0) {} + inline DenseStorage(internal::constructor_without_unaligned_array_assert) + : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {} + inline DenseStorage(DenseIndex, DenseIndex rows, DenseIndex) : m_rows(rows) {} + inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } inline DenseIndex rows(void) const {return m_rows;} inline DenseIndex cols(void) const {return _Cols;} inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; } @@ -165,16 +169,16 @@ template class ei_matrix_storage< }; // dynamic-size matrix with fixed-size storage and fixed height -template class ei_matrix_storage +template class DenseStorage { - ei_matrix_array m_data; + internal::plain_array m_data; DenseIndex m_cols; public: - inline explicit ei_matrix_storage() : m_cols(0) {} - inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) - : m_data(ei_constructor_without_unaligned_array_assert()), m_cols(0) {} - inline ei_matrix_storage(DenseIndex, DenseIndex, DenseIndex cols) : m_cols(cols) {} - inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } + inline explicit DenseStorage() : m_cols(0) {} + inline DenseStorage(internal::constructor_without_unaligned_array_assert) + : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {} 
+ inline DenseStorage(DenseIndex, DenseIndex, DenseIndex cols) : m_cols(cols) {} + inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } inline DenseIndex rows(void) const {return _Rows;} inline DenseIndex cols(void) const {return m_cols;} inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; } @@ -184,26 +188,26 @@ template class ei_matrix_storage< }; // purely dynamic matrix. -template class ei_matrix_storage +template class DenseStorage { T *m_data; DenseIndex m_rows; DenseIndex m_cols; public: - inline explicit ei_matrix_storage() : m_data(0), m_rows(0), m_cols(0) {} - inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) + inline explicit DenseStorage() : m_data(0), m_rows(0), m_cols(0) {} + inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0), m_cols(0) {} - inline ei_matrix_storage(DenseIndex size, DenseIndex rows, DenseIndex cols) - : m_data(ei_conditional_aligned_new(size)), m_rows(rows), m_cols(cols) - { EIGEN_INT_DEBUG_MATRIX_CTOR } - inline ~ei_matrix_storage() { ei_conditional_aligned_delete(m_data, m_rows*m_cols); } - inline void swap(ei_matrix_storage& other) + inline DenseStorage(DenseIndex size, DenseIndex rows, DenseIndex cols) + : m_data(internal::conditional_aligned_new_auto(size)), m_rows(rows), m_cols(cols) + { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } + inline ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, m_rows*m_cols); } + inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); } inline DenseIndex rows(void) const {return m_rows;} inline DenseIndex cols(void) const {return m_cols;} inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex cols) { - m_data = ei_conditional_aligned_realloc_new(m_data, size, m_rows*m_cols); + m_data = 
internal::conditional_aligned_realloc_new_auto(m_data, size, m_rows*m_cols); m_rows = rows; m_cols = cols; } @@ -211,12 +215,12 @@ template class ei_matrix_storage(m_data, m_rows*m_cols); + internal::conditional_aligned_delete_auto(m_data, m_rows*m_cols); if (size) - m_data = ei_conditional_aligned_new(size); + m_data = internal::conditional_aligned_new_auto(size); else m_data = 0; - EIGEN_INT_DEBUG_MATRIX_CTOR + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } m_rows = rows; m_cols = cols; @@ -226,34 +230,34 @@ template class ei_matrix_storage class ei_matrix_storage +template class DenseStorage { T *m_data; DenseIndex m_cols; public: - inline explicit ei_matrix_storage() : m_data(0), m_cols(0) {} - inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {} - inline ei_matrix_storage(DenseIndex size, DenseIndex, DenseIndex cols) : m_data(ei_conditional_aligned_new(size)), m_cols(cols) - { EIGEN_INT_DEBUG_MATRIX_CTOR } - inline ~ei_matrix_storage() { ei_conditional_aligned_delete(m_data, _Rows*m_cols); } - inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } + inline explicit DenseStorage() : m_data(0), m_cols(0) {} + inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {} + inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex cols) : m_data(internal::conditional_aligned_new_auto(size)), m_cols(cols) + { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } + inline ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, _Rows*m_cols); } + inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } inline static DenseIndex rows(void) {return _Rows;} inline DenseIndex cols(void) const {return m_cols;} inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex cols) { - m_data = ei_conditional_aligned_realloc_new(m_data, size, _Rows*m_cols); + m_data = 
internal::conditional_aligned_realloc_new_auto(m_data, size, _Rows*m_cols); m_cols = cols; } EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex, DenseIndex cols) { if(size != _Rows*m_cols) { - ei_conditional_aligned_delete(m_data, _Rows*m_cols); + internal::conditional_aligned_delete_auto(m_data, _Rows*m_cols); if (size) - m_data = ei_conditional_aligned_new(size); + m_data = internal::conditional_aligned_new_auto(size); else m_data = 0; - EIGEN_INT_DEBUG_MATRIX_CTOR + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } m_cols = cols; } @@ -262,34 +266,34 @@ template class ei_matrix_storage class ei_matrix_storage +template class DenseStorage { T *m_data; DenseIndex m_rows; public: - inline explicit ei_matrix_storage() : m_data(0), m_rows(0) {} - inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {} - inline ei_matrix_storage(DenseIndex size, DenseIndex rows, DenseIndex) : m_data(ei_conditional_aligned_new(size)), m_rows(rows) - { EIGEN_INT_DEBUG_MATRIX_CTOR } - inline ~ei_matrix_storage() { ei_conditional_aligned_delete(m_data, _Cols*m_rows); } - inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } + inline explicit DenseStorage() : m_data(0), m_rows(0) {} + inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {} + inline DenseStorage(DenseIndex size, DenseIndex rows, DenseIndex) : m_data(internal::conditional_aligned_new_auto(size)), m_rows(rows) + { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } + inline ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, _Cols*m_rows); } + inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } inline DenseIndex rows(void) const {return m_rows;} inline static DenseIndex cols(void) {return _Cols;} inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex) { - m_data = 
ei_conditional_aligned_realloc_new(m_data, size, m_rows*_Cols); + m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, m_rows*_Cols); m_rows = rows; } EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex rows, DenseIndex) { if(size != m_rows*_Cols) { - ei_conditional_aligned_delete(m_data, _Cols*m_rows); + internal::conditional_aligned_delete_auto(m_data, _Cols*m_rows); if (size) - m_data = ei_conditional_aligned_new(size); + m_data = internal::conditional_aligned_new_auto(size); else m_data = 0; - EIGEN_INT_DEBUG_MATRIX_CTOR + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } m_rows = rows; } diff --git a/gtsam/3rdparty/Eigen/src/Core/Diagonal.h b/gtsam/3rdparty/Eigen/src/Core/Diagonal.h index 0b7d14179..e807a49e4 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Diagonal.h +++ b/gtsam/3rdparty/Eigen/src/Core/Diagonal.h @@ -43,12 +43,14 @@ * * \sa MatrixBase::diagonal(), MatrixBase::diagonal(Index) */ + +namespace internal { template -struct ei_traits > - : ei_traits +struct traits > + : traits { - typedef typename ei_nested::type MatrixTypeNested; - typedef typename ei_unref::type _MatrixTypeNested; + typedef typename nested::type MatrixTypeNested; + typedef typename remove_reference::type _MatrixTypeNested; typedef typename MatrixType::StorageKind StorageKind; enum { AbsDiagIndex = DiagIndex<0 ? -DiagIndex : DiagIndex, // only used if DiagIndex != Dynamic @@ -62,23 +64,25 @@ struct ei_traits > MatrixType::MaxColsAtCompileTime) : (EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime) - AbsDiagIndex), MaxColsAtCompileTime = 1, - Flags = (unsigned int)_MatrixTypeNested::Flags & (HereditaryBits | LinearAccessBit | LvalueBit | DirectAccessBit) & ~RowMajorBit, + MaskLvalueBit = is_lvalue::value ? 
LvalueBit : 0, + Flags = (unsigned int)_MatrixTypeNested::Flags & (HereditaryBits | LinearAccessBit | MaskLvalueBit | DirectAccessBit) & ~RowMajorBit, CoeffReadCost = _MatrixTypeNested::CoeffReadCost, - MatrixTypeOuterStride = ei_outer_stride_at_compile_time::ret, + MatrixTypeOuterStride = outer_stride_at_compile_time::ret, InnerStrideAtCompileTime = MatrixTypeOuterStride == Dynamic ? Dynamic : MatrixTypeOuterStride+1, OuterStrideAtCompileTime = 0 }; }; +} template class Diagonal - : public ei_dense_xpr_base< Diagonal >::type + : public internal::dense_xpr_base< Diagonal >::type { public: - typedef typename ei_dense_xpr_base::type Base; + typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal) - inline Diagonal(const MatrixType& matrix, Index index = DiagIndex) : m_matrix(matrix), m_index(index) {} + inline Diagonal(MatrixType& matrix, Index index = DiagIndex) : m_matrix(matrix), m_index(index) {} EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal) @@ -98,6 +102,12 @@ template class Diagonal } inline Scalar& coeffRef(Index row, Index) + { + EIGEN_STATIC_ASSERT_LVALUE(MatrixType) + return m_matrix.const_cast_derived().coeffRef(row+rowOffset(), row+colOffset()); + } + + inline const Scalar& coeffRef(Index row, Index) const { return m_matrix.const_cast_derived().coeffRef(row+rowOffset(), row+colOffset()); } @@ -108,6 +118,12 @@ template class Diagonal } inline Scalar& coeffRef(Index index) + { + EIGEN_STATIC_ASSERT_LVALUE(MatrixType) + return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset()); + } + + inline const Scalar& coeffRef(Index index) const { return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset()); } @@ -119,7 +135,7 @@ template class Diagonal protected: const typename MatrixType::Nested m_matrix; - const ei_variable_if_dynamic m_index; + const internal::variable_if_dynamic m_index; private: // some compilers may fail to optimize std::max etc in case of compile-time 
constants... @@ -140,18 +156,18 @@ template class Diagonal * * \sa class Diagonal */ template -inline Diagonal +inline typename MatrixBase::DiagonalReturnType MatrixBase::diagonal() { - return Diagonal(derived()); + return derived(); } /** This is the const version of diagonal(). */ template -inline const Diagonal +inline const typename MatrixBase::ConstDiagonalReturnType MatrixBase::diagonal() const { - return Diagonal(derived()); + return ConstDiagonalReturnType(derived()); } /** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this @@ -166,18 +182,18 @@ MatrixBase::diagonal() const * * \sa MatrixBase::diagonal(), class Diagonal */ template -inline Diagonal +inline typename MatrixBase::template DiagonalIndexReturnType::Type MatrixBase::diagonal(Index index) { - return Diagonal(derived(), index); + return typename DiagonalIndexReturnType::Type(derived(), index); } /** This is the const version of diagonal(Index). */ template -inline const Diagonal +inline typename MatrixBase::template ConstDiagonalIndexReturnType::Type MatrixBase::diagonal(Index index) const { - return Diagonal(derived(), index); + return typename ConstDiagonalIndexReturnType::Type(derived(), index); } /** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this @@ -192,20 +208,20 @@ MatrixBase::diagonal(Index index) const * * \sa MatrixBase::diagonal(), class Diagonal */ template -template -inline Diagonal +template +inline typename MatrixBase::template DiagonalIndexReturnType::Type MatrixBase::diagonal() { - return Diagonal(derived()); + return derived(); } /** This is the const version of diagonal(). 
*/ template -template -inline const Diagonal +template +inline typename MatrixBase::template ConstDiagonalIndexReturnType::Type MatrixBase::diagonal() const { - return Diagonal(derived()); + return derived(); } #endif // EIGEN_DIAGONAL_H diff --git a/gtsam/3rdparty/Eigen/src/Core/DiagonalMatrix.h b/gtsam/3rdparty/Eigen/src/Core/DiagonalMatrix.h index 630c172ed..f41a74bfa 100644 --- a/gtsam/3rdparty/Eigen/src/Core/DiagonalMatrix.h +++ b/gtsam/3rdparty/Eigen/src/Core/DiagonalMatrix.h @@ -31,10 +31,10 @@ template class DiagonalBase : public EigenBase { public: - typedef typename ei_traits::DiagonalVectorType DiagonalVectorType; + typedef typename internal::traits::DiagonalVectorType DiagonalVectorType; typedef typename DiagonalVectorType::Scalar Scalar; - typedef typename ei_traits::StorageKind StorageKind; - typedef typename ei_traits::Index Index; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; enum { RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, @@ -46,6 +46,8 @@ class DiagonalBase : public EigenBase }; typedef Matrix DenseMatrixType; + typedef DenseMatrixType DenseType; + typedef DiagonalMatrix PlainObject; inline const Derived& derived() const { return *static_cast(this); } inline Derived& derived() { return *static_cast(this); } @@ -70,11 +72,24 @@ class DiagonalBase : public EigenBase const DiagonalProduct operator*(const MatrixBase &matrix) const; - inline const DiagonalWrapper, DiagonalVectorType> > + inline const DiagonalWrapper, const DiagonalVectorType> > inverse() const { return diagonal().cwiseInverse(); } + + #ifdef EIGEN2_SUPPORT + template + bool isApprox(const DiagonalBase& other, typename NumTraits::Real precision = NumTraits::dummy_precision()) const + { + return diagonal().isApprox(other.diagonal(), precision); + } + template + bool isApprox(const MatrixBase& other, typename NumTraits::Real precision = NumTraits::dummy_precision()) const + { + return 
toDenseMatrix().isApprox(other, precision); + } + #endif }; template @@ -98,9 +113,11 @@ void DiagonalBase::evalTo(MatrixBase &other) const * * \sa class DiagonalWrapper */ + +namespace internal { template -struct ei_traits > - : ei_traits > +struct traits > + : traits > { typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType; typedef Dense StorageKind; @@ -109,18 +126,18 @@ struct ei_traits Flags = LvalueBit }; }; - +} template class DiagonalMatrix : public DiagonalBase > { public: #ifndef EIGEN_PARSED_BY_DOXYGEN - typedef typename ei_traits::DiagonalVectorType DiagonalVectorType; + typedef typename internal::traits::DiagonalVectorType DiagonalVectorType; typedef const DiagonalMatrix& Nested; typedef _Scalar Scalar; - typedef typename ei_traits::StorageKind StorageKind; - typedef typename ei_traits::Index Index; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; #endif protected: @@ -204,8 +221,10 @@ class DiagonalMatrix * * \sa class DiagonalMatrix, class DiagonalBase, MatrixBase::asDiagonal() */ + +namespace internal { template -struct ei_traits > +struct traits > { typedef _DiagonalVectorType DiagonalVectorType; typedef typename DiagonalVectorType::Scalar Scalar; @@ -216,13 +235,14 @@ struct ei_traits > ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, MaxRowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, MaxColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, - Flags = ei_traits::Flags & LvalueBit + Flags = traits::Flags & LvalueBit }; }; +} template class DiagonalWrapper - : public DiagonalBase >, ei_no_assignment_operator + : public DiagonalBase >, internal::no_assignment_operator { public: #ifndef EIGEN_PARSED_BY_DOXYGEN @@ -250,7 +270,7 @@ class DiagonalWrapper * \sa class DiagonalWrapper, class DiagonalMatrix, diagonal(), isDiagonal() **/ template -inline const DiagonalWrapper +inline const DiagonalWrapper MatrixBase::asDiagonal() const { 
return derived(); @@ -265,21 +285,20 @@ MatrixBase::asDiagonal() const * \sa asDiagonal() */ template -bool MatrixBase::isDiagonal -(RealScalar prec) const +bool MatrixBase::isDiagonal(RealScalar prec) const { if(cols() != rows()) return false; RealScalar maxAbsOnDiagonal = static_cast(-1); for(Index j = 0; j < cols(); ++j) { - RealScalar absOnDiagonal = ei_abs(coeff(j,j)); + RealScalar absOnDiagonal = internal::abs(coeff(j,j)); if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal; } for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < j; ++i) { - if(!ei_isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false; - if(!ei_isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false; + if(!internal::isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false; + if(!internal::isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false; } return true; } diff --git a/gtsam/3rdparty/Eigen/src/Core/DiagonalProduct.h b/gtsam/3rdparty/Eigen/src/Core/DiagonalProduct.h index 14c49e828..de0c6ed11 100644 --- a/gtsam/3rdparty/Eigen/src/Core/DiagonalProduct.h +++ b/gtsam/3rdparty/Eigen/src/Core/DiagonalProduct.h @@ -26,11 +26,12 @@ #ifndef EIGEN_DIAGONALPRODUCT_H #define EIGEN_DIAGONALPRODUCT_H +namespace internal { template -struct ei_traits > - : ei_traits +struct traits > + : traits { - typedef typename ei_scalar_product_traits::ReturnType Scalar; + typedef typename scalar_product_traits::ReturnType Scalar; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, @@ -40,7 +41,7 @@ struct ei_traits > _StorageOrder = MatrixType::Flags & RowMajorBit ? 
RowMajor : ColMajor, _PacketOnDiag = !((int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft) ||(int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)), - _SameTypes = ei_is_same_type::ret, + _SameTypes = is_same::value, // FIXME currently we need same types, but in the future the next rule should be the one //_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagonalType::Flags)&PacketAccessBit))), _Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && ((!_PacketOnDiag) || (bool(int(DiagonalType::Flags)&PacketAccessBit))), @@ -49,9 +50,10 @@ struct ei_traits > CoeffReadCost = NumTraits::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost }; }; +} template -class DiagonalProduct : ei_no_assignment_operator, +class DiagonalProduct : internal::no_assignment_operator, public MatrixBase > { public: @@ -62,7 +64,7 @@ class DiagonalProduct : ei_no_assignment_operator, inline DiagonalProduct(const MatrixType& matrix, const DiagonalType& diagonal) : m_matrix(matrix), m_diagonal(diagonal) { - ei_assert(diagonal.diagonal().size() == (ProductOrder == OnTheLeft ? matrix.rows() : matrix.cols())); + eigen_assert(diagonal.diagonal().size() == (ProductOrder == OnTheLeft ? matrix.rows() : matrix.cols())); } inline Index rows() const { return m_matrix.rows(); } @@ -81,27 +83,27 @@ class DiagonalProduct : ei_no_assignment_operator, }; const Index indexInDiagonalVector = ProductOrder == OnTheLeft ? 
row : col; - return packet_impl(row,col,indexInDiagonalVector,typename ei_meta_if< + return packet_impl(row,col,indexInDiagonalVector,typename internal::conditional< ((int(StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft) - ||(int(StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)), ei_meta_true, ei_meta_false>::ret()); + ||(int(StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)), internal::true_type, internal::false_type>::type()); } protected: template - EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, ei_meta_true) const + EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, internal::true_type) const { - return ei_pmul(m_matrix.template packet(row, col), - ei_pset1(m_diagonal.diagonal().coeff(id))); + return internal::pmul(m_matrix.template packet(row, col), + internal::pset1(m_diagonal.diagonal().coeff(id))); } template - EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, ei_meta_false) const + EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, internal::false_type) const { enum { InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime, DiagonalVectorPacketLoadMode = (LoadMode == Aligned && ((InnerSize%16) == 0)) ? Aligned : Unaligned }; - return ei_pmul(m_matrix.template packet(row, col), + return internal::pmul(m_matrix.template packet(row, col), m_diagonal.diagonal().template packet(id)); } diff --git a/gtsam/3rdparty/Eigen/src/Core/Dot.h b/gtsam/3rdparty/Eigen/src/Core/Dot.h index 9fc2fb60e..6e83191c5 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Dot.h +++ b/gtsam/3rdparty/Eigen/src/Core/Dot.h @@ -25,6 +25,8 @@ #ifndef EIGEN_DOT_H #define EIGEN_DOT_H +namespace internal { + // helper function for dot(). 
The problem is that if we put that in the body of dot(), then upon calling dot // with mismatched types, the compiler emits errors about failing to instantiate cwiseProduct BEFORE // looking at the static assertions. Thus this is a trick to get better compile errors. @@ -37,23 +39,27 @@ template -struct ei_dot_nocheck +struct dot_nocheck { - static inline typename ei_traits::Scalar run(const MatrixBase& a, const MatrixBase& b) + typedef typename scalar_product_traits::Scalar,typename traits::Scalar>::ReturnType ResScalar; + static inline ResScalar run(const MatrixBase& a, const MatrixBase& b) { - return a.template binaryExpr::Scalar> >(b).sum(); + return a.template binaryExpr::Scalar,typename traits::Scalar> >(b).sum(); } }; template -struct ei_dot_nocheck +struct dot_nocheck { - static inline typename ei_traits::Scalar run(const MatrixBase& a, const MatrixBase& b) + typedef typename scalar_product_traits::Scalar,typename traits::Scalar>::ReturnType ResScalar; + static inline ResScalar run(const MatrixBase& a, const MatrixBase& b) { - return a.transpose().template binaryExpr::Scalar> >(b).sum(); + return a.transpose().template binaryExpr::Scalar,typename traits::Scalar> >(b).sum(); } }; +} // end namespace internal + /** \returns the dot product of *this with other. 
* * \only_for_vectors @@ -66,19 +72,47 @@ struct ei_dot_nocheck */ template template -typename ei_traits::Scalar +typename internal::scalar_product_traits::Scalar,typename internal::traits::Scalar>::ReturnType MatrixBase::dot(const MatrixBase& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) - EIGEN_STATIC_ASSERT((ei_is_same_type::ret), + typedef internal::scalar_conj_product_op func; + EIGEN_CHECK_BINARY_COMPATIBILIY(func,Scalar,typename OtherDerived::Scalar); + + eigen_assert(size() == other.size()); + + return internal::dot_nocheck::run(*this, other); +} + +#ifdef EIGEN2_SUPPORT +/** \returns the dot product of *this with other, with the Eigen2 convention that the dot product is linear in the first variable + * (conjugating the second variable). Of course this only makes a difference in the complex case. + * + * This method is only available in EIGEN2_SUPPORT mode. + * + * \only_for_vectors + * + * \sa dot() + */ +template +template +typename internal::traits::Scalar +MatrixBase::eigen2_dot(const MatrixBase& other) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) + EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) - ei_assert(size() == other.size()); + eigen_assert(size() == other.size()); - return ei_dot_nocheck::run(*this, other); + return internal::dot_nocheck::run(other,*this); } +#endif + //---------- implementation of L2 norm and related functions ---------- @@ -87,9 +121,9 @@ MatrixBase::dot(const MatrixBase& other) const * \sa dot(), norm() */ template -EIGEN_STRONG_INLINE typename NumTraits::Scalar>::Real MatrixBase::squaredNorm() const +EIGEN_STRONG_INLINE typename NumTraits::Scalar>::Real MatrixBase::squaredNorm() const 
{ - return ei_real((*this).cwiseAbs2().sum()); + return internal::real((*this).cwiseAbs2().sum()); } /** \returns the \em l2 norm of *this, i.e., for vectors, the square root of the dot product of *this with itself. @@ -97,9 +131,9 @@ EIGEN_STRONG_INLINE typename NumTraits::Scalar>::Rea * \sa dot(), squaredNorm() */ template -inline typename NumTraits::Scalar>::Real MatrixBase::norm() const +inline typename NumTraits::Scalar>::Real MatrixBase::norm() const { - return ei_sqrt(squaredNorm()); + return internal::sqrt(squaredNorm()); } /** \returns an expression of the quotient of *this by its own norm. @@ -112,8 +146,8 @@ template inline const typename MatrixBase::PlainObject MatrixBase::normalized() const { - typedef typename ei_nested::type Nested; - typedef typename ei_unref::type _Nested; + typedef typename internal::nested::type Nested; + typedef typename internal::remove_reference::type _Nested; _Nested n(derived()); return n / n.norm(); } @@ -132,55 +166,59 @@ inline void MatrixBase::normalize() //---------- implementation of other norms ---------- +namespace internal { + template -struct ei_lpNorm_selector +struct lpNorm_selector { - typedef typename NumTraits::Scalar>::Real RealScalar; + typedef typename NumTraits::Scalar>::Real RealScalar; inline static RealScalar run(const MatrixBase& m) { - return ei_pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p); + return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p); } }; template -struct ei_lpNorm_selector +struct lpNorm_selector { - inline static typename NumTraits::Scalar>::Real run(const MatrixBase& m) + inline static typename NumTraits::Scalar>::Real run(const MatrixBase& m) { return m.cwiseAbs().sum(); } }; template -struct ei_lpNorm_selector +struct lpNorm_selector { - inline static typename NumTraits::Scalar>::Real run(const MatrixBase& m) + inline static typename NumTraits::Scalar>::Real run(const MatrixBase& m) { return m.norm(); } }; template -struct ei_lpNorm_selector +struct 
lpNorm_selector { - inline static typename NumTraits::Scalar>::Real run(const MatrixBase& m) + inline static typename NumTraits::Scalar>::Real run(const MatrixBase& m) { return m.cwiseAbs().maxCoeff(); } }; +} // end namespace internal + /** \returns the \f$ \ell^p \f$ norm of *this, that is, returns the p-th root of the sum of the p-th powers of the absolute values - * of the coefficients of *this. If \a p is the special value \a Eigen::Infinity, this function returns the \f$ \ell^p\infty \f$ + * of the coefficients of *this. If \a p is the special value \a Eigen::Infinity, this function returns the \f$ \ell^\infty \f$ * norm, that is the maximum of the absolute values of the coefficients of *this. * * \sa norm() */ template template -inline typename NumTraits::Scalar>::Real +inline typename NumTraits::Scalar>::Real MatrixBase::lpNorm() const { - return ei_lpNorm_selector::run(*this); + return internal::lpNorm_selector::run(*this); } //---------- implementation of isOrthogonal / isUnitary ---------- @@ -196,9 +234,9 @@ template bool MatrixBase::isOrthogonal (const MatrixBase& other, RealScalar prec) const { - typename ei_nested::type nested(derived()); - typename ei_nested::type otherNested(other.derived()); - return ei_abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm(); + typename internal::nested::type nested(derived()); + typename internal::nested::type otherNested(other.derived()); + return internal::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm(); } /** \returns true if *this is approximately an unitary matrix, @@ -218,10 +256,10 @@ bool MatrixBase::isUnitary(RealScalar prec) const typename Derived::Nested nested(derived()); for(Index i = 0; i < cols(); ++i) { - if(!ei_isApprox(nested.col(i).squaredNorm(), static_cast(1), prec)) + if(!internal::isApprox(nested.col(i).squaredNorm(), static_cast(1), prec)) return false; for(Index j = 0; j < i; ++j) - 
if(!ei_isMuchSmallerThan(nested.col(i).dot(nested.col(j)), static_cast(1), prec)) + if(!internal::isMuchSmallerThan(nested.col(i).dot(nested.col(j)), static_cast(1), prec)) return false; } return true; diff --git a/gtsam/3rdparty/Eigen/src/Core/EigenBase.h b/gtsam/3rdparty/Eigen/src/Core/EigenBase.h index d07fea9a2..0472539af 100644 --- a/gtsam/3rdparty/Eigen/src/Core/EigenBase.h +++ b/gtsam/3rdparty/Eigen/src/Core/EigenBase.h @@ -39,10 +39,10 @@ */ template struct EigenBase { -// typedef typename ei_plain_matrix_type::type PlainObject; +// typedef typename internal::plain_matrix_type::type PlainObject; - typedef typename ei_traits::StorageKind StorageKind; - typedef typename ei_traits::Index Index; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; /** \returns a reference to the derived object */ Derived& derived() { return *static_cast(this); } @@ -51,6 +51,8 @@ template struct EigenBase inline Derived& const_cast_derived() const { return *static_cast(const_cast(this)); } + inline const Derived& const_derived() const + { return *static_cast(this); } /** \returns the number of rows. 
\sa cols(), RowsAtCompileTime */ inline Index rows() const { return derived().rows(); } diff --git a/gtsam/3rdparty/Eigen/src/Core/Flagged.h b/gtsam/3rdparty/Eigen/src/Core/Flagged.h index 9211c50e8..458213ab5 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Flagged.h +++ b/gtsam/3rdparty/Eigen/src/Core/Flagged.h @@ -40,11 +40,14 @@ * * \sa MatrixBase::flagged() */ + +namespace internal { template -struct ei_traits > : ei_traits +struct traits > : traits { enum { Flags = (ExpressionType::Flags | Added) & ~Removed }; }; +} template class Flagged : public MatrixBase > @@ -52,9 +55,10 @@ template clas public: typedef MatrixBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Flagged) - typedef typename ei_meta_if::ret, - ExpressionType, const ExpressionType&>::ret ExpressionTypeNested; + typedef typename internal::conditional::ret, + ExpressionType, const ExpressionType&>::type ExpressionTypeNested; typedef typename ExpressionType::InnerIterator InnerIterator; inline Flagged(const ExpressionType& matrix) : m_matrix(matrix) {} @@ -64,21 +68,31 @@ template clas inline Index outerStride() const { return m_matrix.outerStride(); } inline Index innerStride() const { return m_matrix.innerStride(); } - inline const Scalar coeff(Index row, Index col) const + inline CoeffReturnType coeff(Index row, Index col) const { return m_matrix.coeff(row, col); } + inline CoeffReturnType coeff(Index index) const + { + return m_matrix.coeff(index); + } + + inline const Scalar& coeffRef(Index row, Index col) const + { + return m_matrix.const_cast_derived().coeffRef(row, col); + } + + inline const Scalar& coeffRef(Index index) const + { + return m_matrix.const_cast_derived().coeffRef(index); + } + inline Scalar& coeffRef(Index row, Index col) { return m_matrix.const_cast_derived().coeffRef(row, col); } - inline const Scalar coeff(Index index) const - { - return m_matrix.coeff(index); - } - inline Scalar& coeffRef(Index index) { return m_matrix.const_cast_derived().coeffRef(index); diff --git 
a/gtsam/3rdparty/Eigen/src/Core/ForceAlignedAccess.h b/gtsam/3rdparty/Eigen/src/Core/ForceAlignedAccess.h index 06d78fbe2..11c1f8f70 100644 --- a/gtsam/3rdparty/Eigen/src/Core/ForceAlignedAccess.h +++ b/gtsam/3rdparty/Eigen/src/Core/ForceAlignedAccess.h @@ -37,16 +37,19 @@ * * \sa MatrixBase::forceAlignedAccess() */ + +namespace internal { template -struct ei_traits > : public ei_traits +struct traits > : public traits {}; +} template class ForceAlignedAccess - : public ei_dense_xpr_base< ForceAlignedAccess >::type + : public internal::dense_xpr_base< ForceAlignedAccess >::type { public: - typedef typename ei_dense_xpr_base::type Base; + typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(ForceAlignedAccess) inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {} @@ -134,7 +137,7 @@ MatrixBase::forceAlignedAccess() */ template template -inline typename ei_makeconst,Derived&>::ret>::type +inline typename internal::add_const_on_value_type,Derived&>::type>::type MatrixBase::forceAlignedAccessIf() const { return derived(); @@ -145,7 +148,7 @@ MatrixBase::forceAlignedAccessIf() const */ template template -inline typename ei_meta_if,Derived&>::ret +inline typename internal::conditional,Derived&>::type MatrixBase::forceAlignedAccessIf() { return derived(); diff --git a/gtsam/3rdparty/Eigen/src/Core/Functors.h b/gtsam/3rdparty/Eigen/src/Core/Functors.h index 41ae4af42..ddfc67d82 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Functors.h +++ b/gtsam/3rdparty/Eigen/src/Core/Functors.h @@ -25,6 +25,8 @@ #ifndef EIGEN_FUNCTORS_H #define EIGEN_FUNCTORS_H +namespace internal { + // associative functors: /** \internal @@ -32,21 +34,21 @@ * * \sa class CwiseBinaryOp, MatrixBase::operator+, class VectorwiseOp, MatrixBase::sum() */ -template struct ei_scalar_sum_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_sum_op) +template struct scalar_sum_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_sum_op) EIGEN_STRONG_INLINE const Scalar operator() 
(const Scalar& a, const Scalar& b) const { return a + b; } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const - { return ei_padd(a,b); } + { return internal::padd(a,b); } template EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const - { return ei_predux(a); } + { return internal::predux(a); } }; template -struct ei_functor_traits > { +struct functor_traits > { enum { Cost = NumTraits::AddCost, - PacketAccess = ei_packet_traits::HasAdd + PacketAccess = packet_traits::HasAdd }; }; @@ -55,47 +57,55 @@ struct ei_functor_traits > { * * \sa class CwiseBinaryOp, Cwise::operator*(), class VectorwiseOp, MatrixBase::redux() */ -template struct ei_scalar_product_op { +template struct scalar_product_op { enum { - Vectorizable = ei_is_same_type::ret && ei_packet_traits::HasMul && ei_packet_traits::HasMul + // TODO vectorize mixed product + Vectorizable = is_same::value && packet_traits::HasMul && packet_traits::HasMul }; - typedef typename ei_scalar_product_traits::ReturnType result_type; - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_product_op) + typedef typename scalar_product_traits::ReturnType result_type; + EIGEN_EMPTY_STRUCT_CTOR(scalar_product_op) EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a * b; } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const - { return ei_pmul(a,b); } + { return internal::pmul(a,b); } template EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const - { return ei_predux_mul(a); } + { return internal::predux_mul(a); } }; template -struct ei_functor_traits > { +struct functor_traits > { enum { Cost = (NumTraits::MulCost + NumTraits::MulCost)/2, // rough estimate! 
- PacketAccess = ei_scalar_product_op::Vectorizable + PacketAccess = scalar_product_op::Vectorizable }; }; /** \internal * \brief Template functor to compute the conjugate product of two scalars * - * This is a short cut for ei_conj(x) * y which is needed for optimization purpose + * This is a short cut for conj(x) * y which is needed for optimization purpose; in Eigen2 support mode, this becomes x * conj(y) */ -template struct ei_scalar_conj_product_op { - enum { Conj = NumTraits::IsComplex }; - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_conj_product_op) - EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const - { return ei_conj_helper().pmul(a,b); } +template struct scalar_conj_product_op { + + enum { + Conj = NumTraits::IsComplex + }; + + typedef typename scalar_product_traits::ReturnType result_type; + + EIGEN_EMPTY_STRUCT_CTOR(scalar_conj_product_op) + EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const + { return conj_helper().pmul(a,b); } + template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const - { return ei_conj_helper().pmul(a,b); } + { return conj_helper().pmul(a,b); } }; -template -struct ei_functor_traits > { +template +struct functor_traits > { enum { - Cost = NumTraits::MulCost, - PacketAccess = ei_packet_traits::HasMul + Cost = NumTraits::MulCost, + PacketAccess = internal::is_same::value && packet_traits::HasMul }; }; @@ -104,21 +114,21 @@ struct ei_functor_traits > { * * \sa class CwiseBinaryOp, MatrixBase::cwiseMin, class VectorwiseOp, MatrixBase::minCoeff() */ -template struct ei_scalar_min_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_min_op) +template struct scalar_min_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_min_op) EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return std::min(a, b); } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const - { return ei_pmin(a,b); } + { return 
internal::pmin(a,b); } template EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const - { return ei_predux_min(a); } + { return internal::predux_min(a); } }; template -struct ei_functor_traits > { +struct functor_traits > { enum { Cost = NumTraits::AddCost, - PacketAccess = ei_packet_traits::HasMin + PacketAccess = packet_traits::HasMin }; }; @@ -127,21 +137,21 @@ struct ei_functor_traits > { * * \sa class CwiseBinaryOp, MatrixBase::cwiseMax, class VectorwiseOp, MatrixBase::maxCoeff() */ -template struct ei_scalar_max_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_max_op) +template struct scalar_max_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_max_op) EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return std::max(a, b); } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const - { return ei_pmax(a,b); } + { return internal::pmax(a,b); } template EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const - { return ei_predux_max(a); } + { return internal::predux_max(a); } }; template -struct ei_functor_traits > { +struct functor_traits > { enum { Cost = NumTraits::AddCost, - PacketAccess = ei_packet_traits::HasMax + PacketAccess = packet_traits::HasMax }; }; @@ -150,19 +160,19 @@ struct ei_functor_traits > { * * \sa MatrixBase::stableNorm(), class Redux */ -template struct ei_scalar_hypot_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_hypot_op) +template struct scalar_hypot_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_hypot_op) // typedef typename NumTraits::Real result_type; EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& _x, const Scalar& _y) const { Scalar p = std::max(_x, _y); Scalar q = std::min(_x, _y); Scalar qp = q/p; - return p * ei_sqrt(Scalar(1) + qp*qp); + return p * sqrt(Scalar(1) + qp*qp); } }; template -struct ei_functor_traits > { +struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, PacketAccess=0 }; }; @@ -173,18 +183,18 @@ struct ei_functor_traits > { * * \sa class 
CwiseBinaryOp, MatrixBase::operator- */ -template struct ei_scalar_difference_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_difference_op) +template struct scalar_difference_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_difference_op) EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a - b; } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const - { return ei_psub(a,b); } + { return internal::psub(a,b); } }; template -struct ei_functor_traits > { +struct functor_traits > { enum { Cost = NumTraits::AddCost, - PacketAccess = ei_packet_traits::HasSub + PacketAccess = packet_traits::HasSub }; }; @@ -193,18 +203,18 @@ struct ei_functor_traits > { * * \sa class CwiseBinaryOp, Cwise::operator/() */ -template struct ei_scalar_quotient_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_quotient_op) +template struct scalar_quotient_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op) EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a / b; } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const - { return ei_pdiv(a,b); } + { return internal::pdiv(a,b); } }; template -struct ei_functor_traits > { +struct functor_traits > { enum { Cost = 2 * NumTraits::MulCost, - PacketAccess = ei_packet_traits::HasDiv + PacketAccess = packet_traits::HasDiv }; }; @@ -215,18 +225,18 @@ struct ei_functor_traits > { * * \sa class CwiseUnaryOp, MatrixBase::operator- */ -template struct ei_scalar_opposite_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_opposite_op) +template struct scalar_opposite_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_opposite_op) EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return -a; } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const - { return ei_pnegate(a); } + { return internal::pnegate(a); } }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = NumTraits::AddCost, - 
PacketAccess = ei_packet_traits::HasNegate }; + PacketAccess = packet_traits::HasNegate }; }; /** \internal @@ -234,20 +244,20 @@ struct ei_functor_traits > * * \sa class CwiseUnaryOp, Cwise::abs */ -template struct ei_scalar_abs_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_abs_op) +template struct scalar_abs_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_abs_op) typedef typename NumTraits::Real result_type; - EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return ei_abs(a); } + EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return abs(a); } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const - { return ei_pabs(a); } + { return internal::pabs(a); } }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = NumTraits::AddCost, - PacketAccess = ei_packet_traits::HasAbs + PacketAccess = packet_traits::HasAbs }; }; @@ -256,35 +266,35 @@ struct ei_functor_traits > * * \sa class CwiseUnaryOp, Cwise::abs2 */ -template struct ei_scalar_abs2_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_abs2_op) +template struct scalar_abs2_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_abs2_op) typedef typename NumTraits::Real result_type; - EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return ei_abs2(a); } + EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return abs2(a); } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const - { return ei_pmul(a,a); } + { return internal::pmul(a,a); } }; template -struct ei_functor_traits > -{ enum { Cost = NumTraits::MulCost, PacketAccess = ei_packet_traits::HasAbs2 }; }; +struct functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = packet_traits::HasAbs2 }; }; /** \internal * \brief Template functor to compute the conjugate of a complex value * * \sa class CwiseUnaryOp, MatrixBase::conjugate() */ -template struct ei_scalar_conjugate_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_conjugate_op) - 
EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return ei_conj(a); } +template struct scalar_conjugate_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_conjugate_op) + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return conj(a); } template - EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return ei_pconj(a); } + EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pconj(a); } }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = NumTraits::IsComplex ? NumTraits::AddCost : 0, - PacketAccess = ei_packet_traits::HasConj + PacketAccess = packet_traits::HasConj }; }; @@ -294,14 +304,14 @@ struct ei_functor_traits > * \sa class CwiseUnaryOp, MatrixBase::cast() */ template -struct ei_scalar_cast_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_cast_op) +struct scalar_cast_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op) typedef NewType result_type; - EIGEN_STRONG_INLINE const NewType operator() (const Scalar& a) const { return ei_cast(a); } + EIGEN_STRONG_INLINE const NewType operator() (const Scalar& a) const { return cast(a); } }; template -struct ei_functor_traits > -{ enum { Cost = ei_is_same_type::ret ? 0 : NumTraits::AddCost, PacketAccess = false }; }; +struct functor_traits > +{ enum { Cost = is_same::value ? 
0 : NumTraits::AddCost, PacketAccess = false }; }; /** \internal * \brief Template functor to extract the real part of a complex @@ -309,13 +319,13 @@ struct ei_functor_traits > * \sa class CwiseUnaryOp, MatrixBase::real() */ template -struct ei_scalar_real_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_real_op) +struct scalar_real_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_real_op) typedef typename NumTraits::Real result_type; - EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return ei_real(a); } + EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return real(a); } }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 0, PacketAccess = false }; }; /** \internal @@ -324,13 +334,13 @@ struct ei_functor_traits > * \sa class CwiseUnaryOp, MatrixBase::imag() */ template -struct ei_scalar_imag_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_imag_op) +struct scalar_imag_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_op) typedef typename NumTraits::Real result_type; - EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return ei_imag(a); } + EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return imag(a); } }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 0, PacketAccess = false }; }; /** \internal @@ -339,13 +349,13 @@ struct ei_functor_traits > * \sa class CwiseUnaryOp, MatrixBase::real() */ template -struct ei_scalar_real_ref_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_real_ref_op) +struct scalar_real_ref_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_real_ref_op) typedef typename NumTraits::Real result_type; - EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return ei_real_ref(*const_cast(&a)); } + EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return real_ref(*const_cast(&a)); } }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 0, PacketAccess = false }; }; /** \internal @@ -354,13 +364,13 @@ 
struct ei_functor_traits > * \sa class CwiseUnaryOp, MatrixBase::imag() */ template -struct ei_scalar_imag_ref_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_imag_ref_op) +struct scalar_imag_ref_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_ref_op) typedef typename NumTraits::Real result_type; - EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return ei_imag_ref(*const_cast(&a)); } + EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return imag_ref(*const_cast(&a)); } }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 0, PacketAccess = false }; }; /** \internal @@ -369,15 +379,15 @@ struct ei_functor_traits > * * \sa class CwiseUnaryOp, Cwise::exp() */ -template struct ei_scalar_exp_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_exp_op) - inline const Scalar operator() (const Scalar& a) const { return ei_exp(a); } - typedef typename ei_packet_traits::type Packet; - inline Packet packetOp(const Packet& a) const { return ei_pexp(a); } +template struct scalar_exp_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_exp_op) + inline const Scalar operator() (const Scalar& a) const { return exp(a); } + typedef typename packet_traits::type Packet; + inline Packet packetOp(const Packet& a) const { return internal::pexp(a); } }; template -struct ei_functor_traits > -{ enum { Cost = 5 * NumTraits::MulCost, PacketAccess = ei_packet_traits::HasExp }; }; +struct functor_traits > +{ enum { Cost = 5 * NumTraits::MulCost, PacketAccess = packet_traits::HasExp }; }; /** \internal * @@ -385,81 +395,81 @@ struct ei_functor_traits > * * \sa class CwiseUnaryOp, Cwise::log() */ -template struct ei_scalar_log_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_log_op) - inline const Scalar operator() (const Scalar& a) const { return ei_log(a); } - typedef typename ei_packet_traits::type Packet; - inline Packet packetOp(const Packet& a) const { return ei_plog(a); } +template struct scalar_log_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_log_op) + inline const Scalar 
operator() (const Scalar& a) const { return log(a); } + typedef typename packet_traits::type Packet; + inline Packet packetOp(const Packet& a) const { return internal::plog(a); } }; template -struct ei_functor_traits > -{ enum { Cost = 5 * NumTraits::MulCost, PacketAccess = ei_packet_traits::HasLog }; }; +struct functor_traits > +{ enum { Cost = 5 * NumTraits::MulCost, PacketAccess = packet_traits::HasLog }; }; /** \internal * \brief Template functor to multiply a scalar by a fixed other one * * \sa class CwiseUnaryOp, MatrixBase::operator*, MatrixBase::operator/ */ -/* NOTE why doing the ei_pset1() in packetOp *is* an optimization ? - * indeed it seems better to declare m_other as a Packet and do the ei_pset1() once +/* NOTE why doing the pset1() in packetOp *is* an optimization ? + * indeed it seems better to declare m_other as a Packet and do the pset1() once * in the constructor. However, in practice: * - GCC does not like m_other as a Packet and generate a load every time it needs it - * - on the other hand GCC is able to moves the ei_pset1() away the loop :) + * - on the other hand GCC is able to moves the pset1() away the loop :) * - simpler code ;) * (ICC and gcc 4.4 seems to perform well in both cases, the issue is visible with y = a*x + b*y) */ template -struct ei_scalar_multiple_op { - typedef typename ei_packet_traits::type Packet; +struct scalar_multiple_op { + typedef typename packet_traits::type Packet; // FIXME default copy constructors seems bugged with std::complex<> - EIGEN_STRONG_INLINE ei_scalar_multiple_op(const ei_scalar_multiple_op& other) : m_other(other.m_other) { } - EIGEN_STRONG_INLINE ei_scalar_multiple_op(const Scalar& other) : m_other(other) { } + EIGEN_STRONG_INLINE scalar_multiple_op(const scalar_multiple_op& other) : m_other(other.m_other) { } + EIGEN_STRONG_INLINE scalar_multiple_op(const Scalar& other) : m_other(other) { } EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a * m_other; } EIGEN_STRONG_INLINE 
const Packet packetOp(const Packet& a) const - { return ei_pmul(a, ei_pset1(m_other)); } - typename ei_makeconst::Nested>::type m_other; + { return internal::pmul(a, pset1(m_other)); } + typename add_const_on_value_type::Nested>::type m_other; }; template -struct ei_functor_traits > -{ enum { Cost = NumTraits::MulCost, PacketAccess = ei_packet_traits::HasMul }; }; +struct functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = packet_traits::HasMul }; }; template -struct ei_scalar_multiple2_op { - typedef typename ei_scalar_product_traits::ReturnType result_type; - EIGEN_STRONG_INLINE ei_scalar_multiple2_op(const ei_scalar_multiple2_op& other) : m_other(other.m_other) { } - EIGEN_STRONG_INLINE ei_scalar_multiple2_op(const Scalar2& other) : m_other(other) { } +struct scalar_multiple2_op { + typedef typename scalar_product_traits::ReturnType result_type; + EIGEN_STRONG_INLINE scalar_multiple2_op(const scalar_multiple2_op& other) : m_other(other.m_other) { } + EIGEN_STRONG_INLINE scalar_multiple2_op(const Scalar2& other) : m_other(other) { } EIGEN_STRONG_INLINE result_type operator() (const Scalar1& a) const { return a * m_other; } - typename ei_makeconst::Nested>::type m_other; + typename add_const_on_value_type::Nested>::type m_other; }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = NumTraits::MulCost, PacketAccess = false }; }; template -struct ei_scalar_quotient1_impl { - typedef typename ei_packet_traits::type Packet; +struct scalar_quotient1_impl { + typedef typename packet_traits::type Packet; // FIXME default copy constructors seems bugged with std::complex<> - EIGEN_STRONG_INLINE ei_scalar_quotient1_impl(const ei_scalar_quotient1_impl& other) : m_other(other.m_other) { } - EIGEN_STRONG_INLINE ei_scalar_quotient1_impl(const Scalar& other) : m_other(static_cast(1) / other) {} + EIGEN_STRONG_INLINE scalar_quotient1_impl(const scalar_quotient1_impl& other) : m_other(other.m_other) { } + EIGEN_STRONG_INLINE 
scalar_quotient1_impl(const Scalar& other) : m_other(static_cast(1) / other) {} EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a * m_other; } EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const - { return ei_pmul(a, ei_pset1(m_other)); } + { return internal::pmul(a, pset1(m_other)); } const Scalar m_other; }; template -struct ei_functor_traits > -{ enum { Cost = NumTraits::MulCost, PacketAccess = ei_packet_traits::HasMul }; }; +struct functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = packet_traits::HasMul }; }; template -struct ei_scalar_quotient1_impl { +struct scalar_quotient1_impl { // FIXME default copy constructors seems bugged with std::complex<> - EIGEN_STRONG_INLINE ei_scalar_quotient1_impl(const ei_scalar_quotient1_impl& other) : m_other(other.m_other) { } - EIGEN_STRONG_INLINE ei_scalar_quotient1_impl(const Scalar& other) : m_other(other) {} + EIGEN_STRONG_INLINE scalar_quotient1_impl(const scalar_quotient1_impl& other) : m_other(other.m_other) { } + EIGEN_STRONG_INLINE scalar_quotient1_impl(const Scalar& other) : m_other(other) {} EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a / m_other; } - typename ei_makeconst::Nested>::type m_other; + typename add_const_on_value_type::Nested>::type m_other; }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 2 * NumTraits::MulCost, PacketAccess = false }; }; /** \internal @@ -471,43 +481,43 @@ struct ei_functor_traits > * \sa class CwiseUnaryOp, MatrixBase::operator/ */ template -struct ei_scalar_quotient1_op : ei_scalar_quotient1_impl::IsInteger > { - EIGEN_STRONG_INLINE ei_scalar_quotient1_op(const Scalar& other) - : ei_scalar_quotient1_impl::IsInteger >(other) {} +struct scalar_quotient1_op : scalar_quotient1_impl::IsInteger > { + EIGEN_STRONG_INLINE scalar_quotient1_op(const Scalar& other) + : scalar_quotient1_impl::IsInteger >(other) {} }; template -struct ei_functor_traits > -: 
ei_functor_traits::IsInteger> > +struct functor_traits > +: functor_traits::IsInteger> > {}; // nullary functors template -struct ei_scalar_constant_op { - typedef typename ei_packet_traits::type Packet; - EIGEN_STRONG_INLINE ei_scalar_constant_op(const ei_scalar_constant_op& other) : m_other(other.m_other) { } - EIGEN_STRONG_INLINE ei_scalar_constant_op(const Scalar& other) : m_other(other) { } +struct scalar_constant_op { + typedef typename packet_traits::type Packet; + EIGEN_STRONG_INLINE scalar_constant_op(const scalar_constant_op& other) : m_other(other.m_other) { } + EIGEN_STRONG_INLINE scalar_constant_op(const Scalar& other) : m_other(other) { } template EIGEN_STRONG_INLINE const Scalar operator() (Index, Index = 0) const { return m_other; } template - EIGEN_STRONG_INLINE const Packet packetOp(Index, Index = 0) const { return ei_pset1(m_other); } + EIGEN_STRONG_INLINE const Packet packetOp(Index, Index = 0) const { return internal::pset1(m_other); } const Scalar m_other; }; template -struct ei_functor_traits > +struct functor_traits > // FIXME replace this packet test by a safe one -{ enum { Cost = 1, PacketAccess = ei_packet_traits::Vectorizable, IsRepeatable = true }; }; +{ enum { Cost = 1, PacketAccess = packet_traits::Vectorizable, IsRepeatable = true }; }; -template struct ei_scalar_identity_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_identity_op) +template struct scalar_identity_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_identity_op) template EIGEN_STRONG_INLINE const Scalar operator() (Index row, Index col) const { return row==col ? 
Scalar(1) : Scalar(0); } }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = NumTraits::AddCost, PacketAccess = false, IsRepeatable = true }; }; -template struct ei_linspaced_op_impl; +template struct linspaced_op_impl; // linear access for packet ops: // 1) initialization @@ -515,19 +525,19 @@ template struct ei_linspaced_op_impl; // 2) each step // base += [size*step, ..., size*step] template -struct ei_linspaced_op_impl +struct linspaced_op_impl { - typedef typename ei_packet_traits::type Packet; + typedef typename packet_traits::type Packet; - ei_linspaced_op_impl(Scalar low, Scalar step) : + linspaced_op_impl(Scalar low, Scalar step) : m_low(low), m_step(step), - m_packetStep(ei_pset1(ei_packet_traits::size*step)), - m_base(ei_padd(ei_pset1(low),ei_pmul(ei_pset1(step),ei_plset(-ei_packet_traits::size)))) {} + m_packetStep(pset1(packet_traits::size*step)), + m_base(padd(pset1(low),pmul(pset1(step),plset(-packet_traits::size)))) {} template EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return m_low+i*m_step; } template - EIGEN_STRONG_INLINE const Packet packetOp(Index) const { return m_base = ei_padd(m_base,m_packetStep); } + EIGEN_STRONG_INLINE const Packet packetOp(Index) const { return m_base = padd(m_base,m_packetStep); } const Scalar m_low; const Scalar m_step; @@ -539,19 +549,20 @@ struct ei_linspaced_op_impl // 1) each step // [low, ..., low] + ( [step, ..., step] * ( [i, ..., i] + [0, ..., size] ) ) template -struct ei_linspaced_op_impl +struct linspaced_op_impl { - typedef typename ei_packet_traits::type Packet; + typedef typename packet_traits::type Packet; - ei_linspaced_op_impl(Scalar low, Scalar step) : + linspaced_op_impl(Scalar low, Scalar step) : m_low(low), m_step(step), - m_lowPacket(ei_pset1(m_low)), m_stepPacket(ei_pset1(m_step)), m_interPacket(ei_plset(0)) {} + m_lowPacket(pset1(m_low)), m_stepPacket(pset1(m_step)), m_interPacket(plset(0)) {} template EIGEN_STRONG_INLINE const Scalar operator() 
(Index i) const { return m_low+i*m_step; } + template EIGEN_STRONG_INLINE const Packet packetOp(Index i) const - { return ei_padd(m_lowPacket, ei_pmul(m_stepPacket, ei_padd(ei_pset1(i),m_interPacket))); } + { return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1(i),m_interPacket))); } const Scalar m_low; const Scalar m_step; @@ -565,110 +576,190 @@ struct ei_linspaced_op_impl // Forward declaration (we default to random access which does not really give // us a speed gain when using packet access but it allows to use the functor in // nested expressions). -template struct ei_linspaced_op; -template struct ei_functor_traits< ei_linspaced_op > -{ enum { Cost = 1, PacketAccess = ei_packet_traits::HasSetLinear, IsRepeatable = true }; }; -template struct ei_linspaced_op +template struct linspaced_op; +template struct functor_traits< linspaced_op > +{ enum { Cost = 1, PacketAccess = packet_traits::HasSetLinear, IsRepeatable = true }; }; +template struct linspaced_op { - typedef typename ei_packet_traits::type Packet; - ei_linspaced_op(Scalar low, Scalar high, int num_steps) : impl(low, (high-low)/(num_steps-1)) {} + typedef typename packet_traits::type Packet; + linspaced_op(Scalar low, Scalar high, int num_steps) : impl(low, (high-low)/(num_steps-1)) {} + template - EIGEN_STRONG_INLINE const Scalar operator() (Index i, Index = 0) const { return impl(i); } + EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); } + + // We need this function when assigning e.g. a RowVectorXd to a MatrixXd since + // there row==0 and col is used for the actual iteration. 
template - EIGEN_STRONG_INLINE const Packet packetOp(Index i, Index = 0) const { return impl.packetOp(i); } + EIGEN_STRONG_INLINE const Scalar operator() (Index row, Index col) const + { + eigen_assert(col==0 || row==0); + return impl(col + row); + } + + template + EIGEN_STRONG_INLINE const Packet packetOp(Index i) const { return impl.packetOp(i); } + + // We need this function when assigning e.g. a RowVectorXd to a MatrixXd since + // there row==0 and col is used for the actual iteration. + template + EIGEN_STRONG_INLINE const Packet packetOp(Index row, Index col) const + { + eigen_assert(col==0 || row==0); + return impl.packetOp(col + row); + } + // This proxy object handles the actual required temporaries, the different // implementations (random vs. sequential access) as well as the // correct piping to size 2/4 packet operations. - const ei_linspaced_op_impl impl; }; -// all functors allow linear access, except ei_scalar_identity_op. So we fix here a quick meta +// all functors allow linear access, except scalar_identity_op. So we fix here a quick meta // to indicate whether a functor allows linear access, just always answering 'yes' except for -// ei_scalar_identity_op. -// FIXME move this to ei_functor_traits adding a ei_functor_default -template struct ei_functor_has_linear_access { enum { ret = 1 }; }; -template struct ei_functor_has_linear_access > { enum { ret = 0 }; }; +// scalar_identity_op. +// FIXME move this to functor_traits adding a functor_default +template struct functor_has_linear_access { enum { ret = 1 }; }; +template struct functor_has_linear_access > { enum { ret = 0 }; }; // in CwiseBinaryOp, we require the Lhs and Rhs to have the same scalar type, except for multiplication // where we only require them to have the same _real_ scalar type so one may multiply, say, float by complex.
-// FIXME move this to ei_functor_traits adding a ei_functor_default -template struct ei_functor_allows_mixing_real_and_complex { enum { ret = 0 }; }; -template struct ei_functor_allows_mixing_real_and_complex > { enum { ret = 1 }; }; +// FIXME move this to functor_traits adding a functor_default +template struct functor_allows_mixing_real_and_complex { enum { ret = 0 }; }; +template struct functor_allows_mixing_real_and_complex > { enum { ret = 1 }; }; +template struct functor_allows_mixing_real_and_complex > { enum { ret = 1 }; }; /** \internal * \brief Template functor to add a scalar to a fixed other one * \sa class CwiseUnaryOp, Array::operator+ */ -/* If you wonder why doing the ei_pset1() in packetOp() is an optimization check ei_scalar_multiple_op */ +/* If you wonder why doing the pset1() in packetOp() is an optimization check scalar_multiple_op */ template -struct ei_scalar_add_op { - typedef typename ei_packet_traits::type Packet; +struct scalar_add_op { + typedef typename packet_traits::type Packet; // FIXME default copy constructors seems bugged with std::complex<> - inline ei_scalar_add_op(const ei_scalar_add_op& other) : m_other(other.m_other) { } - inline ei_scalar_add_op(const Scalar& other) : m_other(other) { } + inline scalar_add_op(const scalar_add_op& other) : m_other(other.m_other) { } + inline scalar_add_op(const Scalar& other) : m_other(other) { } inline Scalar operator() (const Scalar& a) const { return a + m_other; } inline const Packet packetOp(const Packet& a) const - { return ei_padd(a, ei_pset1(m_other)); } + { return internal::padd(a, pset1(m_other)); } const Scalar m_other; }; template -struct ei_functor_traits > -{ enum { Cost = NumTraits::AddCost, PacketAccess = ei_packet_traits::HasAdd }; }; +struct functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = packet_traits::HasAdd }; }; /** \internal * \brief Template functor to compute the square root of a scalar * \sa class CwiseUnaryOp, Cwise::sqrt() */ -template struct 
ei_scalar_sqrt_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_sqrt_op) - inline const Scalar operator() (const Scalar& a) const { return ei_sqrt(a); } - typedef typename ei_packet_traits::type Packet; - inline Packet packetOp(const Packet& a) const { return ei_psqrt(a); } +template struct scalar_sqrt_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_sqrt_op) + inline const Scalar operator() (const Scalar& a) const { return sqrt(a); } + typedef typename packet_traits::type Packet; + inline Packet packetOp(const Packet& a) const { return internal::psqrt(a); } }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, - PacketAccess = ei_packet_traits::HasSqrt + PacketAccess = packet_traits::HasSqrt }; }; /** \internal * \brief Template functor to compute the cosine of a scalar - * \sa class CwiseUnaryOp, Cwise::cos() + * \sa class CwiseUnaryOp, ArrayBase::cos() */ -template struct ei_scalar_cos_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_cos_op) - inline Scalar operator() (const Scalar& a) const { return ei_cos(a); } - typedef typename ei_packet_traits::type Packet; - inline Packet packetOp(const Packet& a) const { return ei_pcos(a); } +template struct scalar_cos_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cos_op) + inline Scalar operator() (const Scalar& a) const { return cos(a); } + typedef typename packet_traits::type Packet; + inline Packet packetOp(const Packet& a) const { return internal::pcos(a); } }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, - PacketAccess = ei_packet_traits::HasCos + PacketAccess = packet_traits::HasCos }; }; /** \internal * \brief Template functor to compute the sine of a scalar - * \sa class CwiseUnaryOp, Cwise::sin() + * \sa class CwiseUnaryOp, ArrayBase::sin() */ -template struct ei_scalar_sin_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_sin_op) - inline const Scalar operator() (const Scalar& a) const { return ei_sin(a); } - typedef typename ei_packet_traits::type 
Packet; - inline Packet packetOp(const Packet& a) const { return ei_psin(a); } +template struct scalar_sin_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_sin_op) + inline const Scalar operator() (const Scalar& a) const { return sin(a); } + typedef typename packet_traits::type Packet; + inline Packet packetOp(const Packet& a) const { return internal::psin(a); } }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, - PacketAccess = ei_packet_traits::HasSin + PacketAccess = packet_traits::HasSin + }; +}; + + +/** \internal + * \brief Template functor to compute the tan of a scalar + * \sa class CwiseUnaryOp, ArrayBase::tan() + */ +template struct scalar_tan_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_tan_op) + inline const Scalar operator() (const Scalar& a) const { return tan(a); } + typedef typename packet_traits::type Packet; + inline Packet packetOp(const Packet& a) const { return internal::ptan(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = packet_traits::HasTan + }; +}; + +/** \internal + * \brief Template functor to compute the arc cosine of a scalar + * \sa class CwiseUnaryOp, ArrayBase::acos() + */ +template struct scalar_acos_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_acos_op) + inline const Scalar operator() (const Scalar& a) const { return acos(a); } + typedef typename packet_traits::type Packet; + inline Packet packetOp(const Packet& a) const { return internal::pacos(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = packet_traits::HasACos + }; +}; + +/** \internal + * \brief Template functor to compute the arc sine of a scalar + * \sa class CwiseUnaryOp, ArrayBase::asin() + */ +template struct scalar_asin_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_asin_op) + inline const Scalar operator() (const Scalar& a) const { return asin(a); } + typedef typename packet_traits::type Packet; + inline Packet packetOp(const
Packet& a) const { return internal::pasin(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = packet_traits::HasASin }; }; @@ -677,15 +768,15 @@ struct ei_functor_traits > * \sa class CwiseUnaryOp, Cwise::pow */ template -struct ei_scalar_pow_op { +struct scalar_pow_op { // FIXME default copy constructors seems bugged with std::complex<> - inline ei_scalar_pow_op(const ei_scalar_pow_op& other) : m_exponent(other.m_exponent) { } - inline ei_scalar_pow_op(const Scalar& exponent) : m_exponent(exponent) {} - inline Scalar operator() (const Scalar& a) const { return ei_pow(a, m_exponent); } + inline scalar_pow_op(const scalar_pow_op& other) : m_exponent(other.m_exponent) { } + inline scalar_pow_op(const Scalar& exponent) : m_exponent(exponent) {} + inline Scalar operator() (const Scalar& a) const { return internal::pow(a, m_exponent); } const Scalar m_exponent; }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false }; }; /** \internal @@ -693,155 +784,157 @@ struct ei_functor_traits > * \sa class CwiseUnaryOp, Cwise::inverse() */ template -struct ei_scalar_inverse_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_inverse_op) +struct scalar_inverse_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_inverse_op) inline Scalar operator() (const Scalar& a) const { return Scalar(1)/a; } template inline const Packet packetOp(const Packet& a) const - { return ei_pdiv(ei_pset1(Scalar(1)),a); } + { return internal::pdiv(pset1(Scalar(1)),a); } }; template -struct ei_functor_traits > -{ enum { Cost = NumTraits::MulCost, PacketAccess = ei_packet_traits::HasDiv }; }; +struct functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = packet_traits::HasDiv }; }; /** \internal * \brief Template functor to compute the square of a scalar * \sa class CwiseUnaryOp, Cwise::square() */ template -struct ei_scalar_square_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_square_op) +struct
scalar_square_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_square_op) inline Scalar operator() (const Scalar& a) const { return a*a; } template inline const Packet packetOp(const Packet& a) const - { return ei_pmul(a,a); } + { return internal::pmul(a,a); } }; template -struct ei_functor_traits > -{ enum { Cost = NumTraits::MulCost, PacketAccess = ei_packet_traits::HasMul }; }; +struct functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = packet_traits::HasMul }; }; /** \internal * \brief Template functor to compute the cube of a scalar * \sa class CwiseUnaryOp, Cwise::cube() */ template -struct ei_scalar_cube_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_cube_op) +struct scalar_cube_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cube_op) inline Scalar operator() (const Scalar& a) const { return a*a*a; } template inline const Packet packetOp(const Packet& a) const - { return ei_pmul(a,ei_pmul(a,a)); } + { return internal::pmul(a,pmul(a,a)); } }; template -struct ei_functor_traits > -{ enum { Cost = 2*NumTraits::MulCost, PacketAccess = ei_packet_traits::HasMul }; }; +struct functor_traits > +{ enum { Cost = 2*NumTraits::MulCost, PacketAccess = packet_traits::HasMul }; }; // default functor traits for STL functors: template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = NumTraits::MulCost, PacketAccess = false }; }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = NumTraits::MulCost, PacketAccess = false }; }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; template -struct ei_functor_traits > 
+struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; template -struct ei_functor_traits > -{ enum { Cost = ei_functor_traits::Cost, PacketAccess = false }; }; +struct functor_traits > +{ enum { Cost = functor_traits::Cost, PacketAccess = false }; }; template -struct ei_functor_traits > -{ enum { Cost = ei_functor_traits::Cost, PacketAccess = false }; }; +struct functor_traits > +{ enum { Cost = functor_traits::Cost, PacketAccess = false }; }; template -struct ei_functor_traits > -{ enum { Cost = 1 + ei_functor_traits::Cost, PacketAccess = false }; }; +struct functor_traits > +{ enum { Cost = 1 + functor_traits::Cost, PacketAccess = false }; }; template -struct ei_functor_traits > -{ enum { Cost = 1 + ei_functor_traits::Cost, PacketAccess = false }; }; +struct functor_traits > +{ enum { Cost = 1 + functor_traits::Cost, PacketAccess = false }; }; #ifdef EIGEN_STDEXT_SUPPORT template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 0, PacketAccess = false }; }; template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 0, PacketAccess = false }; }; template -struct ei_functor_traits > > +struct functor_traits > > { enum { Cost = 0, PacketAccess = false }; }; template 
-struct ei_functor_traits > > +struct functor_traits > > { enum { Cost = 0, PacketAccess = false }; }; template -struct ei_functor_traits > -{ enum { Cost = ei_functor_traits::Cost + ei_functor_traits::Cost, PacketAccess = false }; }; +struct functor_traits > +{ enum { Cost = functor_traits::Cost + functor_traits::Cost, PacketAccess = false }; }; template -struct ei_functor_traits > -{ enum { Cost = ei_functor_traits::Cost + ei_functor_traits::Cost + ei_functor_traits::Cost, PacketAccess = false }; }; +struct functor_traits > +{ enum { Cost = functor_traits::Cost + functor_traits::Cost + functor_traits::Cost, PacketAccess = false }; }; #endif // EIGEN_STDEXT_SUPPORT -// allow to add new functors and specializations of ei_functor_traits from outside Eigen. -// this macro is really needed because ei_functor_traits must be specialized after it is declared but before it is used... +// allow to add new functors and specializations of functor_traits from outside Eigen. +// this macro is really needed because functor_traits must be specialized after it is declared but before it is used... #ifdef EIGEN_FUNCTORS_PLUGIN #include EIGEN_FUNCTORS_PLUGIN #endif +} // end namespace internal + #endif // EIGEN_FUNCTORS_H diff --git a/gtsam/3rdparty/Eigen/src/Core/Fuzzy.h b/gtsam/3rdparty/Eigen/src/Core/Fuzzy.h index 4a3b5e4dc..3cd82d802 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Fuzzy.h +++ b/gtsam/3rdparty/Eigen/src/Core/Fuzzy.h @@ -26,9 +26,67 @@ #ifndef EIGEN_FUZZY_H #define EIGEN_FUZZY_H -// TODO support small integer types properly i.e. do exact compare on coeffs --- taking a HS norm is guaranteed to cause integer overflow. 
+namespace internal +{ + +template::IsInteger> +struct isApprox_selector +{ + static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar prec) + { + const typename internal::nested::type nested(x); + const typename internal::nested::type otherNested(y); + return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * std::min(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum()); + } +}; + +template +struct isApprox_selector +{ + static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar) + { + return x.matrix() == y.matrix(); + } +}; + +template::IsInteger> +struct isMuchSmallerThan_object_selector +{ + static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar prec) + { + return x.cwiseAbs2().sum() <= abs2(prec) * y.cwiseAbs2().sum(); + } +}; + +template +struct isMuchSmallerThan_object_selector +{ + static bool run(const Derived& x, const OtherDerived&, typename Derived::RealScalar) + { + return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix(); + } +}; + +template::IsInteger> +struct isMuchSmallerThan_scalar_selector +{ + static bool run(const Derived& x, const typename Derived::RealScalar& y, typename Derived::RealScalar prec) + { + return x.cwiseAbs2().sum() <= abs2(prec * y); + } +}; + +template +struct isMuchSmallerThan_scalar_selector +{ + static bool run(const Derived& x, const typename Derived::RealScalar&, typename Derived::RealScalar) + { + return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix(); + } +}; + +} // end namespace internal -#ifndef EIGEN_LEGACY_COMPARES /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. @@ -42,10 +100,10 @@ * \note Because of the multiplicativeness of this comparison, one can't use this function * to check whether \c *this is approximately equal to the zero matrix or vector. 
* Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix - * or vector. If you want to test whether \c *this is zero, use ei_isMuchSmallerThan(const + * or vector. If you want to test whether \c *this is zero, use internal::isMuchSmallerThan(const * RealScalar&, RealScalar) instead. * - * \sa ei_isMuchSmallerThan(const RealScalar&, RealScalar) const + * \sa internal::isMuchSmallerThan(const RealScalar&, RealScalar) const */ template template @@ -54,12 +112,7 @@ bool DenseBase::isApprox( RealScalar prec ) const { - const typename ei_nested::type nested(derived()); - const typename ei_nested::type otherNested(other.derived()); -// std::cerr << typeid(Derived).name() << " => " << typeid(typename ei_nested::type).name() << "\n"; -// std::cerr << typeid(OtherDerived).name() << " => " << typeid(typename ei_nested::type).name() << "\n"; -// return false; - return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * std::min(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum()); + return internal::isApprox_selector::run(derived(), other.derived(), prec); } /** \returns \c true if the norm of \c *this is much smaller than \a other, @@ -81,7 +134,7 @@ bool DenseBase::isMuchSmallerThan( RealScalar prec ) const { - return derived().cwiseAbs2().sum() <= prec * prec * other * other; + return internal::isMuchSmallerThan_scalar_selector::run(derived(), other, prec); } /** \returns \c true if the norm of \c *this is much smaller than the norm of \a other, @@ -101,140 +154,7 @@ bool DenseBase::isMuchSmallerThan( RealScalar prec ) const { - return derived().cwiseAbs2().sum() <= prec * prec * other.derived().cwiseAbs2().sum(); + return internal::isMuchSmallerThan_object_selector::run(derived(), other.derived(), prec); } -#else - -template -struct ei_fuzzy_selector; - -/** \returns \c true if \c *this is approximately equal to \a other, within the precision - * determined by \a prec. - * - * \note The fuzzy compares are done multiplicatively. 
Two vectors \f$ v \f$ and \f$ w \f$ - * are considered to be approximately equal within precision \f$ p \f$ if - * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f] - * For matrices, the comparison is done on all columns. - * - * \note Because of the multiplicativeness of this comparison, one can't use this function - * to check whether \c *this is approximately equal to the zero matrix or vector. - * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix - * or vector. If you want to test whether \c *this is zero, use ei_isMuchSmallerThan(const - * RealScalar&, RealScalar) instead. - * - * \sa ei_isMuchSmallerThan(const RealScalar&, RealScalar) const - */ -template -template -bool DenseBase::isApprox( - const DenseBase& other, - RealScalar prec -) const -{ - return ei_fuzzy_selector::isApprox(derived(), other.derived(), prec); -} - -/** \returns \c true if the norm of \c *this is much smaller than \a other, - * within the precision determined by \a prec. - * - * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is - * considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if - * \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f] - * For matrices, the comparison is done on all columns. - * - * \sa isApprox(), isMuchSmallerThan(const DenseBase&, RealScalar) const - */ -template -bool DenseBase::isMuchSmallerThan( - const typename NumTraits::Real& other, - RealScalar prec -) const -{ - return ei_fuzzy_selector::isMuchSmallerThan(derived(), other, prec); -} - -/** \returns \c true if the norm of \c *this is much smaller than the norm of \a other, - * within the precision determined by \a prec. - * - * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is - * considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if - * \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f] - * For matrices, the comparison is done on all columns. 
- * - * \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const - */ -template -template -bool DenseBase::isMuchSmallerThan( - const DenseBase& other, - RealScalar prec -) const -{ - return ei_fuzzy_selector::isMuchSmallerThan(derived(), other.derived(), prec); -} - - -template -struct ei_fuzzy_selector -{ - typedef typename Derived::RealScalar RealScalar; - static bool isApprox(const Derived& self, const OtherDerived& other, RealScalar prec) - { - EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) - ei_assert(self.size() == other.size()); - return((self - other).squaredNorm() <= std::min(self.squaredNorm(), other.squaredNorm()) * prec * prec); - } - static bool isMuchSmallerThan(const Derived& self, const RealScalar& other, RealScalar prec) - { - return(self.squaredNorm() <= ei_abs2(other * prec)); - } - static bool isMuchSmallerThan(const Derived& self, const OtherDerived& other, RealScalar prec) - { - EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) - ei_assert(self.size() == other.size()); - return(self.squaredNorm() <= other.squaredNorm() * prec * prec); - } -}; - -template -struct ei_fuzzy_selector -{ - typedef typename Derived::RealScalar RealScalar; - typedef typename Derived::Index Index; - static bool isApprox(const Derived& self, const OtherDerived& other, RealScalar prec) - { - EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived) - ei_assert(self.rows() == other.rows() && self.cols() == other.cols()); - typename Derived::Nested nested(self); - typename OtherDerived::Nested otherNested(other); - for(Index i = 0; i < self.cols(); ++i) - if((nested.col(i) - otherNested.col(i)).squaredNorm() - > std::min(nested.col(i).squaredNorm(), otherNested.col(i).squaredNorm()) * prec * prec) - return false; - return true; - } - static bool isMuchSmallerThan(const Derived& self, const RealScalar& other, RealScalar prec) - { - typename Derived::Nested nested(self); - for(Index i = 0; i < self.cols(); ++i) - 
if(nested.col(i).squaredNorm() > ei_abs2(other * prec)) - return false; - return true; - } - static bool isMuchSmallerThan(const Derived& self, const OtherDerived& other, RealScalar prec) - { - EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived) - ei_assert(self.rows() == other.rows() && self.cols() == other.cols()); - typename Derived::Nested nested(self); - typename OtherDerived::Nested otherNested(other); - for(Index i = 0; i < self.cols(); ++i) - if(nested.col(i).squaredNorm() > otherNested.col(i).squaredNorm() * prec * prec) - return false; - return true; - } -}; - -#endif - #endif // EIGEN_FUZZY_H diff --git a/gtsam/3rdparty/Eigen/src/Core/GenericPacketMath.h b/gtsam/3rdparty/Eigen/src/Core/GenericPacketMath.h index 8ace18174..4ba322a32 100644 --- a/gtsam/3rdparty/Eigen/src/Core/GenericPacketMath.h +++ b/gtsam/3rdparty/Eigen/src/Core/GenericPacketMath.h @@ -26,6 +26,8 @@ #ifndef EIGEN_GENERIC_PACKET_MATH_H #define EIGEN_GENERIC_PACKET_MATH_H +namespace internal { + /** \internal * \file GenericPacketMath.h * @@ -50,7 +52,7 @@ #define EIGEN_DEBUG_UNALIGNED_STORE #endif -struct ei_default_packet_traits +struct default_packet_traits { enum { HasAdd = 1, @@ -79,7 +81,7 @@ struct ei_default_packet_traits }; }; -template struct ei_packet_traits : ei_default_packet_traits +template struct packet_traits : default_packet_traits { typedef T type; enum { @@ -103,92 +105,92 @@ template struct ei_packet_traits : ei_default_packet_traits /** \internal \returns a + b (coeff-wise) */ template inline Packet -ei_padd(const Packet& a, +padd(const Packet& a, const Packet& b) { return a+b; } /** \internal \returns a - b (coeff-wise) */ template inline Packet -ei_psub(const Packet& a, +psub(const Packet& a, const Packet& b) { return a-b; } /** \internal \returns -a (coeff-wise) */ template inline Packet -ei_pnegate(const Packet& a) { return -a; } +pnegate(const Packet& a) { return -a; } /** \internal \returns conj(a) (coeff-wise) */ template inline Packet -ei_pconj(const 
Packet& a) { return ei_conj(a); } +pconj(const Packet& a) { return conj(a); } /** \internal \returns a * b (coeff-wise) */ template inline Packet -ei_pmul(const Packet& a, +pmul(const Packet& a, const Packet& b) { return a*b; } /** \internal \returns a / b (coeff-wise) */ template inline Packet -ei_pdiv(const Packet& a, +pdiv(const Packet& a, const Packet& b) { return a/b; } /** \internal \returns the min of \a a and \a b (coeff-wise) */ template inline Packet -ei_pmin(const Packet& a, +pmin(const Packet& a, const Packet& b) { return std::min(a, b); } /** \internal \returns the max of \a a and \a b (coeff-wise) */ template inline Packet -ei_pmax(const Packet& a, +pmax(const Packet& a, const Packet& b) { return std::max(a, b); } /** \internal \returns the absolute value of \a a */ template inline Packet -ei_pabs(const Packet& a) { return ei_abs(a); } +pabs(const Packet& a) { return abs(a); } /** \internal \returns the bitwise and of \a a and \a b */ template inline Packet -ei_pand(const Packet& a, const Packet& b) { return a & b; } +pand(const Packet& a, const Packet& b) { return a & b; } /** \internal \returns the bitwise or of \a a and \a b */ template inline Packet -ei_por(const Packet& a, const Packet& b) { return a | b; } +por(const Packet& a, const Packet& b) { return a | b; } /** \internal \returns the bitwise xor of \a a and \a b */ template inline Packet -ei_pxor(const Packet& a, const Packet& b) { return a ^ b; } +pxor(const Packet& a, const Packet& b) { return a ^ b; } /** \internal \returns the bitwise andnot of \a a and \a b */ template inline Packet -ei_pandnot(const Packet& a, const Packet& b) { return a & (!b); } +pandnot(const Packet& a, const Packet& b) { return a & (!b); } /** \internal \returns a packet version of \a *from, from must be 16 bytes aligned */ template inline Packet -ei_pload(const typename ei_unpacket_traits::type* from) { return *from; } +pload(const typename unpacket_traits::type* from) { return *from; } /** \internal \returns a 
packet version of \a *from, (un-aligned load) */ template inline Packet -ei_ploadu(const typename ei_unpacket_traits::type* from) { return *from; } +ploadu(const typename unpacket_traits::type* from) { return *from; } /** \internal \returns a packet with elements of \a *from duplicated, e.g.: (from[0],from[0],from[1],from[1]) */ template inline Packet -ei_ploaddup(const typename ei_unpacket_traits::type* from) { return *from; } +ploaddup(const typename unpacket_traits::type* from) { return *from; } /** \internal \returns a packet with constant coefficients \a a, e.g.: (a,a,a,a) */ template inline Packet -ei_pset1(const typename ei_unpacket_traits::type& a) { return a; } +pset1(const typename unpacket_traits::type& a) { return a; } /** \internal \brief Returns a packet with coefficients (a,a+1,...,a+packet_size-1). */ -template inline typename ei_packet_traits::type -ei_plset(const Scalar& a) { return a; } +template inline typename packet_traits::type +plset(const Scalar& a) { return a; } /** \internal copy the packet \a from to \a *to, \a to must be 16 bytes aligned */ -template inline void ei_pstore(Scalar* to, const Packet& from) +template inline void pstore(Scalar* to, const Packet& from) { (*to) = from; } /** \internal copy the packet \a from to \a *to, (un-aligned store) */ -template inline void ei_pstoreu(Scalar* to, const Packet& from) +template inline void pstoreu(Scalar* to, const Packet& from) { (*to) = from; } /** \internal tries to do cache prefetching of \a addr */ -template inline void ei_prefetch(const Scalar* addr) +template inline void prefetch(const Scalar* addr) { #if !defined(_MSC_VER) __builtin_prefetch(addr); @@ -196,93 +198,118 @@ __builtin_prefetch(addr); } /** \internal \returns the first element of a packet */ -template inline typename ei_unpacket_traits::type ei_pfirst(const Packet& a) +template inline typename unpacket_traits::type pfirst(const Packet& a) { return a; } /** \internal \returns a packet where the element i contains the sum 
of the packet of \a vec[i] */ template inline Packet -ei_preduxp(const Packet* vecs) { return vecs[0]; } +preduxp(const Packet* vecs) { return vecs[0]; } /** \internal \returns the sum of the elements of \a a*/ -template inline typename ei_unpacket_traits::type ei_predux(const Packet& a) +template inline typename unpacket_traits::type predux(const Packet& a) { return a; } /** \internal \returns the product of the elements of \a a*/ -template inline typename ei_unpacket_traits::type ei_predux_mul(const Packet& a) +template inline typename unpacket_traits::type predux_mul(const Packet& a) { return a; } /** \internal \returns the min of the elements of \a a*/ -template inline typename ei_unpacket_traits::type ei_predux_min(const Packet& a) +template inline typename unpacket_traits::type predux_min(const Packet& a) { return a; } /** \internal \returns the max of the elements of \a a*/ -template inline typename ei_unpacket_traits::type ei_predux_max(const Packet& a) +template inline typename unpacket_traits::type predux_max(const Packet& a) { return a; } /** \internal \returns the reversed elements of \a a*/ -template inline Packet ei_preverse(const Packet& a) +template inline Packet preverse(const Packet& a) { return a; } + +/** \internal \returns \a a with real and imaginary part flipped (for complex type only) */ +template inline Packet pcplxflip(const Packet& a) +{ return Packet(imag(a),real(a)); } + /************************** * Special math functions ***************************/ -/** \internal \returns the sin of \a a (coeff-wise) */ +/** \internal \returns the sine of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS -Packet ei_psin(const Packet& a) { return ei_sin(a); } +Packet psin(const Packet& a) { return sin(a); } -/** \internal \returns the cos of \a a (coeff-wise) */ +/** \internal \returns the cosine of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS -Packet ei_pcos(const Packet& a) { 
return ei_cos(a); } +Packet pcos(const Packet& a) { return cos(a); } + +/** \internal \returns the tan of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet ptan(const Packet& a) { return tan(a); } + +/** \internal \returns the arc sine of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pasin(const Packet& a) { return asin(a); } + +/** \internal \returns the arc cosine of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pacos(const Packet& a) { return acos(a); } /** \internal \returns the exp of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS -Packet ei_pexp(const Packet& a) { return ei_exp(a); } +Packet pexp(const Packet& a) { return exp(a); } /** \internal \returns the log of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS -Packet ei_plog(const Packet& a) { return ei_log(a); } +Packet plog(const Packet& a) { return log(a); } /** \internal \returns the square-root of \a a (coeff-wise) */ template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS -Packet ei_psqrt(const Packet& a) { return ei_sqrt(a); } +Packet psqrt(const Packet& a) { return sqrt(a); } /*************************************************************************** * The following functions might not have to be overwritten for vectorized types ***************************************************************************/ +/** \internal copy a packet with constant coeficient \a a (e.g., [a,a,a,a]) to \a *to. 
\a to must be 16 bytes aligned */ +// NOTE: this function must really be templated on the packet type (think about different packet types for the same scalar type) +template +inline void pstore1(typename unpacket_traits::type* to, const typename unpacket_traits::type& a) +{ + pstore(to, pset1(a)); +} + /** \internal \returns a * b + c (coeff-wise) */ template inline Packet -ei_pmadd(const Packet& a, +pmadd(const Packet& a, const Packet& b, const Packet& c) -{ return ei_padd(ei_pmul(a, b),c); } +{ return padd(pmul(a, b),c); } /** \internal \returns a packet version of \a *from. * \If LoadMode equals Aligned, \a from must be 16 bytes aligned */ template -inline Packet ei_ploadt(const typename ei_unpacket_traits::type* from) +inline Packet ploadt(const typename unpacket_traits::type* from) { if(LoadMode == Aligned) - return ei_pload(from); + return pload(from); else - return ei_ploadu(from); + return ploadu(from); } /** \internal copy the packet \a from to \a *to. * If StoreMode equals Aligned, \a to must be 16 bytes aligned */ template -inline void ei_pstoret(Scalar* to, const Packet& from) +inline void pstoret(Scalar* to, const Packet& from) { if(LoadMode == Aligned) - ei_pstore(to, from); + pstore(to, from); else - ei_pstoreu(to, from); + pstoreu(to, from); } -/** \internal default implementation of ei_palign() allowing partial specialization */ +/** \internal default implementation of palign() allowing partial specialization */ template -struct ei_palign_impl +struct palign_impl { // by default data are aligned, so there is nothing to be done :) inline static void run(PacketType&, const PacketType&) {} @@ -291,20 +318,22 @@ struct ei_palign_impl /** \internal update \a first using the concatenation of the \a Offset last elements * of \a first and packet_size minus \a Offset first elements of \a second */ template -inline void ei_palign(PacketType& first, const PacketType& second) +inline void palign(PacketType& first, const PacketType& second) { - 
ei_palign_impl::run(first,second); + palign_impl::run(first,second); } /*************************************************************************** * Fast complex products (GCC generates a function call which is very slow) ***************************************************************************/ -template<> inline std::complex ei_pmul(const std::complex& a, const std::complex& b) -{ return std::complex(ei_real(a)*ei_real(b) - ei_imag(a)*ei_imag(b), ei_imag(a)*ei_real(b) + ei_real(a)*ei_imag(b)); } +template<> inline std::complex pmul(const std::complex& a, const std::complex& b) +{ return std::complex(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); } -template<> inline std::complex ei_pmul(const std::complex& a, const std::complex& b) -{ return std::complex(ei_real(a)*ei_real(b) - ei_imag(a)*ei_imag(b), ei_imag(a)*ei_real(b) + ei_real(a)*ei_imag(b)); } +template<> inline std::complex pmul(const std::complex& a, const std::complex& b) +{ return std::complex(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); } + +} // end namespace internal #endif // EIGEN_GENERIC_PACKET_MATH_H diff --git a/gtsam/3rdparty/Eigen/src/Core/GlobalFunctions.h b/gtsam/3rdparty/Eigen/src/Core/GlobalFunctions.h index 001c5b0b6..144145a95 100644 --- a/gtsam/3rdparty/Eigen/src/Core/GlobalFunctions.h +++ b/gtsam/3rdparty/Eigen/src/Core/GlobalFunctions.h @@ -28,7 +28,7 @@ #define EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(NAME,FUNCTOR) \ template \ - inline const Eigen::CwiseUnaryOp, Derived> \ + inline const Eigen::CwiseUnaryOp, const Derived> \ NAME(const Eigen::ArrayBase& x) { \ return x.derived(); \ } @@ -38,7 +38,7 @@ template \ struct NAME##_retval > \ { \ - typedef const Eigen::CwiseUnaryOp, Derived> type; \ + typedef const Eigen::CwiseUnaryOp, const Derived> type; \ }; \ template \ struct NAME##_impl > \ @@ -52,17 +52,20 @@ namespace std { - EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(real,ei_scalar_real_op) - 
EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(imag,ei_scalar_imag_op) - EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(sin,ei_scalar_sin_op) - EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(cos,ei_scalar_cos_op) - EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(exp,ei_scalar_exp_op) - EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(log,ei_scalar_log_op) - EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(abs,ei_scalar_abs_op) - EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(sqrt,ei_scalar_sqrt_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(real,scalar_real_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(imag,scalar_imag_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(sin,scalar_sin_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(cos,scalar_cos_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(asin,scalar_asin_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(acos,scalar_acos_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(tan,scalar_tan_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(exp,scalar_exp_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(log,scalar_log_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(abs,scalar_abs_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(sqrt,scalar_sqrt_op) template - inline const Eigen::CwiseUnaryOp, Derived> + inline const Eigen::CwiseUnaryOp, const Derived> pow(const Eigen::ArrayBase& x, const typename Derived::Scalar& exponent) { \ return x.derived().pow(exponent); \ } @@ -70,17 +73,23 @@ namespace std namespace Eigen { - EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(ei_real,ei_scalar_real_op) - EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(ei_imag,ei_scalar_imag_op) - EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(ei_sin,ei_scalar_sin_op) - EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(ei_cos,ei_scalar_cos_op) - EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(ei_exp,ei_scalar_exp_op) - EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(ei_log,ei_scalar_log_op) - EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(ei_abs,ei_scalar_abs_op) - EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(ei_abs2,ei_scalar_abs2_op) - EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(ei_sqrt,ei_scalar_sqrt_op) + namespace internal + { + 
EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(real,scalar_real_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(imag,scalar_imag_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(sin,scalar_sin_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(cos,scalar_cos_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(asin,scalar_asin_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(acos,scalar_acos_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(tan,scalar_tan_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(exp,scalar_exp_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(log,scalar_log_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs,scalar_abs_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs2,scalar_abs2_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(sqrt,scalar_sqrt_op) + } } -// TODO: cleanly disable those functions that are not supported on Array (ei_real_ref, ei_random, ei_isApprox...) +// TODO: cleanly disable those functions that are not supported on Array (internal::real_ref, internal::random, internal::isApprox...) #endif // EIGEN_GLOBAL_FUNCTIONS_H diff --git a/gtsam/3rdparty/Eigen/src/Core/IO.h b/gtsam/3rdparty/Eigen/src/Core/IO.h index 7c742d867..baaf046ce 100644 --- a/gtsam/3rdparty/Eigen/src/Core/IO.h +++ b/gtsam/3rdparty/Eigen/src/Core/IO.h @@ -30,6 +30,11 @@ enum { DontAlignCols = 1 }; enum { StreamPrecision = -1, FullPrecision = -2 }; +namespace internal { +template +std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt); +} + /** \class IOFormat * \ingroup Core_Module * @@ -106,7 +111,7 @@ class WithFormat friend std::ostream & operator << (std::ostream & s, const WithFormat& wf) { - return ei_print_matrix(s, wf.m_matrix.eval(), wf.m_format); + return internal::print_matrix(s, wf.m_matrix.eval(), wf.m_format); } protected: @@ -128,18 +133,20 @@ DenseBase::format(const IOFormat& fmt) const return WithFormat(derived(), fmt); } +namespace internal { + template -struct ei_significant_decimals_default_impl +struct significant_decimals_default_impl { typedef typename 
NumTraits::Real RealScalar; static inline int run() { - return ei_cast(std::ceil(-ei_log(NumTraits::epsilon())/ei_log(RealScalar(10)))); + return cast(std::ceil(-log(NumTraits::epsilon())/log(RealScalar(10)))); } }; template -struct ei_significant_decimals_default_impl +struct significant_decimals_default_impl { static inline int run() { @@ -148,14 +155,14 @@ struct ei_significant_decimals_default_impl }; template -struct ei_significant_decimals_impl - : ei_significant_decimals_default_impl::IsInteger> +struct significant_decimals_impl + : significant_decimals_default_impl::IsInteger> {}; /** \internal * print the matrix \a _m to the output stream \a s using the output format \a fmt */ template -std::ostream & ei_print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt) +std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt) { if(_m.size() == 0) { @@ -182,7 +189,7 @@ std::ostream & ei_print_matrix(std::ostream & s, const Derived& _m, const IOForm } else { - explicit_precision = ei_significant_decimals_impl::run(); + explicit_precision = significant_decimals_impl::run(); } } else @@ -228,6 +235,8 @@ std::ostream & ei_print_matrix(std::ostream & s, const Derived& _m, const IOForm return s; } +} // end namespace internal + /** \relates DenseBase * * Outputs the matrix, to the given stream. @@ -244,7 +253,7 @@ std::ostream & operator << (std::ostream & s, const DenseBase & m) { - return ei_print_matrix(s, m.eval(), EIGEN_DEFAULT_IO_FORMAT); + return internal::print_matrix(s, m.eval(), EIGEN_DEFAULT_IO_FORMAT); } #endif // EIGEN_IO_H diff --git a/gtsam/3rdparty/Eigen/src/Core/Map.h b/gtsam/3rdparty/Eigen/src/Core/Map.h index 763948453..692d0a179 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Map.h +++ b/gtsam/3rdparty/Eigen/src/Core/Map.h @@ -44,7 +44,7 @@ * data is laid out contiguously in memory. You can however override this by explicitly specifying * inner and outer strides. 
* - * Here's an example of simply mapping a contiguous array as a column-major matrix: + * Here's an example of simply mapping a contiguous array as a \ref TopicStorageOrders "column-major" matrix: * \include Map_simple.cpp * Output: \verbinclude Map_simple.out * @@ -74,12 +74,15 @@ * * This class is the return type of Matrix::Map() but can also be used directly. * - * \sa Matrix::Map() + * \sa Matrix::Map(), \ref TopicStorageOrders */ + +namespace internal { template -struct ei_traits > - : public ei_traits +struct traits > + : public traits { + typedef traits TraitsBase; typedef typename PlainObjectType::Index Index; typedef typename PlainObjectType::Scalar Scalar; enum { @@ -92,21 +95,24 @@ struct ei_traits > HasNoInnerStride = InnerStrideAtCompileTime == 1, HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0, HasNoStride = HasNoInnerStride && HasNoOuterStride, - IsAligned = int(int(MapOptions)&Aligned)==Aligned, + IsAligned = bool(EIGEN_ALIGN) && ((int(MapOptions)&Aligned)==Aligned), IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic, KeepsPacketAccess = bool(HasNoInnerStride) && ( bool(IsDynamicSize) || HasNoOuterStride || ( OuterStrideAtCompileTime!=Dynamic && ((static_cast(sizeof(Scalar))*OuterStrideAtCompileTime)%16)==0 ) ), - Flags0 = ei_traits::Flags, + Flags0 = TraitsBase::Flags, Flags1 = IsAligned ? (int(Flags0) | AlignedBit) : (int(Flags0) & ~AlignedBit), - Flags2 = HasNoStride ? int(Flags1) : int(Flags1 & ~LinearAccessBit), - Flags = KeepsPacketAccess ? int(Flags2) : (int(Flags2) & ~PacketAccessBit) + Flags2 = (bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime)) + ? int(Flags1) : int(Flags1 & ~LinearAccessBit), + Flags3 = is_lvalue::value ? int(Flags2) : (int(Flags2) & ~LvalueBit), + Flags = KeepsPacketAccess ? 
int(Flags3) : (int(Flags3) & ~PacketAccessBit) }; private: - enum { Options }; // Expressions don't support Options + enum { Options }; // Expressions don't have Options }; +} template class Map : public MapBase > @@ -117,6 +123,15 @@ template class Ma EIGEN_DENSE_PUBLIC_INTERFACE(Map) + typedef typename Base::PointerType PointerType; +#if EIGEN2_SUPPORT_STAGE <= STAGE30_FULL_EIGEN3_API + typedef const Scalar* PointerArgType; + inline PointerType cast_to_pointer_type(PointerArgType ptr) { return const_cast(ptr); } +#else + typedef PointerType PointerArgType; + inline PointerType cast_to_pointer_type(PointerArgType ptr) { return ptr; } +#endif + inline Index innerStride() const { return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1; @@ -135,8 +150,8 @@ template class Ma * \param data pointer to the array to map * \param stride optional Stride object, passing the strides. */ - inline Map(const Scalar* data, const StrideType& stride = StrideType()) - : Base(data), m_stride(stride) + inline Map(PointerArgType data, const StrideType& stride = StrideType()) + : Base(cast_to_pointer_type(data)), m_stride(stride) { PlainObjectType::Base::_check_template_params(); } @@ -147,8 +162,8 @@ template class Ma * \param size the size of the vector expression * \param stride optional Stride object, passing the strides. */ - inline Map(const Scalar* data, Index size, const StrideType& stride = StrideType()) - : Base(data, size), m_stride(stride) + inline Map(PointerArgType data, Index size, const StrideType& stride = StrideType()) + : Base(cast_to_pointer_type(data), size), m_stride(stride) { PlainObjectType::Base::_check_template_params(); } @@ -160,8 +175,8 @@ template class Ma * \param cols the number of columns of the matrix expression * \param stride optional Stride object, passing the strides. 
*/ - inline Map(const Scalar* data, Index rows, Index cols, const StrideType& stride = StrideType()) - : Base(data, rows, cols), m_stride(stride) + inline Map(PointerArgType data, Index rows, Index cols, const StrideType& stride = StrideType()) + : Base(cast_to_pointer_type(data), rows, cols), m_stride(stride) { PlainObjectType::Base::_check_template_params(); } @@ -173,11 +188,18 @@ template class Ma StrideType m_stride; }; +template +inline Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> + ::Array(const Scalar *data) +{ + this->_set_noalias(Eigen::Map(data)); +} + template inline Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> ::Matrix(const Scalar *data) { - _set_noalias(Eigen::Map(data)); + this->_set_noalias(Eigen::Map(data)); } #endif // EIGEN_MAP_H diff --git a/gtsam/3rdparty/Eigen/src/Core/MapBase.h b/gtsam/3rdparty/Eigen/src/Core/MapBase.h index 6b1a25ce2..a259e3aee 100644 --- a/gtsam/3rdparty/Eigen/src/Core/MapBase.h +++ b/gtsam/3rdparty/Eigen/src/Core/MapBase.h @@ -26,6 +26,11 @@ #ifndef EIGEN_MAPBASE_H #define EIGEN_MAPBASE_H +#define EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) \ + EIGEN_STATIC_ASSERT((int(internal::traits::Flags) & LinearAccessBit) || Derived::IsVectorAtCompileTime, \ + YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT) + + /** \class MapBase * \ingroup Core_Module * @@ -33,24 +38,28 @@ * * \sa class Map, class Block */ -template class MapBase - : public ei_dense_xpr_base::type +template class MapBase + : public internal::dense_xpr_base::type { public: - typedef typename ei_dense_xpr_base::type Base; + typedef typename internal::dense_xpr_base::type Base; enum { - RowsAtCompileTime = ei_traits::RowsAtCompileTime, - ColsAtCompileTime = ei_traits::ColsAtCompileTime, + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + ColsAtCompileTime = internal::traits::ColsAtCompileTime, SizeAtCompileTime = Base::SizeAtCompileTime }; - - typedef typename ei_traits::StorageKind 
StorageKind; - typedef typename ei_traits::Index Index; - typedef typename ei_traits::Scalar Scalar; - typedef typename ei_packet_traits::type PacketScalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; + typedef typename internal::conditional< + bool(internal::is_lvalue::value), + Scalar *, + const Scalar *>::type + PointerType; using Base::derived; // using Base::RowsAtCompileTime; @@ -63,10 +72,6 @@ template class MapBase using Base::Flags; using Base::IsRowMajor; - using Base::CoeffReadCost; - -// using Base::derived; - using Base::const_cast_derived; using Base::rows; using Base::cols; using Base::size; @@ -74,17 +79,14 @@ template class MapBase using Base::coeffRef; using Base::lazyAssign; using Base::eval; -// using Base::operator=; - using Base::operator+=; - using Base::operator-=; - using Base::operator*=; - using Base::operator/=; using Base::innerStride; using Base::outerStride; using Base::rowStride; using Base::colStride; + // bug 217 - compile error on ICC 11.1 + using Base::operator=; typedef typename Base::CoeffReturnType CoeffReturnType; @@ -104,98 +106,150 @@ template class MapBase return m_data[col * colStride() + row * rowStride()]; } - inline Scalar& coeffRef(Index row, Index col) - { - return const_cast(m_data)[col * colStride() + row * rowStride()]; - } - inline const Scalar& coeff(Index index) const { - ei_assert(Derived::IsVectorAtCompileTime || (ei_traits::Flags & LinearAccessBit)); + EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) return m_data[index * innerStride()]; } - inline Scalar& coeffRef(Index index) + inline const Scalar& coeffRef(Index row, Index col) const { - ei_assert(Derived::IsVectorAtCompileTime || (ei_traits::Flags & LinearAccessBit)); - return const_cast(m_data)[index * innerStride()]; + return 
this->m_data[col * colStride() + row * rowStride()]; + } + + inline const Scalar& coeffRef(Index index) const + { + EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) + return this->m_data[index * innerStride()]; } template inline PacketScalar packet(Index row, Index col) const { - return ei_ploadt + return internal::ploadt (m_data + (col * colStride() + row * rowStride())); } template inline PacketScalar packet(Index index) const { - return ei_ploadt(m_data + index * innerStride()); + EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) + return internal::ploadt(m_data + index * innerStride()); } - template - inline void writePacket(Index row, Index col, const PacketScalar& x) - { - ei_pstoret - (const_cast(m_data) + (col * colStride() + row * rowStride()), x); - } - - template - inline void writePacket(Index index, const PacketScalar& x) - { - ei_pstoret - (const_cast(m_data) + index * innerStride(), x); - } - - inline MapBase(const Scalar* data) : m_data(data), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime) + inline MapBase(PointerType data) : m_data(data), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime) { EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) checkSanity(); } - inline MapBase(const Scalar* data, Index size) + inline MapBase(PointerType data, Index size) : m_data(data), m_rows(RowsAtCompileTime == Dynamic ? size : Index(RowsAtCompileTime)), m_cols(ColsAtCompileTime == Dynamic ? 
size : Index(ColsAtCompileTime)) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - ei_assert(size >= 0); - ei_assert(data == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == size); + eigen_assert(size >= 0); + eigen_assert(data == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == size); checkSanity(); } - inline MapBase(const Scalar* data, Index rows, Index cols) + inline MapBase(PointerType data, Index rows, Index cols) : m_data(data), m_rows(rows), m_cols(cols) { - ei_assert( (data == 0) + eigen_assert( (data == 0) || ( rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols))); checkSanity(); } - Derived& operator=(const MapBase& other) - { - Base::operator=(other); - return derived(); - } - - using Base::operator=; - protected: void checkSanity() const { - EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(ei_traits::Flags&PacketAccessBit, - ei_inner_stride_at_compile_time::ret==1), + EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(internal::traits::Flags&PacketAccessBit, + internal::inner_stride_at_compile_time::ret==1), PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1); - ei_assert(EIGEN_IMPLIES(ei_traits::Flags&AlignedBit, (size_t(m_data) % (sizeof(Scalar)*ei_packet_traits::size)) == 0) + eigen_assert(EIGEN_IMPLIES(internal::traits::Flags&AlignedBit, (size_t(m_data) % (sizeof(Scalar)*internal::packet_traits::size)) == 0) && "data is not aligned"); } - const Scalar* EIGEN_RESTRICT m_data; - const ei_variable_if_dynamic m_rows; - const ei_variable_if_dynamic m_cols; + PointerType m_data; + const internal::variable_if_dynamic m_rows; + const internal::variable_if_dynamic m_cols; }; +template class MapBase + : public MapBase +{ + public: + + typedef MapBase Base; + + typedef typename Base::Scalar Scalar; + typedef typename Base::PacketScalar PacketScalar; + typedef typename Base::Index Index; + typedef typename Base::PointerType PointerType; + + using Base::derived; + using 
Base::rows; + using Base::cols; + using Base::size; + using Base::coeff; + using Base::coeffRef; + + using Base::innerStride; + using Base::outerStride; + using Base::rowStride; + using Base::colStride; + + typedef typename internal::conditional< + internal::is_lvalue::value, + Scalar, + const Scalar + >::type ScalarWithConstIfNotLvalue; + + inline const Scalar* data() const { return this->m_data; } + inline ScalarWithConstIfNotLvalue* data() { return this->m_data; } // no const-cast here so non-const-correct code will give a compile error + + inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col) + { + return this->m_data[col * colStride() + row * rowStride()]; + } + + inline ScalarWithConstIfNotLvalue& coeffRef(Index index) + { + EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) + return this->m_data[index * innerStride()]; + } + + template + inline void writePacket(Index row, Index col, const PacketScalar& x) + { + internal::pstoret + (this->m_data + (col * colStride() + row * rowStride()), x); + } + + template + inline void writePacket(Index index, const PacketScalar& x) + { + EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) + internal::pstoret + (this->m_data + index * innerStride(), x); + } + + inline MapBase(PointerType data) : Base(data) {} + inline MapBase(PointerType data, Index size) : Base(data, size) {} + inline MapBase(PointerType data, Index rows, Index cols) : Base(data, rows, cols) {} + + Derived& operator=(const MapBase& other) + { + Base::Base::operator=(other); + return derived(); + } + + using Base::Base::operator=; +}; + + #endif // EIGEN_MAPBASE_H diff --git a/gtsam/3rdparty/Eigen/src/Core/MathFunctions.h b/gtsam/3rdparty/Eigen/src/Core/MathFunctions.h index 53e576258..2760e67b1 100644 --- a/gtsam/3rdparty/Eigen/src/Core/MathFunctions.h +++ b/gtsam/3rdparty/Eigen/src/Core/MathFunctions.h @@ -25,20 +25,22 @@ #ifndef EIGEN_MATHFUNCTIONS_H #define EIGEN_MATHFUNCTIONS_H -/** \internal \struct ei_global_math_functions_filtering_base 
+namespace internal { + +/** \internal \struct global_math_functions_filtering_base * * What it does: * Defines a typedef 'type' as follows: * - if type T has a member typedef Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl, then - * ei_global_math_functions_filtering_base::type is a typedef for it. - * - otherwise, ei_global_math_functions_filtering_base::type is a typedef for T. + * global_math_functions_filtering_base::type is a typedef for it. + * - otherwise, global_math_functions_filtering_base::type is a typedef for T. * * How it's used: - * To allow to defined the global math functions (like ei_sin...) in certain cases, like the Array expressions. - * When you do ei_sin(array1+array2), the object array1+array2 has a complicated expression type, all what you want to know - * is that it inherits ArrayBase. So we implement a partial specialization of ei_sin_impl for ArrayBase. - * So we must make sure to use ei_sin_impl > and not ei_sin_impl, otherwise our partial specialization - * won't be used. How does ei_sin know that? That's exactly what ei_global_math_functions_filtering_base tells it. + * To allow to defined the global math functions (like sin...) in certain cases, like the Array expressions. + * When you do sin(array1+array2), the object array1+array2 has a complicated expression type, all what you want to know + * is that it inherits ArrayBase. So we implement a partial specialization of sin_impl for ArrayBase. + * So we must make sure to use sin_impl > and not sin_impl, otherwise our partial specialization + * won't be used. How does sin know that? That's exactly what global_math_functions_filtering_base tells it. * * How it's implemented: * SFINAE in the style of enable_if. Highly susceptible of breaking compilers. 
With GCC, it sure does work, but if you replace @@ -46,32 +48,32 @@ */ template -struct ei_global_math_functions_filtering_base +struct global_math_functions_filtering_base { typedef T type; }; -template struct ei_always_void { typedef void type; }; +template struct always_void { typedef void type; }; template -struct ei_global_math_functions_filtering_base +struct global_math_functions_filtering_base ::type + typename always_void::type > { typedef typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl type; }; -#define EIGEN_MATHFUNC_IMPL(func, scalar) ei_##func##_impl::type> -#define EIGEN_MATHFUNC_RETVAL(func, scalar) typename ei_##func##_retval::type>::type +#define EIGEN_MATHFUNC_IMPL(func, scalar) func##_impl::type> +#define EIGEN_MATHFUNC_RETVAL(func, scalar) typename func##_retval::type>::type /**************************************************************************** -* Implementation of ei_real * +* Implementation of real * ****************************************************************************/ template -struct ei_real_impl +struct real_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar& x) @@ -81,7 +83,7 @@ struct ei_real_impl }; template -struct ei_real_impl > +struct real_impl > { static inline RealScalar run(const std::complex& x) { @@ -90,23 +92,23 @@ struct ei_real_impl > }; template -struct ei_real_retval +struct real_retval { typedef typename NumTraits::Real type; }; template -inline EIGEN_MATHFUNC_RETVAL(real, Scalar) ei_real(const Scalar& x) +inline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x); } /**************************************************************************** -* Implementation of ei_imag * +* Implementation of imag * ****************************************************************************/ template -struct ei_imag_impl +struct imag_impl { typedef typename NumTraits::Real RealScalar; static inline 
RealScalar run(const Scalar&) @@ -116,7 +118,7 @@ struct ei_imag_impl }; template -struct ei_imag_impl > +struct imag_impl > { static inline RealScalar run(const std::complex& x) { @@ -125,23 +127,23 @@ struct ei_imag_impl > }; template -struct ei_imag_retval +struct imag_retval { typedef typename NumTraits::Real type; }; template -inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) ei_imag(const Scalar& x) +inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x); } /**************************************************************************** -* Implementation of ei_real_ref * +* Implementation of real_ref * ****************************************************************************/ template -struct ei_real_ref_impl +struct real_ref_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar& run(Scalar& x) @@ -155,29 +157,29 @@ struct ei_real_ref_impl }; template -struct ei_real_ref_retval +struct real_ref_retval { typedef typename NumTraits::Real & type; }; template -inline typename ei_makeconst< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type ei_real_ref(const Scalar& x) +inline typename add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type real_ref(const Scalar& x) { - return ei_real_ref_impl::run(x); + return real_ref_impl::run(x); } template -inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) ei_real_ref(Scalar& x) +inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x) { return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x); } /**************************************************************************** -* Implementation of ei_imag_ref * +* Implementation of imag_ref * ****************************************************************************/ template -struct ei_imag_ref_default_impl +struct imag_ref_default_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar& run(Scalar& x) @@ -191,7 +193,7 @@ struct ei_imag_ref_default_impl }; 
template -struct ei_imag_ref_default_impl +struct imag_ref_default_impl { static inline Scalar run(Scalar&) { @@ -204,32 +206,32 @@ struct ei_imag_ref_default_impl }; template -struct ei_imag_ref_impl : ei_imag_ref_default_impl::IsComplex> {}; +struct imag_ref_impl : imag_ref_default_impl::IsComplex> {}; template -struct ei_imag_ref_retval +struct imag_ref_retval { typedef typename NumTraits::Real & type; }; template -inline typename ei_makeconst< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type ei_imag_ref(const Scalar& x) +inline typename add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type imag_ref(const Scalar& x) { - return ei_imag_ref_impl::run(x); + return imag_ref_impl::run(x); } template -inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) ei_imag_ref(Scalar& x) +inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x) { return EIGEN_MATHFUNC_IMPL(imag_ref, Scalar)::run(x); } /**************************************************************************** -* Implementation of ei_conj * +* Implementation of conj * ****************************************************************************/ template -struct ei_conj_impl +struct conj_impl { static inline Scalar run(const Scalar& x) { @@ -238,7 +240,7 @@ struct ei_conj_impl }; template -struct ei_conj_impl > +struct conj_impl > { static inline std::complex run(const std::complex& x) { @@ -247,23 +249,23 @@ struct ei_conj_impl > }; template -struct ei_conj_retval +struct conj_retval { typedef Scalar type; }; template -inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) ei_conj(const Scalar& x) +inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x); } /**************************************************************************** -* Implementation of ei_abs * +* Implementation of abs * ****************************************************************************/ template -struct ei_abs_impl +struct abs_impl { typedef typename 
NumTraits::Real RealScalar; static inline RealScalar run(const Scalar& x) @@ -273,23 +275,23 @@ struct ei_abs_impl }; template -struct ei_abs_retval +struct abs_retval { typedef typename NumTraits::Real type; }; template -inline EIGEN_MATHFUNC_RETVAL(abs, Scalar) ei_abs(const Scalar& x) +inline EIGEN_MATHFUNC_RETVAL(abs, Scalar) abs(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(abs, Scalar)::run(x); } /**************************************************************************** -* Implementation of ei_abs2 * +* Implementation of abs2 * ****************************************************************************/ template -struct ei_abs2_impl +struct abs2_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar& x) @@ -299,7 +301,7 @@ struct ei_abs2_impl }; template -struct ei_abs2_impl > +struct abs2_impl > { static inline RealScalar run(const std::complex& x) { @@ -308,92 +310,92 @@ struct ei_abs2_impl > }; template -struct ei_abs2_retval +struct abs2_retval { typedef typename NumTraits::Real type; }; template -inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) ei_abs2(const Scalar& x) +inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x); } /**************************************************************************** -* Implementation of ei_norm1 * +* Implementation of norm1 * ****************************************************************************/ template -struct ei_norm1_default_impl +struct norm1_default_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar& x) { - return ei_abs(ei_real(x)) + ei_abs(ei_imag(x)); + return abs(real(x)) + abs(imag(x)); } }; template -struct ei_norm1_default_impl +struct norm1_default_impl { static inline Scalar run(const Scalar& x) { - return ei_abs(x); + return abs(x); } }; template -struct ei_norm1_impl : ei_norm1_default_impl::IsComplex> {}; +struct norm1_impl : 
norm1_default_impl::IsComplex> {}; template -struct ei_norm1_retval +struct norm1_retval { typedef typename NumTraits::Real type; }; template -inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) ei_norm1(const Scalar& x) +inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x); } /**************************************************************************** -* Implementation of ei_hypot * +* Implementation of hypot * ****************************************************************************/ template -struct ei_hypot_impl +struct hypot_impl { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar& x, const Scalar& y) { - RealScalar _x = ei_abs(x); - RealScalar _y = ei_abs(y); + RealScalar _x = abs(x); + RealScalar _y = abs(y); RealScalar p = std::max(_x, _y); RealScalar q = std::min(_x, _y); RealScalar qp = q/p; - return p * ei_sqrt(RealScalar(1) + qp*qp); + return p * sqrt(RealScalar(1) + qp*qp); } }; template -struct ei_hypot_retval +struct hypot_retval { typedef typename NumTraits::Real type; }; template -inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) ei_hypot(const Scalar& x, const Scalar& y) +inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y) { return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y); } /**************************************************************************** -* Implementation of ei_cast * +* Implementation of cast * ****************************************************************************/ template -struct ei_cast_impl +struct cast_impl { static inline NewType run(const OldType& x) { @@ -401,20 +403,20 @@ struct ei_cast_impl } }; -// here, for once, we're plainly returning NewType: we don't want ei_cast to do weird things. +// here, for once, we're plainly returning NewType: we don't want cast to do weird things. 
template -inline NewType ei_cast(const OldType& x) +inline NewType cast(const OldType& x) { - return ei_cast_impl::run(x); + return cast_impl::run(x); } /**************************************************************************** -* Implementation of ei_sqrt * +* Implementation of sqrt * ****************************************************************************/ template -struct ei_sqrt_default_impl +struct sqrt_default_impl { static inline Scalar run(const Scalar& x) { @@ -423,188 +425,72 @@ struct ei_sqrt_default_impl }; template -struct ei_sqrt_default_impl +struct sqrt_default_impl { static inline Scalar run(const Scalar&) { +#ifdef EIGEN2_SUPPORT + eigen_assert(!NumTraits::IsInteger); +#else EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) +#endif return Scalar(0); } }; template -struct ei_sqrt_impl : ei_sqrt_default_impl::IsInteger> {}; +struct sqrt_impl : sqrt_default_impl::IsInteger> {}; template -struct ei_sqrt_retval +struct sqrt_retval { typedef Scalar type; }; template -inline EIGEN_MATHFUNC_RETVAL(sqrt, Scalar) ei_sqrt(const Scalar& x) +inline EIGEN_MATHFUNC_RETVAL(sqrt, Scalar) sqrt(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(sqrt, Scalar)::run(x); } /**************************************************************************** -* Implementation of ei_exp * +* Implementation of standard unary real functions (exp, log, sin, cos, ... * ****************************************************************************/ -template -struct ei_exp_default_impl -{ - static inline Scalar run(const Scalar& x) - { - return std::exp(x); +// This macro instanciate all the necessary template mechanism which is common to all unary real functions. 
+#define EIGEN_MATHFUNC_STANDARD_REAL_UNARY(NAME) \ + template struct NAME##_default_impl { \ + static inline Scalar run(const Scalar& x) { return std::NAME(x); } \ + }; \ + template struct NAME##_default_impl { \ + static inline Scalar run(const Scalar&) { \ + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) \ + return Scalar(0); \ + } \ + }; \ + template struct NAME##_impl \ + : NAME##_default_impl::IsInteger> \ + {}; \ + template struct NAME##_retval { typedef Scalar type; }; \ + template \ + inline EIGEN_MATHFUNC_RETVAL(NAME, Scalar) NAME(const Scalar& x) { \ + return EIGEN_MATHFUNC_IMPL(NAME, Scalar)::run(x); \ } -}; -template -struct ei_exp_default_impl -{ - static inline Scalar run(const Scalar&) - { - EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) - return Scalar(0); - } -}; - -template -struct ei_exp_impl : ei_exp_default_impl::IsInteger> {}; - -template -struct ei_exp_retval -{ - typedef Scalar type; -}; - -template -inline EIGEN_MATHFUNC_RETVAL(exp, Scalar) ei_exp(const Scalar& x) -{ - return EIGEN_MATHFUNC_IMPL(exp, Scalar)::run(x); -} +EIGEN_MATHFUNC_STANDARD_REAL_UNARY(exp) +EIGEN_MATHFUNC_STANDARD_REAL_UNARY(log) +EIGEN_MATHFUNC_STANDARD_REAL_UNARY(sin) +EIGEN_MATHFUNC_STANDARD_REAL_UNARY(cos) +EIGEN_MATHFUNC_STANDARD_REAL_UNARY(tan) +EIGEN_MATHFUNC_STANDARD_REAL_UNARY(asin) +EIGEN_MATHFUNC_STANDARD_REAL_UNARY(acos) /**************************************************************************** -* Implementation of ei_cos * +* Implementation of atan2 * ****************************************************************************/ template -struct ei_cos_default_impl -{ - static inline Scalar run(const Scalar& x) - { - return std::cos(x); - } -}; - -template -struct ei_cos_default_impl -{ - static inline Scalar run(const Scalar&) - { - EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) - return Scalar(0); - } -}; - -template -struct ei_cos_impl : ei_cos_default_impl::IsInteger> {}; - -template -struct ei_cos_retval -{ - typedef Scalar type; -}; - -template -inline 
EIGEN_MATHFUNC_RETVAL(cos, Scalar) ei_cos(const Scalar& x) -{ - return EIGEN_MATHFUNC_IMPL(cos, Scalar)::run(x); -} - -/**************************************************************************** -* Implementation of ei_sin * -****************************************************************************/ - -template -struct ei_sin_default_impl -{ - static inline Scalar run(const Scalar& x) - { - return std::sin(x); - } -}; - -template -struct ei_sin_default_impl -{ - static inline Scalar run(const Scalar&) - { - EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) - return Scalar(0); - } -}; - -template -struct ei_sin_impl : ei_sin_default_impl::IsInteger> {}; - -template -struct ei_sin_retval -{ - typedef Scalar type; -}; - -template -inline EIGEN_MATHFUNC_RETVAL(sin, Scalar) ei_sin(const Scalar& x) -{ - return EIGEN_MATHFUNC_IMPL(sin, Scalar)::run(x); -} - -/**************************************************************************** -* Implementation of ei_log * -****************************************************************************/ - -template -struct ei_log_default_impl -{ - static inline Scalar run(const Scalar& x) - { - return std::log(x); - } -}; - -template -struct ei_log_default_impl -{ - static inline Scalar run(const Scalar&) - { - EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) - return Scalar(0); - } -}; - -template -struct ei_log_impl : ei_log_default_impl::IsInteger> {}; - -template -struct ei_log_retval -{ - typedef Scalar type; -}; - -template -inline EIGEN_MATHFUNC_RETVAL(log, Scalar) ei_log(const Scalar& x) -{ - return EIGEN_MATHFUNC_IMPL(log, Scalar)::run(x); -} - -/**************************************************************************** -* Implementation of ei_atan2 * -****************************************************************************/ - -template -struct ei_atan2_default_impl +struct atan2_default_impl { typedef Scalar retval; static inline Scalar run(const Scalar& x, const Scalar& y) @@ -614,7 +500,7 @@ struct ei_atan2_default_impl }; 
template -struct ei_atan2_default_impl +struct atan2_default_impl { static inline Scalar run(const Scalar&, const Scalar&) { @@ -624,26 +510,26 @@ struct ei_atan2_default_impl }; template -struct ei_atan2_impl : ei_atan2_default_impl::IsInteger> {}; +struct atan2_impl : atan2_default_impl::IsInteger> {}; template -struct ei_atan2_retval +struct atan2_retval { typedef Scalar type; }; template -inline EIGEN_MATHFUNC_RETVAL(atan2, Scalar) ei_atan2(const Scalar& x, const Scalar& y) +inline EIGEN_MATHFUNC_RETVAL(atan2, Scalar) atan2(const Scalar& x, const Scalar& y) { return EIGEN_MATHFUNC_IMPL(atan2, Scalar)::run(x, y); } /**************************************************************************** -* Implementation of ei_pow * +* Implementation of pow * ****************************************************************************/ template -struct ei_pow_default_impl +struct pow_default_impl { typedef Scalar retval; static inline Scalar run(const Scalar& x, const Scalar& y) @@ -653,12 +539,12 @@ struct ei_pow_default_impl }; template -struct ei_pow_default_impl +struct pow_default_impl { static inline Scalar run(Scalar x, Scalar y) { Scalar res = 1; - ei_assert(!NumTraits::IsSigned || y >= 0); + eigen_assert(!NumTraits::IsSigned || y >= 0); if(y & 1) res *= x; y >>= 1; while(y) @@ -672,47 +558,47 @@ struct ei_pow_default_impl }; template -struct ei_pow_impl : ei_pow_default_impl::IsInteger> {}; +struct pow_impl : pow_default_impl::IsInteger> {}; template -struct ei_pow_retval +struct pow_retval { typedef Scalar type; }; template -inline EIGEN_MATHFUNC_RETVAL(pow, Scalar) ei_pow(const Scalar& x, const Scalar& y) +inline EIGEN_MATHFUNC_RETVAL(pow, Scalar) pow(const Scalar& x, const Scalar& y) { return EIGEN_MATHFUNC_IMPL(pow, Scalar)::run(x, y); } /**************************************************************************** -* Implementation of ei_random * +* Implementation of random * ****************************************************************************/ template 
-struct ei_random_default_impl {}; +struct random_default_impl {}; template -struct ei_random_impl : ei_random_default_impl::IsComplex, NumTraits::IsInteger> {}; +struct random_impl : random_default_impl::IsComplex, NumTraits::IsInteger> {}; template -struct ei_random_retval +struct random_retval { typedef Scalar type; }; -template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) ei_random(const Scalar& x, const Scalar& y); -template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) ei_random(); +template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y); +template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(); template -struct ei_random_default_impl +struct random_default_impl { static inline Scalar run(const Scalar& x, const Scalar& y) { - return x + (y-x) * Scalar(std::rand()) / float(RAND_MAX); + return x + (y-x) * Scalar(std::rand()) / Scalar(RAND_MAX); } static inline Scalar run() { @@ -720,42 +606,102 @@ struct ei_random_default_impl } }; -template -struct ei_random_default_impl +enum { + floor_log2_terminate, + floor_log2_move_up, + floor_log2_move_down, + floor_log2_bogus +}; + +template struct floor_log2_selector { + enum { middle = (lower + upper) / 2, + value = (upper <= lower + 1) ? int(floor_log2_terminate) + : (n < (1 << middle)) ? int(floor_log2_move_down) + : (n==0) ? int(floor_log2_bogus) + : int(floor_log2_move_up) + }; +}; + +template::value> +struct floor_log2 {}; + +template +struct floor_log2 +{ + enum { value = floor_log2::middle>::value }; +}; + +template +struct floor_log2 +{ + enum { value = floor_log2::middle, upper>::value }; +}; + +template +struct floor_log2 +{ + enum { value = (n >= ((unsigned int)(1) << (lower+1))) ? 
lower+1 : lower }; +}; + +template +struct floor_log2 +{ + // no value, error at compile time +}; + +template +struct random_default_impl +{ + typedef typename NumTraits::NonInteger NonInteger; + static inline Scalar run(const Scalar& x, const Scalar& y) { - return x + Scalar((y-x+1) * (std::rand() / (RAND_MAX + typename NumTraits::NonInteger(1)))); + return x + Scalar((NonInteger(y)-x+1) * std::rand() / (RAND_MAX + NonInteger(1))); } + static inline Scalar run() { +#ifdef EIGEN_MAKING_DOCS return run(Scalar(NumTraits::IsSigned ? -10 : 0), Scalar(10)); +#else + enum { rand_bits = floor_log2<(unsigned int)(RAND_MAX)+1>::value, + scalar_bits = sizeof(Scalar) * CHAR_BIT, + shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits)) + }; + Scalar x = Scalar(std::rand() >> shift); + Scalar offset = NumTraits::IsSigned ? Scalar(1 << (rand_bits-1)) : Scalar(0); + return x - offset; +#endif } }; template -struct ei_random_default_impl +struct random_default_impl { static inline Scalar run(const Scalar& x, const Scalar& y) { - return Scalar(ei_random(ei_real(x), ei_real(y)), - ei_random(ei_imag(x), ei_imag(y))); + return Scalar(random(real(x), real(y)), + random(imag(x), imag(y))); } static inline Scalar run() { typedef typename NumTraits::Real RealScalar; - return Scalar(ei_random(), ei_random()); + return Scalar(random(), random()); } }; template -inline EIGEN_MATHFUNC_RETVAL(random, Scalar) ei_random(const Scalar& x, const Scalar& y) +inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y) { return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(x, y); } template -inline EIGEN_MATHFUNC_RETVAL(random, Scalar) ei_random() +inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random() { return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(); } @@ -767,20 +713,20 @@ inline EIGEN_MATHFUNC_RETVAL(random, Scalar) ei_random() template -struct ei_scalar_fuzzy_default_impl {}; +struct scalar_fuzzy_default_impl {}; template -struct ei_scalar_fuzzy_default_impl 
+struct scalar_fuzzy_default_impl { typedef typename NumTraits::Real RealScalar; template static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec) { - return ei_abs(x) <= ei_abs(y) * prec; + return abs(x) <= abs(y) * prec; } static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec) { - return ei_abs(x - y) <= std::min(ei_abs(x), ei_abs(y)) * prec; + return abs(x - y) <= std::min(abs(x), abs(y)) * prec; } static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar& prec) { @@ -789,7 +735,7 @@ struct ei_scalar_fuzzy_default_impl }; template -struct ei_scalar_fuzzy_default_impl +struct scalar_fuzzy_default_impl { typedef typename NumTraits::Real RealScalar; template @@ -808,62 +754,78 @@ struct ei_scalar_fuzzy_default_impl }; template -struct ei_scalar_fuzzy_default_impl +struct scalar_fuzzy_default_impl { typedef typename NumTraits::Real RealScalar; template static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec) { - return ei_abs2(x) <= ei_abs2(y) * prec * prec; + return abs2(x) <= abs2(y) * prec * prec; } static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec) { - return ei_abs2(x - y) <= std::min(ei_abs2(x), ei_abs2(y)) * prec * prec; + return abs2(x - y) <= std::min(abs2(x), abs2(y)) * prec * prec; } }; template -struct ei_scalar_fuzzy_impl : ei_scalar_fuzzy_default_impl::IsComplex, NumTraits::IsInteger> {}; +struct scalar_fuzzy_impl : scalar_fuzzy_default_impl::IsComplex, NumTraits::IsInteger> {}; template -inline bool ei_isMuchSmallerThan(const Scalar& x, const OtherScalar& y, +inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, typename NumTraits::Real precision = NumTraits::dummy_precision()) { - return ei_scalar_fuzzy_impl::template isMuchSmallerThan(x, y, precision); + return scalar_fuzzy_impl::template isMuchSmallerThan(x, y, precision); } template -inline bool 
ei_isApprox(const Scalar& x, const Scalar& y, +inline bool isApprox(const Scalar& x, const Scalar& y, typename NumTraits::Real precision = NumTraits::dummy_precision()) { - return ei_scalar_fuzzy_impl::isApprox(x, y, precision); + return scalar_fuzzy_impl::isApprox(x, y, precision); } template -inline bool ei_isApproxOrLessThan(const Scalar& x, const Scalar& y, +inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, typename NumTraits::Real precision = NumTraits::dummy_precision()) { - return ei_scalar_fuzzy_impl::isApproxOrLessThan(x, y, precision); + return scalar_fuzzy_impl::isApproxOrLessThan(x, y, precision); } /****************************************** *** The special case of the bool type *** ******************************************/ -template<> struct ei_random_impl +template<> struct random_impl { static inline bool run() { - return ei_random(0,1)==0 ? false : true; + return random(0,1)==0 ? false : true; } }; -template<> struct ei_scalar_fuzzy_impl +template<> struct scalar_fuzzy_impl { + typedef bool RealScalar; + + template + static inline bool isMuchSmallerThan(const bool& x, const bool&, const bool&) + { + return !x; + } + static inline bool isApprox(bool x, bool y, bool) { return x == y; } + + static inline bool isApproxOrLessThan(const bool& x, const bool& y, const bool&) + { + return (!x) || y; + } + }; +} // end namespace internal + #endif // EIGEN_MATHFUNCTIONS_H diff --git a/gtsam/3rdparty/Eigen/src/Core/Matrix.h b/gtsam/3rdparty/Eigen/src/Core/Matrix.h index 9d8ff6640..8ae55da6e 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Matrix.h +++ b/gtsam/3rdparty/Eigen/src/Core/Matrix.h @@ -45,7 +45,7 @@ * The remaining template parameters are optional -- in most cases you don't have to worry about them. * \tparam _Options \anchor matrix_tparam_options A combination of either \b RowMajor or \b ColMajor, and of either * \b AutoAlign or \b DontAlign. - * The former controls storage order, and defaults to column-major. 
The latter controls alignment, which is required + * The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter controls alignment, which is required * for vectorization. It defaults to aligning matrices except for fixed sizes that aren't a multiple of the packet size. * \tparam _MaxRows Maximum number of rows. Defaults to \a _Rows (\ref maxrows "note"). * \tparam _MaxCols Maximum number of columns. Defaults to \a _Cols (\ref maxrows "note"). @@ -79,6 +79,9 @@ * m(0, 3) = 3; * \endcode * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_MATRIX_PLUGIN. + * * Some notes: * *
@@ -107,10 +110,13 @@ * are the dimensions of the original matrix, while _Rows and _Cols are Dynamic. *
* - * \see MatrixBase for the majority of the API methods for matrices, \ref TopicClassHierarchy + * \see MatrixBase for the majority of the API methods for matrices, \ref TopicClassHierarchy, + * \ref TopicStorageOrders */ + +namespace internal { template -struct ei_traits > +struct traits > { typedef _Scalar Scalar; typedef Dense StorageKind; @@ -121,24 +127,25 @@ struct ei_traits > ColsAtCompileTime = _Cols, MaxRowsAtCompileTime = _MaxRows, MaxColsAtCompileTime = _MaxCols, - Flags = ei_compute_matrix_flags<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::ret, + Flags = compute_matrix_flags<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::ret, CoeffReadCost = NumTraits::ReadCost, Options = _Options, InnerStrideAtCompileTime = 1, OuterStrideAtCompileTime = (Options&RowMajor) ? ColsAtCompileTime : RowsAtCompileTime }; }; +} template class Matrix - : public DenseStorageBase > + : public PlainObjectBase > { public: /** \brief Base class typedef. - * \sa DenseStorageBase + * \sa PlainObjectBase */ - typedef DenseStorageBase Base; + typedef PlainObjectBase Base; enum { Options = _Options }; @@ -217,8 +224,8 @@ class Matrix } // FIXME is it still needed - Matrix(ei_constructor_without_unaligned_array_assert) - : Base(ei_constructor_without_unaligned_array_assert()) + Matrix(internal::constructor_without_unaligned_array_assert) + : Base(internal::constructor_without_unaligned_array_assert()) { Base::_check_template_params(); EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED } /** \brief Constructs a vector or row-vector with given dimension. 
\only_for_vectors @@ -232,8 +239,8 @@ class Matrix { Base::_check_template_params(); EIGEN_STATIC_ASSERT_VECTOR_ONLY(Matrix) - ei_assert(dim > 0); - ei_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim); + eigen_assert(dim >= 0); + eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim); EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED } @@ -282,6 +289,11 @@ class Matrix EIGEN_STRONG_INLINE Matrix(const MatrixBase& other) : Base(other.rows() * other.cols(), other.rows(), other.cols()) { + // This test resides here, to bring the error messages closer to the user. Normally, these checks + // are performed deeply within the library, thus causing long and scary error traces. + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + Base::_check_template_params(); Base::_set_noalias(other); } @@ -320,7 +332,7 @@ class Matrix * of same type it is enough to swap the data pointers. 
*/ template - void swap(MatrixBase EIGEN_REF_TO_TEMPORARY other) + void swap(MatrixBase const & other) { this->_swap(other.derived()); } inline Index innerStride() const { return 1; } @@ -333,6 +345,13 @@ class Matrix template Matrix& operator=(const RotationBase& r); + #ifdef EIGEN2_SUPPORT + template + explicit Matrix(const eigen2_RotationBase& r); + template + Matrix& operator=(const eigen2_RotationBase& r); + #endif + // allow to extend Matrix outside Eigen #ifdef EIGEN_MATRIX_PLUGIN #include EIGEN_MATRIX_PLUGIN @@ -340,7 +359,7 @@ class Matrix protected: template - friend struct ei_conservative_resize_like_impl; + friend struct internal::conservative_resize_like_impl; using Base::m_storage; }; diff --git a/gtsam/3rdparty/Eigen/src/Core/MatrixBase.h b/gtsam/3rdparty/Eigen/src/Core/MatrixBase.h index f324a0870..f0c7fc7a1 100644 --- a/gtsam/3rdparty/Eigen/src/Core/MatrixBase.h +++ b/gtsam/3rdparty/Eigen/src/Core/MatrixBase.h @@ -38,7 +38,7 @@ * Note that some methods are defined in other modules such as the \ref LU_Module LU module * for all functions related to matrix inversions. * - * \param Derived is the derived type, e.g. a matrix type, or an expression, etc. + * \tparam Derived is the derived type, e.g. a matrix type, or an expression, etc. * * When writing a function taking Eigen objects as argument, if you want your function * to take as argument any matrix, vector, or expression, just let it take a @@ -53,6 +53,9 @@ } * \endcode * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_MATRIXBASE_PLUGIN. 
+ * * \sa \ref TopicClassHierarchy */ template class MatrixBase @@ -61,10 +64,10 @@ template class MatrixBase public: #ifndef EIGEN_PARSED_BY_DOXYGEN typedef MatrixBase StorageBaseType; - typedef typename ei_traits::StorageKind StorageKind; - typedef typename ei_traits::Index Index; - typedef typename ei_traits::Scalar Scalar; - typedef typename ei_packet_traits::type PacketScalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; typedef DenseBase Base; @@ -93,6 +96,7 @@ template class MatrixBase using Base::operator/=; typedef typename Base::CoeffReturnType CoeffReturnType; + typedef typename Base::ConstTransposeReturnType ConstTransposeReturnType; typedef typename Base::RowXpr RowXpr; typedef typename Base::ColXpr ColXpr; #endif // not EIGEN_PARSED_BY_DOXYGEN @@ -115,30 +119,30 @@ template class MatrixBase * the return type of eval() is a const reference to a matrix, not a matrix! It is however guaranteed * that the return type of eval() is either PlainObject or const PlainObject&. */ - typedef Matrix::Scalar, - ei_traits::RowsAtCompileTime, - ei_traits::ColsAtCompileTime, - AutoAlign | (ei_traits::Flags&RowMajorBit ? RowMajor : ColMajor), - ei_traits::MaxRowsAtCompileTime, - ei_traits::MaxColsAtCompileTime + typedef Matrix::Scalar, + internal::traits::RowsAtCompileTime, + internal::traits::ColsAtCompileTime, + AutoAlign | (internal::traits::Flags&RowMajorBit ? 
RowMajor : ColMajor), + internal::traits::MaxRowsAtCompileTime, + internal::traits::MaxColsAtCompileTime > PlainObject; #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal Represents a matrix with all coefficients equal to one another*/ - typedef CwiseNullaryOp,Derived> ConstantReturnType; + typedef CwiseNullaryOp,Derived> ConstantReturnType; /** \internal the return type of MatrixBase::adjoint() */ - typedef typename ei_meta_if::IsComplex, - CwiseUnaryOp, Eigen::Transpose >, - Transpose - >::ret AdjointReturnType; + typedef typename internal::conditional::IsComplex, + CwiseUnaryOp, ConstTransposeReturnType>, + ConstTransposeReturnType + >::type AdjointReturnType; /** \internal Return type of eigenvalues() */ - typedef Matrix, ei_traits::ColsAtCompileTime, 1, ColMajor> EigenvaluesReturnType; + typedef Matrix, internal::traits::ColsAtCompileTime, 1, ColMajor> EigenvaluesReturnType; /** \internal the return type of identity */ - typedef CwiseNullaryOp,Derived> IdentityReturnType; + typedef CwiseNullaryOp,Derived> IdentityReturnType; /** \internal the return type of unit vectors */ - typedef Block, SquareMatrixType>, - ei_traits::RowsAtCompileTime, - ei_traits::ColsAtCompileTime> BasisReturnType; + typedef Block, SquareMatrixType>, + internal::traits::RowsAtCompileTime, + internal::traits::ColsAtCompileTime> BasisReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::MatrixBase @@ -200,7 +204,14 @@ template class MatrixBase operator*(const DiagonalBase &diagonal) const; template - Scalar dot(const MatrixBase& other) const; + typename internal::scalar_product_traits::Scalar,typename internal::traits::Scalar>::ReturnType + dot(const MatrixBase& other) const; + + #ifdef EIGEN2_SUPPORT + template + Scalar eigen2_dot(const MatrixBase& other) const; + #endif + RealScalar squaredNorm() const; RealScalar norm() const; RealScalar stableNorm() const; @@ -212,23 +223,49 @@ template class MatrixBase const AdjointReturnType adjoint() const; 
void adjointInPlace(); - Diagonal diagonal(); - const Diagonal diagonal() const; + typedef Diagonal DiagonalReturnType; + DiagonalReturnType diagonal(); + typedef const Diagonal ConstDiagonalReturnType; + const ConstDiagonalReturnType diagonal() const; - template Diagonal diagonal(); - template const Diagonal diagonal() const; + template struct DiagonalIndexReturnType { typedef Diagonal Type; }; + template struct ConstDiagonalIndexReturnType { typedef const Diagonal Type; }; - Diagonal diagonal(Index index); - const Diagonal diagonal(Index index) const; + template typename DiagonalIndexReturnType::Type diagonal(); + template typename ConstDiagonalIndexReturnType::Type diagonal() const; - template TriangularView part(); - template const TriangularView part() const; + // Note: The "MatrixBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations. + // On the other hand they confuse MSVC8... + #if (defined _MSC_VER) && (_MSC_VER >= 1500) // 2008 or later + typename MatrixBase::template DiagonalIndexReturnType::Type diagonal(Index index); + typename MatrixBase::template ConstDiagonalIndexReturnType::Type diagonal(Index index) const; + #else + typename DiagonalIndexReturnType::Type diagonal(Index index); + typename ConstDiagonalIndexReturnType::Type diagonal(Index index) const; + #endif - template TriangularView triangularView(); - template const TriangularView triangularView() const; + #ifdef EIGEN2_SUPPORT + template typename internal::eigen2_part_return_type::type part(); + template const typename internal::eigen2_part_return_type::type part() const; + + // huuuge hack. make Eigen2's matrix.part() work in eigen3. Problem: Diagonal is now a class template instead + // of an integer constant. Solution: overload the part() method template wrt template parameters list. 
+ template class U> + const DiagonalWrapper part() const + { return diagonal().asDiagonal(); } + #endif // EIGEN2_SUPPORT - template SelfAdjointView selfadjointView(); - template const SelfAdjointView selfadjointView() const; + template struct TriangularViewReturnType { typedef TriangularView Type; }; + template struct ConstTriangularViewReturnType { typedef const TriangularView Type; }; + + template typename TriangularViewReturnType::Type triangularView(); + template typename ConstTriangularViewReturnType::Type triangularView() const; + + template struct SelfAdjointViewReturnType { typedef SelfAdjointView Type; }; + template struct ConstSelfAdjointViewReturnType { typedef const SelfAdjointView Type; }; + + template typename SelfAdjointViewReturnType::Type selfadjointView(); + template typename ConstSelfAdjointViewReturnType::Type selfadjointView() const; const SparseView sparseView(const Scalar& m_reference = Scalar(0), typename NumTraits::Real m_epsilon = NumTraits::dummy_precision()) const; @@ -241,7 +278,8 @@ template class MatrixBase static const BasisReturnType UnitZ(); static const BasisReturnType UnitW(); - const DiagonalWrapper asDiagonal() const; + const DiagonalWrapper asDiagonal() const; + const PermutationWrapper asPermutation() const; Derived& setIdentity(); Derived& setIdentity(Index rows, Index cols); @@ -277,8 +315,8 @@ template class MatrixBase inline const ForceAlignedAccess forceAlignedAccess() const; inline ForceAlignedAccess forceAlignedAccess(); - template inline typename ei_makeconst,Derived&>::ret>::type forceAlignedAccessIf() const; - template inline typename ei_meta_if,Derived&>::ret forceAlignedAccessIf(); + template inline typename internal::add_const_on_value_type,Derived&>::type>::type forceAlignedAccessIf() const; + template inline typename internal::conditional,Derived&>::type forceAlignedAccessIf(); Scalar trace() const; @@ -298,8 +336,27 @@ template class MatrixBase const FullPivLU fullPivLu() const; const PartialPivLU 
partialPivLu() const; + + #if EIGEN2_SUPPORT_STAGE < STAGE20_RESOLVE_API_CONFLICTS + const LU lu() const; + #endif + + #ifdef EIGEN2_SUPPORT + const LU eigen2_lu() const; + #endif + + #if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS const PartialPivLU lu() const; - const ei_inverse_impl inverse() const; + #endif + + #ifdef EIGEN2_SUPPORT + template + void computeInverse(MatrixBase *result) const { + *result = this->inverse(); + } + #endif + + const internal::inverse_impl inverse() const; template void computeInverseAndDetWithCheck( ResultType& inverse, @@ -325,37 +382,57 @@ template class MatrixBase const HouseholderQR householderQr() const; const ColPivHouseholderQR colPivHouseholderQr() const; const FullPivHouseholderQR fullPivHouseholderQr() const; + + #ifdef EIGEN2_SUPPORT + const QR qr() const; + #endif EigenvaluesReturnType eigenvalues() const; RealScalar operatorNorm() const; /////////// SVD module /////////// + JacobiSVD jacobiSvd(unsigned int computationOptions = 0) const; + + #ifdef EIGEN2_SUPPORT + SVD svd() const; + #endif + /////////// Geometry module /////////// + #ifndef EIGEN_PARSED_BY_DOXYGEN + /// \internal helper struct to form the return type of the cross product + template struct cross_product_return_type { + typedef typename internal::scalar_product_traits::Scalar,typename internal::traits::Scalar>::ReturnType Scalar; + typedef Matrix type; + }; + #endif // EIGEN_PARSED_BY_DOXYGEN template - PlainObject cross(const MatrixBase& other) const; + typename cross_product_return_type::type + cross(const MatrixBase& other) const; template PlainObject cross3(const MatrixBase& other) const; PlainObject unitOrthogonal(void) const; Matrix eulerAngles(Index a0, Index a1, Index a2) const; + + #if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS ScalarMultipleReturnType operator*(const UniformScaling& s) const; - enum { - SizeMinusOne = SizeAtCompileTime==Dynamic ? Dynamic : SizeAtCompileTime-1 - }; - typedef Block::ColsAtCompileTime==1 ? 
SizeMinusOne : 1, - ei_traits::ColsAtCompileTime==1 ? 1 : SizeMinusOne> StartMinusOne; - typedef CwiseUnaryOp::Scalar>, - StartMinusOne > HNormalizedReturnType; - - HNormalizedReturnType hnormalized() const; - // put this as separate enum value to work around possible GCC 4.3 bug (?) enum { HomogeneousReturnTypeDirection = ColsAtCompileTime==1?Vertical:Horizontal }; typedef Homogeneous HomogeneousReturnType; - HomogeneousReturnType homogeneous() const; + #endif + + enum { + SizeMinusOne = SizeAtCompileTime==Dynamic ? Dynamic : SizeAtCompileTime-1 + }; + typedef Block::ColsAtCompileTime==1 ? SizeMinusOne : 1, + internal::traits::ColsAtCompileTime==1 ? 1 : SizeMinusOne> ConstStartMinusOne; + typedef CwiseUnaryOp::Scalar>, + const ConstStartMinusOne > HNormalizedReturnType; + + const HNormalizedReturnType hnormalized() const; ////////// Householder module /////////// @@ -375,13 +452,13 @@ template class MatrixBase ///////// Jacobi module ///////// template - void applyOnTheLeft(Index p, Index q, const PlanarRotation& j); + void applyOnTheLeft(Index p, Index q, const JacobiRotation& j); template - void applyOnTheRight(Index p, Index q, const PlanarRotation& j); + void applyOnTheRight(Index p, Index q, const JacobiRotation& j); ///////// MatrixFunctions module ///////// - typedef typename ei_stem_function::type StemFunction; + typedef typename internal::stem_function::type StemFunction; const MatrixExponentialReturnValue exp() const; const MatrixFunctionReturnValue matrixFunction(StemFunction f) const; const MatrixFunctionReturnValue cosh() const; @@ -412,13 +489,13 @@ template class MatrixBase inline Cwise cwise(); VectorBlock start(Index size); - const VectorBlock start(Index size) const; + const VectorBlock start(Index size) const; VectorBlock end(Index size); - const VectorBlock end(Index size) const; + const VectorBlock end(Index size) const; template VectorBlock start(); - template const VectorBlock start() const; + template const VectorBlock start() const; 
template VectorBlock end(); - template const VectorBlock end() const; + template const VectorBlock end() const; Minor minor(Index row, Index col); const Minor minor(Index row, Index col) const; @@ -433,10 +510,10 @@ template class MatrixBase template explicit MatrixBase(const MatrixBase&); protected: // mixing arrays and matrices is not legal - template Derived& operator+=(const ArrayBase& array) + template Derived& operator+=(const ArrayBase& ) {EIGEN_STATIC_ASSERT(sizeof(typename OtherDerived::Scalar)==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);} // mixing arrays and matrices is not legal - template Derived& operator-=(const ArrayBase& array) + template Derived& operator-=(const ArrayBase& ) {EIGEN_STATIC_ASSERT(sizeof(typename OtherDerived::Scalar)==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);} }; diff --git a/gtsam/3rdparty/Eigen/src/Core/NestByValue.h b/gtsam/3rdparty/Eigen/src/Core/NestByValue.h index 885002e26..a6104d2a4 100644 --- a/gtsam/3rdparty/Eigen/src/Core/NestByValue.h +++ b/gtsam/3rdparty/Eigen/src/Core/NestByValue.h @@ -38,16 +38,19 @@ * * \sa MatrixBase::nestByValue() */ + +namespace internal { template -struct ei_traits > : public ei_traits +struct traits > : public traits {}; +} template class NestByValue - : public ei_dense_xpr_base< NestByValue >::type + : public internal::dense_xpr_base< NestByValue >::type { public: - typedef typename ei_dense_xpr_base::type Base; + typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(NestByValue) inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {} diff --git a/gtsam/3rdparty/Eigen/src/Core/NoAlias.h b/gtsam/3rdparty/Eigen/src/Core/NoAlias.h index 53ad3bfee..da64affcf 100644 --- a/gtsam/3rdparty/Eigen/src/Core/NoAlias.h +++ b/gtsam/3rdparty/Eigen/src/Core/NoAlias.h @@ -51,17 +51,17 @@ class NoAlias * \sa MatrixBase::lazyAssign() */ template EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase& other) - { return 
ei_assign_selector::run(m_expression,other.derived()); } + { return internal::assign_selector::run(m_expression,other.derived()); } /** \sa MatrixBase::operator+= */ template EIGEN_STRONG_INLINE ExpressionType& operator+=(const StorageBase& other) { - typedef SelfCwiseBinaryOp, ExpressionType, OtherDerived> SelfAdder; + typedef SelfCwiseBinaryOp, ExpressionType, OtherDerived> SelfAdder; SelfAdder tmp(m_expression); - typedef typename ei_nested::type OtherDerivedNested; - typedef typename ei_cleantype::type _OtherDerivedNested; - ei_assign_selector::run(tmp,OtherDerivedNested(other.derived())); + typedef typename internal::nested::type OtherDerivedNested; + typedef typename internal::remove_all::type _OtherDerivedNested; + internal::assign_selector::run(tmp,OtherDerivedNested(other.derived())); return m_expression; } @@ -69,11 +69,11 @@ class NoAlias template EIGEN_STRONG_INLINE ExpressionType& operator-=(const StorageBase& other) { - typedef SelfCwiseBinaryOp, ExpressionType, OtherDerived> SelfAdder; + typedef SelfCwiseBinaryOp, ExpressionType, OtherDerived> SelfAdder; SelfAdder tmp(m_expression); - typedef typename ei_nested::type OtherDerivedNested; - typedef typename ei_cleantype::type _OtherDerivedNested; - ei_assign_selector::run(tmp,OtherDerivedNested(other.derived())); + typedef typename internal::nested::type OtherDerivedNested; + typedef typename internal::remove_all::type _OtherDerivedNested; + internal::assign_selector::run(tmp,OtherDerivedNested(other.derived())); return m_expression; } diff --git a/gtsam/3rdparty/Eigen/src/Core/NumTraits.h b/gtsam/3rdparty/Eigen/src/Core/NumTraits.h index 9e6e35a04..5c7762dae 100644 --- a/gtsam/3rdparty/Eigen/src/Core/NumTraits.h +++ b/gtsam/3rdparty/Eigen/src/Core/NumTraits.h @@ -40,7 +40,7 @@ * is a typedef to \a U. * \li A typedef \a NonInteger, giving the type that should be used for operations producing non-integral values, * such as quotients, square roots, etc. 
If \a T is a floating-point type, then this typedef just gives - * \a T again. Note however that many Eigen functions such as ei_sqrt simply refuse to + * \a T again. Note however that many Eigen functions such as internal::sqrt simply refuse to * take integers. Outside of a few cases, Eigen doesn't do automatic type promotion. Thus, this typedef is * only intended as a helper for code that needs to explicitly promote types. * \li A typedef \a Nested giving the type to use to nest a value inside of the expression tree. If you don't know what @@ -53,6 +53,8 @@ * to by move / add / mul instructions respectively, assuming the data is already stored in CPU registers. * Stay vague here. No need to do architecture-specific stuff. * \li An enum value \a IsSigned. It is equal to \c 1 if \a T is a signed type and to 0 if \a T is unsigned. + * \li An enum value \a RequireInitialization. It is equal to \c 1 if the constructor of the numeric type \a T must + * be called, and to 0 if it is safe not to call it. Default is 0 if \a T is an arithmetic type, and 1 otherwise. * \li An epsilon() function which, unlike std::numeric_limits::epsilon(), returns a \a Real instead of a \a T. * \li A dummy_precision() function returning a weak epsilon value. It is mainly used as a default * value by the fuzzy comparison operators. @@ -65,17 +67,18 @@ template struct GenericNumTraits IsInteger = std::numeric_limits::is_integer, IsSigned = std::numeric_limits::is_signed, IsComplex = 0, + RequireInitialization = internal::is_arithmetic::value ? 
0 : 1, ReadCost = 1, AddCost = 1, MulCost = 1 }; typedef T Real; - typedef typename ei_meta_if< + typedef typename internal::conditional< IsInteger, - typename ei_meta_if::ret, + typename internal::conditional::type, T - >::ret NonInteger; + >::type NonInteger; typedef T Nested; inline static Real epsilon() { return std::numeric_limits::epsilon(); } @@ -86,6 +89,13 @@ template struct GenericNumTraits } inline static T highest() { return std::numeric_limits::max(); } inline static T lowest() { return IsInteger ? std::numeric_limits::min() : (-std::numeric_limits::max()); } + +#ifdef EIGEN2_SUPPORT + enum { + HasFloatingPoint = !IsInteger + }; + typedef NonInteger FloatingPoint; +#endif }; template struct NumTraits : GenericNumTraits @@ -114,6 +124,7 @@ template struct NumTraits > typedef _Real Real; enum { IsComplex = 1, + RequireInitialization = NumTraits<_Real>::RequireInitialization, ReadCost = 2 * NumTraits<_Real>::ReadCost, AddCost = 2 * NumTraits::AddCost, MulCost = 4 * NumTraits::MulCost + 2 * NumTraits::AddCost @@ -137,6 +148,7 @@ struct NumTraits > IsComplex = NumTraits::IsComplex, IsInteger = NumTraits::IsInteger, IsSigned = NumTraits::IsSigned, + RequireInitialization = 1, ReadCost = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits::ReadCost, AddCost = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits::AddCost, MulCost = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits::MulCost diff --git a/gtsam/3rdparty/Eigen/src/Core/PermutationMatrix.h b/gtsam/3rdparty/Eigen/src/Core/PermutationMatrix.h index afe37ef6d..a064e053e 100644 --- a/gtsam/3rdparty/Eigen/src/Core/PermutationMatrix.h +++ b/gtsam/3rdparty/Eigen/src/Core/PermutationMatrix.h @@ -2,7 +2,7 @@ // for linear algebra. 
// // Copyright (C) 2009 Benoit Jacob -// Copyright (C) 2009 Gael Guennebaud +// Copyright (C) 2009-2011 Gael Guennebaud // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public @@ -26,15 +26,17 @@ #ifndef EIGEN_PERMUTATIONMATRIX_H #define EIGEN_PERMUTATIONMATRIX_H -/** \class PermutationMatrix +template class PermutedImpl; + +/** \class PermutationBase * \ingroup Core_Module * - * \brief Permutation matrix + * \brief Base class for permutations * - * \param SizeAtCompileTime the number of rows/cols, or Dynamic - * \param MaxSizeAtCompileTime the maximum number of rows/cols, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it. + * \param Derived the derived class * - * This class represents a permutation matrix, internally stored as a vector of integers. + * This class is the base class for all expressions representing a permutation matrix, + * internally stored as a vector of integers. * The convention followed here is that if \f$ \sigma \f$ is a permutation, the corresponding permutation matrix * \f$ P_\sigma \f$ is such that if \f$ (e_1,\ldots,e_p) \f$ is the canonical basis, we have: * \f[ P_\sigma(e_i) = e_{\sigma(i)}. \f] @@ -44,26 +46,29 @@ * Permutation matrices are square and invertible. * * Notice that in addition to the member functions and operators listed here, there also are non-member - * operator* to multiply a PermutationMatrix with any kind of matrix expression (MatrixBase) on either side. + * operator* to multiply any kind of permutation object with any kind of matrix expression (MatrixBase) + * on either side. 
* - * \sa class DiagonalMatrix + * \sa class PermutationMatrix, class PermutationWrapper */ -template struct ei_permut_matrix_product_retval; -template -struct ei_traits > - : ei_traits > -{}; +namespace internal { -template -class PermutationMatrix : public EigenBase > +template +struct permut_matrix_product_retval; +enum PermPermProduct_t {PermPermProduct}; + +} // end namespace internal + +template +class PermutationBase : public EigenBase { + typedef internal::traits Traits; + typedef EigenBase Base; public: #ifndef EIGEN_PARSED_BY_DOXYGEN - typedef ei_traits Traits; - typedef Matrix - DenseMatrixType; + typedef typename Traits::IndicesType IndicesType; enum { Flags = Traits::Flags, CoeffReadCost = Traits::CoeffReadCost, @@ -74,9 +79,227 @@ class PermutationMatrix : public EigenBase + DenseMatrixType; + typedef PermutationMatrix + PlainPermutationType; + using Base::derived; #endif - typedef Matrix IndicesType; + /** Copies the other permutation into *this */ + template + Derived& operator=(const PermutationBase& other) + { + indices() = other.indices(); + return derived(); + } + + /** Assignment from the Transpositions \a tr */ + template + Derived& operator=(const TranspositionsBase& tr) + { + setIdentity(tr.size()); + for(Index k=size()-1; k>=0; --k) + applyTranspositionOnTheRight(k,tr.coeff(k)); + return derived(); + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. 
+ */ + Derived& operator=(const PermutationBase& other) + { + indices() = other.indices(); + return derived(); + } + #endif + + /** \returns the number of rows */ + inline Index rows() const { return indices().size(); } + + /** \returns the number of columns */ + inline Index cols() const { return indices().size(); } + + /** \returns the size of a side of the respective square matrix, i.e., the number of indices */ + inline Index size() const { return indices().size(); } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + void evalTo(MatrixBase& other) const + { + other.setZero(); + for (int i=0; i=0 && j>=0 && i=0 && j>=0 && i inverse() const + { return derived(); } + /** \returns the tranpose permutation matrix. + * + * \note \note_try_to_help_rvo + */ + inline Transpose transpose() const + { return derived(); } + + /**** multiplication helpers to hopefully get RVO ****/ + + +#ifndef EIGEN_PARSED_BY_DOXYGEN + protected: + template + void assignTranspose(const PermutationBase& other) + { + for (int i=0; i + void assignProduct(const Lhs& lhs, const Rhs& rhs) + { + eigen_assert(lhs.cols() == rhs.rows()); + for (int i=0; i + inline PlainPermutationType operator*(const PermutationBase& other) const + { return PlainPermutationType(internal::PermPermProduct, derived(), other.derived()); } + + /** \returns the product of a permutation with another inverse permutation. + * + * \note \note_try_to_help_rvo + */ + template + inline PlainPermutationType operator*(const Transpose >& other) const + { return PlainPermutationType(internal::PermPermProduct, *this, other.eval()); } + + /** \returns the product of an inverse permutation with another permutation. 
+ * + * \note \note_try_to_help_rvo + */ + template friend + inline PlainPermutationType operator*(const Transpose >& other, const PermutationBase& perm) + { return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); } + + protected: + +}; + +/** \class PermutationMatrix + * \ingroup Core_Module + * + * \brief Permutation matrix + * + * \param SizeAtCompileTime the number of rows/cols, or Dynamic + * \param MaxSizeAtCompileTime the maximum number of rows/cols, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it. + * \param IndexType the interger type of the indices + * + * This class represents a permutation matrix, internally stored as a vector of integers. + * + * \sa class PermutationBase, class PermutationWrapper, class DiagonalMatrix + */ + +namespace internal { +template +struct traits > + : traits > +{ + typedef IndexType Index; + typedef Matrix IndicesType; +}; +} + +template +class PermutationMatrix : public PermutationBase > +{ + typedef PermutationBase Base; + typedef internal::traits Traits; + public: + + #ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename Traits::IndicesType IndicesType; + #endif inline PermutationMatrix() {} @@ -87,8 +310,8 @@ class PermutationMatrix : public EigenBase - inline PermutationMatrix(const PermutationMatrix& other) + template + inline PermutationMatrix(const PermutationBase& other) : m_indices(other.indices()) {} #ifndef EIGEN_PARSED_BY_DOXYGEN @@ -109,29 +332,26 @@ class PermutationMatrix : public EigenBase - explicit PermutationMatrix(const Transpositions& tr) + template + explicit PermutationMatrix(const TranspositionsBase& tr) : m_indices(tr.size()) { *this = tr; } /** Copies the other permutation into *this */ - template - PermutationMatrix& operator=(const PermutationMatrix& other) + template + PermutationMatrix& operator=(const PermutationBase& other) { m_indices = other.indices(); return *this; } /** Assignment from the 
Transpositions \a tr */ - template - PermutationMatrix& operator=(const Transpositions& tr) + template + PermutationMatrix& operator=(const TranspositionsBase& tr) { - setIdentity(tr.size()); - for(Index k=size()-1; k>=0; --k) - applyTranspositionOnTheRight(k,tr.coeff(k)); - return *this; + return Base::operator=(tr.derived()); } #ifndef EIGEN_PARSED_BY_DOXYGEN @@ -145,197 +365,195 @@ class PermutationMatrix : public EigenBase - void evalTo(MatrixBase& other) const - { - other.setZero(); - for (int i=0; i=0 && j>=0 && i=0 && j>=0 && i inverse() const - { return *this; } - /** \returns the tranpose permutation matrix. - * - * \note \note_try_to_help_rvo - */ - inline Transpose transpose() const - { return *this; } /**** multiplication helpers to hopefully get RVO ****/ #ifndef EIGEN_PARSED_BY_DOXYGEN - template - PermutationMatrix(const Transpose >& other) + template + PermutationMatrix(const Transpose >& other) : m_indices(other.nestedPermutation().size()) { - for (int i=0; i + PermutationMatrix(internal::PermPermProduct_t, const Lhs& lhs, const Rhs& rhs) + : m_indices(lhs.indices().size()) { - ei_assert(lhs.cols() == rhs.rows()); - for (int i=0; i - inline PermutationMatrix operator*(const PermutationMatrix& other) const - { return PermutationMatrix(Product, *this, other); } - - /** \returns the product of a permutation with another inverse permutation. - * - * \note \note_try_to_help_rvo - */ - template - inline PermutationMatrix operator*(const Transpose >& other) const - { return PermutationMatrix(Product, *this, other.eval()); } - - /** \returns the product of an inverse permutation with another permutation. 
- * - * \note \note_try_to_help_rvo - */ - template friend - inline PermutationMatrix operator*(const Transpose >& other, const PermutationMatrix& perm) - { return PermutationMatrix(Product, other.eval(), perm); } - protected: IndicesType m_indices; }; + +namespace internal { +template +struct traits,_PacketAccess> > + : traits > +{ + typedef IndexType Index; + typedef Map, _PacketAccess> IndicesType; +}; +} + +template +class Map,_PacketAccess> + : public PermutationBase,_PacketAccess> > +{ + typedef PermutationBase Base; + typedef internal::traits Traits; + public: + + #ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename Traits::IndicesType IndicesType; + typedef typename IndicesType::Scalar Index; + #endif + + inline Map(const Index* indices) + : m_indices(indices) + {} + + inline Map(const Index* indices, Index size) + : m_indices(indices,size) + {} + + /** Copies the other permutation into *this */ + template + Map& operator=(const PermutationBase& other) + { return Base::operator=(other.derived()); } + + /** Assignment from the Transpositions \a tr */ + template + Map& operator=(const TranspositionsBase& tr) + { return Base::operator=(tr.derived()); } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. + */ + Map& operator=(const Map& other) + { + m_indices = other.m_indices; + return *this; + } + #endif + + /** const version of indices(). */ + const IndicesType& indices() const { return m_indices; } + /** \returns a reference to the stored array representing the permutation. 
*/ + IndicesType& indices() { return m_indices; } + + protected: + + IndicesType m_indices; +}; + +/** \class PermutationWrapper + * \ingroup Core_Module + * + * \brief Class to view a vector of integers as a permutation matrix + * + * \param _IndicesType the type of the vector of integer (can be any compatible expression) + * + * This class allows to view any vector expression of integers as a permutation matrix. + * + * \sa class PermutationBase, class PermutationMatrix + */ + +struct PermutationStorage {}; + +template class TranspositionsWrapper; +namespace internal { +template +struct traits > +{ + typedef PermutationStorage StorageKind; + typedef typename _IndicesType::Scalar Scalar; + typedef typename _IndicesType::Scalar Index; + typedef _IndicesType IndicesType; + enum { + RowsAtCompileTime = _IndicesType::SizeAtCompileTime, + ColsAtCompileTime = _IndicesType::SizeAtCompileTime, + MaxRowsAtCompileTime = IndicesType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = IndicesType::MaxColsAtCompileTime, + Flags = 0, + CoeffReadCost = _IndicesType::CoeffReadCost + }; +}; +} + +template +class PermutationWrapper : public PermutationBase > +{ + typedef PermutationBase Base; + typedef internal::traits Traits; + public: + + #ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename Traits::IndicesType IndicesType; + #endif + + inline PermutationWrapper(const IndicesType& indices) + : m_indices(indices) + {} + + /** const version of indices(). */ + const typename internal::remove_all::type& + indices() const { return m_indices; } + + protected: + + const typename IndicesType::Nested m_indices; +}; + /** \returns the matrix with the permutation applied to the columns. 
*/ -template -inline const ei_permut_matrix_product_retval, Derived, OnTheRight> +template +inline const internal::permut_matrix_product_retval operator*(const MatrixBase& matrix, - const PermutationMatrix &permutation) + const PermutationBase &permutation) { - return ei_permut_matrix_product_retval - , Derived, OnTheRight> - (permutation, matrix.derived()); + return internal::permut_matrix_product_retval + + (permutation.derived(), matrix.derived()); } /** \returns the matrix with the permutation applied to the rows. */ -template -inline const ei_permut_matrix_product_retval - , Derived, OnTheLeft> -operator*(const PermutationMatrix &permutation, +template +inline const internal::permut_matrix_product_retval + +operator*(const PermutationBase &permutation, const MatrixBase& matrix) { - return ei_permut_matrix_product_retval - , Derived, OnTheLeft> - (permutation, matrix.derived()); + return internal::permut_matrix_product_retval + + (permutation.derived(), matrix.derived()); } +namespace internal { + template -struct ei_traits > +struct traits > { typedef typename MatrixType::PlainObject ReturnType; }; template -struct ei_permut_matrix_product_retval - : public ReturnByValue > +struct permut_matrix_product_retval + : public ReturnByValue > { - typedef typename ei_cleantype::type MatrixTypeNestedCleaned; + typedef typename remove_all::type MatrixTypeNestedCleaned; - ei_permut_matrix_product_retval(const PermutationType& perm, const MatrixType& matrix) + permut_matrix_product_retval(const PermutationType& perm, const MatrixType& matrix) : m_permutation(perm), m_matrix(matrix) {} @@ -346,7 +564,7 @@ struct ei_permut_matrix_product_retval { const int n = Side==OnTheLeft ? 
rows() : cols(); - if(ei_is_same_type::ret && ei_extract_data(dst) == ei_extract_data(m_matrix)) + if(is_same::value && extract_data(dst) == extract_data(m_matrix)) { // apply the permutation inplace Matrix mask(m_permutation.size()); @@ -382,7 +600,7 @@ struct ei_permut_matrix_product_retval = - Block + Block (m_matrix, ((Side==OnTheRight) ^ Transposed) ? m_permutation.indices().coeff(i) : i); } } @@ -395,23 +613,25 @@ struct ei_permut_matrix_product_retval /* Template partial specialization for transposed/inverse permutations */ -template -struct ei_traits > > - : ei_traits > +template +struct traits > > + : traits {}; -template -class Transpose > - : public EigenBase > > +} // end namespace internal + +template +class Transpose > + : public EigenBase > > { - typedef PermutationMatrix PermutationType; + typedef Derived PermutationType; typedef typename PermutationType::IndicesType IndicesType; + typedef typename PermutationType::PlainPermutationType PlainPermutationType; public: #ifndef EIGEN_PARSED_BY_DOXYGEN - typedef ei_traits Traits; - typedef Matrix - DenseMatrixType; + typedef internal::traits Traits; + typedef typename Derived::DenseMatrixType DenseMatrixType; enum { Flags = Traits::Flags, CoeffReadCost = Traits::CoeffReadCost, @@ -439,26 +659,26 @@ class Transpose > #endif /** \return the equivalent permutation matrix */ - PermutationType eval() const { return *this; } + PlainPermutationType eval() const { return *this; } DenseMatrixType toDenseMatrix() const { return *this; } /** \returns the matrix with the inverse permutation applied to the columns. 
*/ - template friend - inline const ei_permut_matrix_product_retval - operator*(const MatrixBase& matrix, const Transpose& trPerm) + template friend + inline const internal::permut_matrix_product_retval + operator*(const MatrixBase& matrix, const Transpose& trPerm) { - return ei_permut_matrix_product_retval(trPerm.m_permutation, matrix.derived()); + return internal::permut_matrix_product_retval(trPerm.m_permutation, matrix.derived()); } /** \returns the matrix with the inverse permutation applied to the rows. */ - template - inline const ei_permut_matrix_product_retval - operator*(const MatrixBase& matrix) const + template + inline const internal::permut_matrix_product_retval + operator*(const MatrixBase& matrix) const { - return ei_permut_matrix_product_retval(m_permutation, matrix.derived()); + return internal::permut_matrix_product_retval(m_permutation, matrix.derived()); } const PermutationType& nestedPermutation() const { return m_permutation; } @@ -467,4 +687,10 @@ class Transpose > const PermutationType& m_permutation; }; +template +const PermutationWrapper MatrixBase::asPermutation() const +{ + return derived(); +} + #endif // EIGEN_PERMUTATIONMATRIX_H diff --git a/gtsam/3rdparty/Eigen/src/Core/DenseStorageBase.h b/gtsam/3rdparty/Eigen/src/Core/PlainObjectBase.h similarity index 65% rename from gtsam/3rdparty/Eigen/src/Core/DenseStorageBase.h rename to gtsam/3rdparty/Eigen/src/Core/PlainObjectBase.h index 94b3e23a8..5358cb572 100644 --- a/gtsam/3rdparty/Eigen/src/Core/DenseStorageBase.h +++ b/gtsam/3rdparty/Eigen/src/Core/PlainObjectBase.h @@ -32,25 +32,35 @@ # define EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED #endif -template (Derived::IsVectorAtCompileTime)> struct ei_conservative_resize_like_impl; -template struct ei_matrix_swap_impl; +namespace internal { + +template (Derived::IsVectorAtCompileTime)> struct conservative_resize_like_impl; + +template struct matrix_swap_impl; + +} // end namespace internal /** * \brief %Dense storage base class 
for matrices and arrays. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_PLAINOBJECTBASE_PLUGIN. + * * \sa \ref TopicClassHierarchy */ template -class DenseStorageBase : public ei_dense_xpr_base::type +class PlainObjectBase : public internal::dense_xpr_base::type { public: - enum { Options = ei_traits::Options }; - typedef typename ei_dense_xpr_base::type Base; + enum { Options = internal::traits::Options }; + typedef typename internal::dense_xpr_base::type Base; - typedef typename ei_traits::StorageKind StorageKind; - typedef typename ei_traits::Index Index; - typedef typename ei_traits::Scalar Scalar; - typedef typename ei_packet_traits::type PacketScalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; + typedef Derived DenseType; using Base::RowsAtCompileTime; using Base::ColsAtCompileTime; @@ -61,13 +71,23 @@ class DenseStorageBase : public ei_dense_xpr_base::type using Base::IsVectorAtCompileTime; using Base::Flags; + template friend class Eigen::Map; friend class Eigen::Map; - typedef class Eigen::Map UnalignedMapType; + typedef Eigen::Map MapType; + friend class Eigen::Map; + typedef const Eigen::Map ConstMapType; friend class Eigen::Map; - typedef class Eigen::Map AlignedMapType; + typedef Eigen::Map AlignedMapType; + friend class Eigen::Map; + typedef const Eigen::Map ConstAlignedMapType; + template struct StridedMapType { typedef Eigen::Map type; }; + template struct StridedConstMapType { typedef Eigen::Map type; }; + template struct StridedAlignedMapType { typedef Eigen::Map type; }; + template struct StridedConstAlignedMapType { typedef Eigen::Map type; }; + protected: - ei_matrix_storage m_storage; + 
DenseStorage m_storage; public: enum { NeedsToAlign = (!(Options&DontAlign)) @@ -106,34 +126,51 @@ class DenseStorageBase : public ei_dense_xpr_base::type return m_storage.data()[index]; } + EIGEN_STRONG_INLINE const Scalar& coeffRef(Index row, Index col) const + { + if(Flags & RowMajorBit) + return m_storage.data()[col + row * m_storage.cols()]; + else // column-major + return m_storage.data()[row + col * m_storage.rows()]; + } + + EIGEN_STRONG_INLINE const Scalar& coeffRef(Index index) const + { + return m_storage.data()[index]; + } + + /** \internal */ template EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const { - return ei_ploadt + return internal::ploadt (m_storage.data() + (Flags & RowMajorBit ? col + row * m_storage.cols() : row + col * m_storage.rows())); } + /** \internal */ template EIGEN_STRONG_INLINE PacketScalar packet(Index index) const { - return ei_ploadt(m_storage.data() + index); + return internal::ploadt(m_storage.data() + index); } + /** \internal */ template EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketScalar& x) { - ei_pstoret + internal::pstoret (m_storage.data() + (Flags & RowMajorBit ? 
col + row * m_storage.cols() : row + col * m_storage.rows()), x); } + /** \internal */ template EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& x) { - ei_pstoret(m_storage.data() + index, x); + internal::pstoret(m_storage.data() + index, x); } /** \returns a const pointer to the data array of this matrix */ @@ -185,8 +222,8 @@ class DenseStorageBase : public ei_dense_xpr_base::type */ inline void resize(Index size) { - EIGEN_STATIC_ASSERT_VECTOR_ONLY(DenseStorageBase) - ei_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == size); + EIGEN_STATIC_ASSERT_VECTOR_ONLY(PlainObjectBase) + eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == size); #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO bool size_changed = size != this->size(); #endif @@ -239,44 +276,58 @@ class DenseStorageBase : public ei_dense_xpr_base::type const Index othersize = other.rows()*other.cols(); if(RowsAtCompileTime == 1) { - ei_assert(other.rows() == 1 || other.cols() == 1); + eigen_assert(other.rows() == 1 || other.cols() == 1); resize(1, othersize); } else if(ColsAtCompileTime == 1) { - ei_assert(other.rows() == 1 || other.cols() == 1); + eigen_assert(other.rows() == 1 || other.cols() == 1); resize(othersize, 1); } else resize(other.rows(), other.cols()); } - /** Resizes \c *this to a \a rows x \a cols matrix while leaving old values of \c *this untouched. + /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. * - * This method is intended for dynamic-size matrices. If you only want to change the number - * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index), + * The method is intended for matrices of dynamic size. If you only want to change the number + * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or * conservativeResize(Index, NoChange_t). * - * The top-left part of the resized matrix will be the same as the overlapping top-left corner - * of \c *this. 
In case values need to be appended to the matrix they will be uninitialized. + * Matrices are resized relative to the top-left element. In case values need to be + * appended to the matrix they will be uninitialized. */ EIGEN_STRONG_INLINE void conservativeResize(Index rows, Index cols) { - ei_conservative_resize_like_impl::run(*this, rows, cols); + internal::conservative_resize_like_impl::run(*this, rows, cols); } + /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. + * + * As opposed to conservativeResize(Index rows, Index cols), this version leaves + * the number of columns unchanged. + * + * In case the matrix is growing, new rows will be uninitialized. + */ EIGEN_STRONG_INLINE void conservativeResize(Index rows, NoChange_t) { // Note: see the comment in conservativeResize(Index,Index) conservativeResize(rows, cols()); } + /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. + * + * As opposed to conservativeResize(Index rows, Index cols), this version leaves + * the number of rows unchanged. + * + * In case the matrix is growing, new columns will be uninitialized. + */ EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index cols) { // Note: see the comment in conservativeResize(Index,Index) conservativeResize(rows(), cols); } - /** Resizes \c *this to a vector of length \a size while retaining old values of *this. + /** Resizes the vector to \a size while retaining old values. * * \only_for_vectors. This method does not work for * partially dynamic matrices when the static dimension is anything other @@ -286,19 +337,28 @@ class DenseStorageBase : public ei_dense_xpr_base::type */ EIGEN_STRONG_INLINE void conservativeResize(Index size) { - ei_conservative_resize_like_impl::run(*this, size); + internal::conservative_resize_like_impl::run(*this, size); } + /** Resizes the matrix to \a rows x \a cols of \c other, while leaving old values untouched. 
+ * + * The method is intended for matrices of dynamic size. If you only want to change the number + * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or + * conservativeResize(Index, NoChange_t). + * + * Matrices are resized relative to the top-left element. In case values need to be + * appended to the matrix they will copied from \c other. + */ template EIGEN_STRONG_INLINE void conservativeResizeLike(const DenseBase& other) { - ei_conservative_resize_like_impl::run(*this, other); + internal::conservative_resize_like_impl::run(*this, other); } /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ - EIGEN_STRONG_INLINE Derived& operator=(const DenseStorageBase& other) + EIGEN_STRONG_INLINE Derived& operator=(const PlainObjectBase& other) { return _set(other); } @@ -318,7 +378,7 @@ class DenseStorageBase : public ei_dense_xpr_base::type return Base::operator=(func); } - EIGEN_STRONG_INLINE explicit DenseStorageBase() : m_storage() + EIGEN_STRONG_INLINE explicit PlainObjectBase() : m_storage() { // _check_template_params(); // EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED @@ -327,14 +387,14 @@ class DenseStorageBase : public ei_dense_xpr_base::type #ifndef EIGEN_PARSED_BY_DOXYGEN // FIXME is it still needed ? 
/** \internal */ - DenseStorageBase(ei_constructor_without_unaligned_array_assert) - : m_storage(ei_constructor_without_unaligned_array_assert()) + PlainObjectBase(internal::constructor_without_unaligned_array_assert) + : m_storage(internal::constructor_without_unaligned_array_assert()) { // _check_template_params(); EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED } #endif - EIGEN_STRONG_INLINE DenseStorageBase(Index size, Index rows, Index cols) + EIGEN_STRONG_INLINE PlainObjectBase(Index size, Index rows, Index cols) : m_storage(size, rows, cols) { // _check_template_params(); @@ -353,7 +413,7 @@ class DenseStorageBase : public ei_dense_xpr_base::type /** \sa MatrixBase::operator=(const EigenBase&) */ template - EIGEN_STRONG_INLINE DenseStorageBase(const EigenBase &other) + EIGEN_STRONG_INLINE PlainObjectBase(const EigenBase &other) : m_storage(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols()) { _check_template_params(); @@ -371,31 +431,69 @@ class DenseStorageBase : public ei_dense_xpr_base::type * \see class Map */ //@{ - inline static const UnalignedMapType Map(const Scalar* data) - { return UnalignedMapType(data); } - inline static UnalignedMapType Map(Scalar* data) - { return UnalignedMapType(data); } - inline static const UnalignedMapType Map(const Scalar* data, Index size) - { return UnalignedMapType(data, size); } - inline static UnalignedMapType Map(Scalar* data, Index size) - { return UnalignedMapType(data, size); } - inline static const UnalignedMapType Map(const Scalar* data, Index rows, Index cols) - { return UnalignedMapType(data, rows, cols); } - inline static UnalignedMapType Map(Scalar* data, Index rows, Index cols) - { return UnalignedMapType(data, rows, cols); } + inline static ConstMapType Map(const Scalar* data) + { return ConstMapType(data); } + inline static MapType Map(Scalar* data) + { return MapType(data); } + inline static ConstMapType Map(const Scalar* data, Index size) + { return 
ConstMapType(data, size); } + inline static MapType Map(Scalar* data, Index size) + { return MapType(data, size); } + inline static ConstMapType Map(const Scalar* data, Index rows, Index cols) + { return ConstMapType(data, rows, cols); } + inline static MapType Map(Scalar* data, Index rows, Index cols) + { return MapType(data, rows, cols); } - inline static const AlignedMapType MapAligned(const Scalar* data) - { return AlignedMapType(data); } + inline static ConstAlignedMapType MapAligned(const Scalar* data) + { return ConstAlignedMapType(data); } inline static AlignedMapType MapAligned(Scalar* data) { return AlignedMapType(data); } - inline static const AlignedMapType MapAligned(const Scalar* data, Index size) - { return AlignedMapType(data, size); } + inline static ConstAlignedMapType MapAligned(const Scalar* data, Index size) + { return ConstAlignedMapType(data, size); } inline static AlignedMapType MapAligned(Scalar* data, Index size) { return AlignedMapType(data, size); } - inline static const AlignedMapType MapAligned(const Scalar* data, Index rows, Index cols) - { return AlignedMapType(data, rows, cols); } + inline static ConstAlignedMapType MapAligned(const Scalar* data, Index rows, Index cols) + { return ConstAlignedMapType(data, rows, cols); } inline static AlignedMapType MapAligned(Scalar* data, Index rows, Index cols) { return AlignedMapType(data, rows, cols); } + + template + inline static typename StridedConstMapType >::type Map(const Scalar* data, const Stride& stride) + { return typename StridedConstMapType >::type(data, stride); } + template + inline static typename StridedMapType >::type Map(Scalar* data, const Stride& stride) + { return typename StridedMapType >::type(data, stride); } + template + inline static typename StridedConstMapType >::type Map(const Scalar* data, Index size, const Stride& stride) + { return typename StridedConstMapType >::type(data, size, stride); } + template + inline static typename StridedMapType >::type Map(Scalar* 
data, Index size, const Stride& stride) + { return typename StridedMapType >::type(data, size, stride); } + template + inline static typename StridedConstMapType >::type Map(const Scalar* data, Index rows, Index cols, const Stride& stride) + { return typename StridedConstMapType >::type(data, rows, cols, stride); } + template + inline static typename StridedMapType >::type Map(Scalar* data, Index rows, Index cols, const Stride& stride) + { return typename StridedMapType >::type(data, rows, cols, stride); } + + template + inline static typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, const Stride& stride) + { return typename StridedConstAlignedMapType >::type(data, stride); } + template + inline static typename StridedAlignedMapType >::type MapAligned(Scalar* data, const Stride& stride) + { return typename StridedAlignedMapType >::type(data, stride); } + template + inline static typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, Index size, const Stride& stride) + { return typename StridedConstAlignedMapType >::type(data, size, stride); } + template + inline static typename StridedAlignedMapType >::type MapAligned(Scalar* data, Index size, const Stride& stride) + { return typename StridedAlignedMapType >::type(data, size, stride); } + template + inline static typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, Index rows, Index cols, const Stride& stride) + { return typename StridedConstAlignedMapType >::type(data, rows, cols, stride); } + template + inline static typename StridedAlignedMapType >::type MapAligned(Scalar* data, Index rows, Index cols, const Stride& stride) + { return typename StridedAlignedMapType >::type(data, rows, cols, stride); } //@} using Base::setConstant; @@ -414,8 +512,8 @@ class DenseStorageBase : public ei_dense_xpr_base::type Derived& setRandom(Index size); Derived& setRandom(Index rows, Index cols); - #ifdef EIGEN_DENSESTORAGEBASE_PLUGIN - #include 
EIGEN_DENSESTORAGEBASE_PLUGIN + #ifdef EIGEN_PLAINOBJECTBASE_PLUGIN + #include EIGEN_PLAINOBJECTBASE_PLUGIN #endif protected: @@ -430,7 +528,7 @@ class DenseStorageBase : public ei_dense_xpr_base::type EIGEN_STRONG_INLINE void _resize_to_match(const EigenBase& other) { #ifdef EIGEN_NO_AUTOMATIC_RESIZING - ei_assert((this->size()==0 || (IsVectorAtCompileTime ? (this->size() == other.size()) + eigen_assert((this->size()==0 || (IsVectorAtCompileTime ? (this->size() == other.size()) : (rows() == other.rows() && cols() == other.cols()))) && "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined"); #else @@ -455,15 +553,15 @@ class DenseStorageBase : public ei_dense_xpr_base::type template EIGEN_STRONG_INLINE Derived& _set(const DenseBase& other) { - _set_selector(other.derived(), typename ei_meta_if(int(OtherDerived::Flags) & EvalBeforeAssigningBit), ei_meta_true, ei_meta_false>::ret()); + _set_selector(other.derived(), typename internal::conditional(int(OtherDerived::Flags) & EvalBeforeAssigningBit), internal::true_type, internal::false_type>::type()); return this->derived(); } template - EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const ei_meta_true&) { _set_noalias(other.eval()); } + EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const internal::true_type&) { _set_noalias(other.eval()); } template - EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const ei_meta_false&) { _set_noalias(other); } + EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const internal::false_type&) { _set_noalias(other); } /** \internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which * is the case when creating a new matrix) so one can enforce lazy evaluation. @@ -478,36 +576,36 @@ class DenseStorageBase : public ei_dense_xpr_base::type //_resize_to_match(other); // the 'false' below means to enforce lazy evaluation. 
We don't use lazyAssign() because // it wouldn't allow to copy a row-vector into a column-vector. - return ei_assign_selector::run(this->derived(), other.derived()); + return internal::assign_selector::run(this->derived(), other.derived()); } template - EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename ei_enable_if::type* = 0) + EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename internal::enable_if::type* = 0) { - ei_assert(rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) + eigen_assert(rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)); m_storage.resize(rows*cols,rows,cols); EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED } template - EIGEN_STRONG_INLINE void _init2(const Scalar& x, const Scalar& y, typename ei_enable_if::type* = 0) + EIGEN_STRONG_INLINE void _init2(const Scalar& x, const Scalar& y, typename internal::enable_if::type* = 0) { - EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(DenseStorageBase, 2) + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2) m_storage.data()[0] = x; m_storage.data()[1] = y; } template - friend struct ei_matrix_swap_impl; + friend struct internal::matrix_swap_impl; /** \internal generic implementation of swap for dense storage since for dynamic-sized matrices of same type it is enough to swap the * data pointers. 
*/ template - void _swap(DenseBase EIGEN_REF_TO_TEMPORARY other) + void _swap(DenseBase const & other) { - enum { SwapPointers = ei_is_same_type::ret && Base::SizeAtCompileTime==Dynamic }; - ei_matrix_swap_impl::run(this->derived(), other.const_cast_derived()); + enum { SwapPointers = internal::is_same::value && Base::SizeAtCompileTime==Dynamic }; + internal::matrix_swap_impl::run(this->derived(), other.const_cast_derived()); } public: @@ -526,10 +624,13 @@ class DenseStorageBase : public ei_dense_xpr_base::type INVALID_MATRIX_TEMPLATE_PARAMETERS) } #endif + +private: + enum { ThisConstantIsPrivateInPlainObjectBase }; }; template -struct ei_conservative_resize_like_impl +struct internal::conservative_resize_like_impl { typedef typename Derived::Index Index; static void run(DenseBase& _this, Index rows, Index cols) @@ -588,8 +689,10 @@ struct ei_conservative_resize_like_impl } }; +namespace internal { + template -struct ei_conservative_resize_like_impl +struct conservative_resize_like_impl { typedef typename Derived::Index Index; static void run(DenseBase& _this, Index size) @@ -615,7 +718,7 @@ struct ei_conservative_resize_like_impl }; template -struct ei_matrix_swap_impl +struct matrix_swap_impl { static inline void run(MatrixTypeA& a, MatrixTypeB& b) { @@ -624,7 +727,7 @@ struct ei_matrix_swap_impl }; template -struct ei_matrix_swap_impl +struct matrix_swap_impl { static inline void run(MatrixTypeA& a, MatrixTypeB& b) { @@ -632,4 +735,6 @@ struct ei_matrix_swap_impl } }; +} // end namespace internal + #endif // EIGEN_DENSESTORAGEBASE_H diff --git a/gtsam/3rdparty/Eigen/src/Core/Product.h b/gtsam/3rdparty/Eigen/src/Core/Product.h index 8e82338e7..1363e83b6 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Product.h +++ b/gtsam/3rdparty/Eigen/src/Core/Product.h @@ -45,39 +45,57 @@ * * \sa ProductReturnType, MatrixBase::operator*(const MatrixBase&) */ -template::value> +template::value> class GeneralProduct; -template struct ei_product_type_selector; - enum { Large = 2, 
Small = 3 }; -template struct ei_product_type +namespace internal { + +template struct product_type_selector; + +template struct product_size_category { - typedef typename ei_cleantype::type _Lhs; - typedef typename ei_cleantype::type _Rhs; + enum { is_large = MaxSize == Dynamic || + Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD, + value = is_large ? Large + : Size == 1 ? 1 + : Small + }; +}; + +template struct product_type +{ + typedef typename remove_all::type _Lhs; + typedef typename remove_all::type _Rhs; enum { - Rows = _Lhs::MaxRowsAtCompileTime, - Cols = _Rhs::MaxColsAtCompileTime, - Depth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::MaxColsAtCompileTime,_Rhs::MaxRowsAtCompileTime) + MaxRows = _Lhs::MaxRowsAtCompileTime, + Rows = _Lhs::RowsAtCompileTime, + MaxCols = _Rhs::MaxColsAtCompileTime, + Cols = _Rhs::ColsAtCompileTime, + MaxDepth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::MaxColsAtCompileTime, + _Rhs::MaxRowsAtCompileTime), + Depth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::ColsAtCompileTime, + _Rhs::RowsAtCompileTime), + LargeThreshold = EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD }; // the splitting into different lines of code here, introducing the _select enums and the typedef below, // is to work around an internal compiler error with gcc 4.1 and 4.2. private: enum { - rows_select = Rows == Dynamic || Rows >=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD ? Large : (Rows==1 ? 1 : Small), - cols_select = Cols == Dynamic || Cols >=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD ? Large : (Cols==1 ? 1 : Small), - depth_select = Depth == Dynamic || Depth>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD ? Large : (Depth==1 ? 
1 : Small) + rows_select = product_size_category::value, + cols_select = product_size_category::value, + depth_select = product_size_category::value }; - typedef ei_product_type_selector product_type_selector; + typedef product_type_selector selector; public: enum { - value = product_type_selector::ret + value = selector::ret }; #ifdef EIGEN_DEBUG_PRODUCT static void debug() @@ -93,32 +111,35 @@ public: #endif }; + /* The following allows to select the kind of product at compile time * based on the three dimensions of the product. * This is a compile time mapping from {1,Small,Large}^3 -> {product types} */ // FIXME I'm not sure the current mapping is the ideal one. -template struct ei_product_type_selector { enum { ret = OuterProduct }; }; -template struct ei_product_type_selector<1, 1, Depth> { enum { ret = InnerProduct }; }; -template<> struct ei_product_type_selector<1, 1, 1> { enum { ret = InnerProduct }; }; -template<> struct ei_product_type_selector { enum { ret = CoeffBasedProductMode }; }; -template<> struct ei_product_type_selector<1, Small,Small> { enum { ret = CoeffBasedProductMode }; }; -template<> struct ei_product_type_selector { enum { ret = CoeffBasedProductMode }; }; -template<> struct ei_product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; -template<> struct ei_product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; -template<> struct ei_product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; -template<> struct ei_product_type_selector<1, Large,Small> { enum { ret = CoeffBasedProductMode }; }; -template<> struct ei_product_type_selector<1, Large,Large> { enum { ret = GemvProduct }; }; -template<> struct ei_product_type_selector<1, Small,Large> { enum { ret = CoeffBasedProductMode }; }; -template<> struct ei_product_type_selector { enum { ret = CoeffBasedProductMode }; }; -template<> struct ei_product_type_selector { enum { ret = GemvProduct }; }; -template<> struct ei_product_type_selector { enum { ret = 
CoeffBasedProductMode }; }; -template<> struct ei_product_type_selector { enum { ret = GemmProduct }; }; -template<> struct ei_product_type_selector { enum { ret = GemmProduct }; }; -template<> struct ei_product_type_selector { enum { ret = GemmProduct }; }; -template<> struct ei_product_type_selector { enum { ret = GemmProduct }; }; -template<> struct ei_product_type_selector { enum { ret = GemmProduct }; }; -template<> struct ei_product_type_selector { enum { ret = GemmProduct }; }; -template<> struct ei_product_type_selector { enum { ret = GemmProduct }; }; +template struct product_type_selector { enum { ret = OuterProduct }; }; +template struct product_type_selector<1, 1, Depth> { enum { ret = InnerProduct }; }; +template<> struct product_type_selector<1, 1, 1> { enum { ret = InnerProduct }; }; +template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector<1, Small,Small> { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; +template<> struct product_type_selector<1, Large,Small> { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector<1, Large,Large> { enum { ret = GemvProduct }; }; +template<> struct product_type_selector<1, Small,Large> { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = GemvProduct }; }; +template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { 
ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; + +} // end namespace internal /** \class ProductReturnType * \ingroup Core_Module @@ -127,7 +148,7 @@ template<> struct ei_product_type_selector { en * * \param Lhs the type of the left-hand side * \param Rhs the type of the right-hand side - * \param ProductMode the type of the product (determined automatically by ei_product_mode) + * \param ProductMode the type of the product (determined automatically by internal::product_mode) * * This class defines the typename Type representing the optimized product expression * between two matrix expressions. In practice, using ProductReturnType::Type @@ -141,8 +162,8 @@ template struct ProductReturnType { // TODO use the nested type to reduce instanciations ???? 
-// typedef typename ei_nested::type LhsNested; -// typedef typename ei_nested::type RhsNested; +// typedef typename internal::nested::type LhsNested; +// typedef typename internal::nested::type RhsNested; typedef GeneralProduct Type; }; @@ -150,16 +171,16 @@ struct ProductReturnType template struct ProductReturnType { - typedef typename ei_nested::type >::type LhsNested; - typedef typename ei_nested::type >::type RhsNested; + typedef typename internal::nested::type >::type LhsNested; + typedef typename internal::nested::type >::type RhsNested; typedef CoeffBasedProduct Type; }; template struct ProductReturnType { - typedef typename ei_nested::type >::type LhsNested; - typedef typename ei_nested::type >::type RhsNested; + typedef typename internal::nested::type >::type LhsNested; + typedef typename internal::nested::type >::type RhsNested; typedef CoeffBasedProduct Type; }; @@ -179,28 +200,30 @@ struct LazyProductReturnType : public ProductReturnType with: operator=(Scalar x); +namespace internal { + template -struct ei_traits > - : ei_traits::ReturnType,1,1> > +struct traits > + : traits::ReturnType,1,1> > {}; +} + template class GeneralProduct - : ei_no_assignment_operator, - public Matrix::ReturnType,1,1> + : internal::no_assignment_operator, + public Matrix::ReturnType,1,1> { - typedef Matrix::ReturnType,1,1> Base; + typedef Matrix::ReturnType,1,1> Base; public: GeneralProduct(const Lhs& lhs, const Rhs& rhs) { - EIGEN_STATIC_ASSERT((ei_is_same_type::ret), + EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) Base::coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum(); } - typename Base::Scalar value() const { return Base::coeff(0,0); } - /** Convertion to scalar */ operator const typename Base::Scalar() const { return Base::coeff(0,0); @@ -210,13 +233,17 @@ class GeneralProduct 
/*********************************************************************** * Implementation of Outer Vector Vector Product ***********************************************************************/ -template struct ei_outer_product_selector; + +namespace internal { +template struct outer_product_selector; template -struct ei_traits > - : ei_traits, Lhs, Rhs> > +struct traits > + : traits, Lhs, Rhs> > {}; +} + template class GeneralProduct : public ProductBase, Lhs, Rhs> @@ -226,17 +253,19 @@ class GeneralProduct GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) { - EIGEN_STATIC_ASSERT((ei_is_same_type::ret), + EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) } template void scaleAndAddTo(Dest& dest, Scalar alpha) const { - ei_outer_product_selector<(int(Dest::Flags)&RowMajorBit) ? RowMajor : ColMajor>::run(*this, dest, alpha); + internal::outer_product_selector<(int(Dest::Flags)&RowMajorBit) ? 
RowMajor : ColMajor>::run(*this, dest, alpha); } }; -template<> struct ei_outer_product_selector { +namespace internal { + +template<> struct outer_product_selector { template static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) { typedef typename Dest::Index Index; @@ -248,7 +277,7 @@ template<> struct ei_outer_product_selector { } }; -template<> struct ei_outer_product_selector { +template<> struct outer_product_selector { template static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) { typedef typename Dest::Index Index; @@ -260,6 +289,8 @@ template<> struct ei_outer_product_selector { } }; +} // end namespace internal + /*********************************************************************** * Implementation of General Matrix Vector Product ***********************************************************************/ @@ -271,13 +302,17 @@ template<> struct ei_outer_product_selector { * Therefore we need a lower level meta selector. * Furthermore, if the matrix is the rhs, then the product has to be transposed. */ +namespace internal { + template -struct ei_traits > - : ei_traits, Lhs, Rhs> > +struct traits > + : traits, Lhs, Rhs> > {}; template -struct ei_gemv_selector; +struct gemv_selector; + +} // end namespace internal template class GeneralProduct @@ -291,40 +326,63 @@ class GeneralProduct GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) { -// EIGEN_STATIC_ASSERT((ei_is_same_type::ret), +// EIGEN_STATIC_ASSERT((internal::is_same::value), // YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) } enum { Side = Lhs::IsVectorAtCompileTime ? 
OnTheLeft : OnTheRight }; - typedef typename ei_meta_if::ret MatrixType; + typedef typename internal::conditional::type MatrixType; template void scaleAndAddTo(Dest& dst, Scalar alpha) const { - ei_assert(m_lhs.rows() == dst.rows() && m_rhs.cols() == dst.cols()); - ei_gemv_selector::HasUsableDirectAccess)>::run(*this, dst, alpha); + eigen_assert(m_lhs.rows() == dst.rows() && m_rhs.cols() == dst.cols()); + internal::gemv_selector::HasUsableDirectAccess)>::run(*this, dst, alpha); } }; +namespace internal { + // The vector is on the left => transposition template -struct ei_gemv_selector +struct gemv_selector { template static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) { Transpose destT(dest); enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor }; - ei_gemv_selector - ::run(GeneralProduct,Transpose, GemvProduct> + gemv_selector + ::run(GeneralProduct,Transpose, GemvProduct> (prod.rhs().transpose(), prod.lhs().transpose()), destT, alpha); } }; -template<> struct ei_gemv_selector +template struct gemv_static_vector_if; + +template +struct gemv_static_vector_if +{ + EIGEN_STRONG_INLINE Scalar* data() { eigen_internal_assert(false && "should never be called"); return 0; } +}; + +template +struct gemv_static_vector_if +{ + EIGEN_STRONG_INLINE Scalar* data() { return 0; } +}; + +template +struct gemv_static_vector_if +{ + internal::plain_array m_data; + EIGEN_STRONG_INLINE Scalar* data() { return m_data.array; } +}; + +template<> struct gemv_selector { template - static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) + static inline void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) { typedef typename ProductType::Index Index; typedef typename ProductType::LhsScalar LhsScalar; @@ -337,60 +395,73 @@ template<> struct ei_gemv_selector typedef typename ProductType::RhsBlasTraits RhsBlasTraits; typedef Map, Aligned> MappedDest; - ActualLhsType actualLhs 
= LhsBlasTraits::extract(prod.lhs()); - ActualRhsType actualRhs = RhsBlasTraits::extract(prod.rhs()); + const ActualLhsType actualLhs = LhsBlasTraits::extract(prod.lhs()); + const ActualRhsType actualRhs = RhsBlasTraits::extract(prod.rhs()); ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs()) * RhsBlasTraits::extractScalarFactor(prod.rhs()); enum { - // FIXME find a way to allow an inner stride on the result if ei_packet_traits::size==1 + // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 + // on, the other hand it is good for the cache to pack the vector anyways... EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1, - ComplexByReal = (NumTraits::IsComplex) && (!NumTraits::IsComplex) + ComplexByReal = (NumTraits::IsComplex) && (!NumTraits::IsComplex), + MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal }; - bool alphaIsCompatible = (!ComplexByReal) || (ei_imag(actualAlpha)==RealScalar(0)); + gemv_static_vector_if static_dest; + + bool alphaIsCompatible = (!ComplexByReal) || (imag(actualAlpha)==RealScalar(0)); bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible; - RhsScalar compatibleAlpha = ei_get_factor::run(actualAlpha); + RhsScalar compatibleAlpha = get_factor::run(actualAlpha); - ResScalar* actualDest; + ResScalar* actualDestPtr; + bool freeDestPtr = false; if (evalToDest) { - actualDest = &dest.coeffRef(0); + actualDestPtr = &dest.coeffRef(0); } else { - actualDest = ei_aligned_stack_new(ResScalar,dest.size()); + #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + int size = dest.size(); + EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #endif + if((actualDestPtr = static_dest.data())==0) + { + freeDestPtr = true; + actualDestPtr = ei_aligned_stack_new(ResScalar,dest.size()); + } if(!alphaIsCompatible) { - MappedDest(actualDest, dest.size()).setZero(); + MappedDest(actualDestPtr, dest.size()).setZero(); compatibleAlpha = RhsScalar(1); } else - MappedDest(actualDest, dest.size()) = 
dest; + MappedDest(actualDestPtr, dest.size()) = dest; } - ei_general_matrix_vector_product + general_matrix_vector_product ::run( actualLhs.rows(), actualLhs.cols(), - &actualLhs.const_cast_derived().coeffRef(0,0), actualLhs.outerStride(), + &actualLhs.coeffRef(0,0), actualLhs.outerStride(), actualRhs.data(), actualRhs.innerStride(), - actualDest, 1, + actualDestPtr, 1, compatibleAlpha); if (!evalToDest) { if(!alphaIsCompatible) - dest += actualAlpha * MappedDest(actualDest, dest.size()); + dest += actualAlpha * MappedDest(actualDestPtr, dest.size()); else - dest = MappedDest(actualDest, dest.size()); - ei_aligned_stack_delete(ResScalar, actualDest, dest.size()); + dest = MappedDest(actualDestPtr, dest.size()); + if(freeDestPtr) ei_aligned_stack_delete(ResScalar, actualDestPtr, dest.size()); } } }; -template<> struct ei_gemv_selector +template<> struct gemv_selector { template static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) @@ -405,41 +476,53 @@ template<> struct ei_gemv_selector typedef typename ProductType::LhsBlasTraits LhsBlasTraits; typedef typename ProductType::RhsBlasTraits RhsBlasTraits; - ActualLhsType actualLhs = LhsBlasTraits::extract(prod.lhs()); - ActualRhsType actualRhs = RhsBlasTraits::extract(prod.rhs()); + typename add_const::type actualLhs = LhsBlasTraits::extract(prod.lhs()); + typename add_const::type actualRhs = RhsBlasTraits::extract(prod.rhs()); ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs()) * RhsBlasTraits::extractScalarFactor(prod.rhs()); enum { - // FIXME I think here we really have to check for ei_packet_traits::size==1 - // because in this case it is fine to have an inner stride - DirectlyUseRhs = ((ei_packet_traits::size==1) || (_ActualRhsType::Flags&ActualPacketAccessBit)) - && (!(_ActualRhsType::Flags & RowMajorBit)) + // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 + // on, the other hand it is good for the cache to pack the 
vector anyways... + DirectlyUseRhs = _ActualRhsType::InnerStrideAtCompileTime==1 }; - RhsScalar* rhs_data; + gemv_static_vector_if static_rhs; + + RhsScalar* actualRhsPtr; + bool freeRhsPtr = false; if (DirectlyUseRhs) - rhs_data = &actualRhs.const_cast_derived().coeffRef(0); + { + actualRhsPtr = const_cast(&actualRhs.coeffRef(0)); + } else { - rhs_data = ei_aligned_stack_new(RhsScalar, actualRhs.size()); - Map(rhs_data, actualRhs.size()) = actualRhs; + #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + int size = actualRhs.size(); + EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #endif + if((actualRhsPtr = static_rhs.data())==0) + { + freeRhsPtr = true; + actualRhsPtr = ei_aligned_stack_new(RhsScalar, actualRhs.size()); + } + Map(actualRhsPtr, actualRhs.size()) = actualRhs; } - ei_general_matrix_vector_product + general_matrix_vector_product ::run( actualLhs.rows(), actualLhs.cols(), - &actualLhs.const_cast_derived().coeffRef(0,0), actualLhs.outerStride(), - rhs_data, 1, + &actualLhs.coeffRef(0,0), actualLhs.outerStride(), + actualRhsPtr, 1, &dest.coeffRef(0,0), dest.innerStride(), actualAlpha); - if (!DirectlyUseRhs) ei_aligned_stack_delete(RhsScalar, rhs_data, prod.rhs().size()); + if((!DirectlyUseRhs) && freeRhsPtr) ei_aligned_stack_delete(RhsScalar, actualRhsPtr, prod.rhs().size()); } }; -template<> struct ei_gemv_selector +template<> struct gemv_selector { template static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) @@ -452,7 +535,7 @@ template<> struct ei_gemv_selector } }; -template<> struct ei_gemv_selector +template<> struct gemv_selector { template static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) @@ -465,6 +548,8 @@ template<> struct ei_gemv_selector } }; +} // end namespace internal + /*************************************************************************** * Implementation of matrix base methods ***************************************************************************/ @@ -481,7 +566,7 @@ 
inline const typename ProductReturnType::Type MatrixBase::operator*(const MatrixBase &other) const { // A note regarding the function declaration: In MSVC, this function will sometimes - // not be inlined since ei_matrix_storage is an unwindable object for dynamic + // not be inlined since DenseStorage is an unwindable object for dynamic // matrices and product types are holding a member to store the result. // Thus it does not help tagging this function with EIGEN_STRONG_INLINE. enum { @@ -500,7 +585,7 @@ MatrixBase::operator*(const MatrixBase &other) const INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) #ifdef EIGEN_DEBUG_PRODUCT - ei_product_type::debug(); + internal::product_type::debug(); #endif return typename ProductReturnType::Type(derived(), other.derived()); } diff --git a/gtsam/3rdparty/Eigen/src/Core/ProductBase.h b/gtsam/3rdparty/Eigen/src/Core/ProductBase.h index bc2a0f743..3bd3487d6 100644 --- a/gtsam/3rdparty/Eigen/src/Core/ProductBase.h +++ b/gtsam/3rdparty/Eigen/src/Core/ProductBase.h @@ -29,29 +29,32 @@ * \ingroup Core_Module * */ + +namespace internal { template -struct ei_traits > +struct traits > { typedef MatrixXpr XprKind; - typedef typename ei_cleantype<_Lhs>::type Lhs; - typedef typename ei_cleantype<_Rhs>::type Rhs; - typedef typename ei_scalar_product_traits::ReturnType Scalar; - typedef typename ei_promote_storage_type::StorageKind, - typename ei_traits::StorageKind>::ret StorageKind; - typedef typename ei_promote_index_type::Index, - typename ei_traits::Index>::type Index; + typedef typename remove_all<_Lhs>::type Lhs; + typedef typename remove_all<_Rhs>::type Rhs; + typedef typename scalar_product_traits::ReturnType Scalar; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; enum { 
- RowsAtCompileTime = ei_traits::RowsAtCompileTime, - ColsAtCompileTime = ei_traits::ColsAtCompileTime, - MaxRowsAtCompileTime = ei_traits::MaxRowsAtCompileTime, - MaxColsAtCompileTime = ei_traits::MaxColsAtCompileTime, + RowsAtCompileTime = traits::RowsAtCompileTime, + ColsAtCompileTime = traits::ColsAtCompileTime, + MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = traits::MaxColsAtCompileTime, Flags = (MaxRowsAtCompileTime==1 ? RowMajorBit : 0) | EvalBeforeNestingBit | EvalBeforeAssigningBit | NestByRefBit, // Note that EvalBeforeNestingBit and NestByRefBit - // are not used in practice because ei_nested is overloaded for products + // are not used in practice because nested is overloaded for products CoeffReadCost = 0 // FIXME why is it needed ? }; }; +} #define EIGEN_PRODUCT_PUBLIC_INTERFACE(Derived) \ typedef ProductBase Base; \ @@ -75,18 +78,20 @@ class ProductBase : public MatrixBase public: typedef MatrixBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(ProductBase) - protected: + typedef typename Lhs::Nested LhsNested; - typedef typename ei_cleantype::type _LhsNested; - typedef ei_blas_traits<_LhsNested> LhsBlasTraits; + typedef typename internal::remove_all::type _LhsNested; + typedef internal::blas_traits<_LhsNested> LhsBlasTraits; typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; - typedef typename ei_cleantype::type _ActualLhsType; + typedef typename internal::remove_all::type _ActualLhsType; + typedef typename internal::traits::Scalar LhsScalar; typedef typename Rhs::Nested RhsNested; - typedef typename ei_cleantype::type _RhsNested; - typedef ei_blas_traits<_RhsNested> RhsBlasTraits; + typedef typename internal::remove_all::type _RhsNested; + typedef internal::blas_traits<_RhsNested> RhsBlasTraits; typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; - typedef typename ei_cleantype::type _ActualRhsType; + typedef typename internal::remove_all::type _ActualRhsType; + typedef typename 
internal::traits::Scalar RhsScalar; // Diagonal of a product: no need to evaluate the arguments because they are going to be evaluated only once typedef CoeffBasedProduct FullyLazyCoeffBaseProductType; @@ -98,7 +103,7 @@ class ProductBase : public MatrixBase ProductBase(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) { - ei_assert(lhs.cols() == rhs.rows() + eigen_assert(lhs.cols() == rhs.rows() && "invalid matrix product" && "if you wanted a coeff-wise or a dot product use the respective explicit functions"); } @@ -129,7 +134,7 @@ class ProductBase : public MatrixBase return m_result; } - const Diagonal diagonal() const + const Diagonal diagonal() const { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); } template @@ -139,29 +144,56 @@ class ProductBase : public MatrixBase const Diagonal diagonal(Index index) const { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs).diagonal(index); } + // restrict coeff accessors to 1x1 expressions. No need to care about mutators here since this isnt a Lvalue expression + typename Base::CoeffReturnType coeff(Index row, Index col) const + { +#ifdef EIGEN2_SUPPORT + return lhs().row(row).cwiseProduct(rhs().col(col).transpose()).sum(); +#else + EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) + eigen_assert(this->rows() == 1 && this->cols() == 1); + return derived().coeff(row,col); +#endif + } + + typename Base::CoeffReturnType coeff(Index i) const + { + EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) + eigen_assert(this->rows() == 1 && this->cols() == 1); + return derived().coeff(i); + } + + const Scalar& coeffRef(Index row, Index col) const + { + EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) + eigen_assert(this->rows() == 1 && this->cols() == 1); + return derived().coeffRef(row,col); + } + + const Scalar& coeffRef(Index i) const + { + EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) + eigen_assert(this->rows() == 1 && this->cols() == 1); + return derived().coeffRef(i); + } + protected: const LhsNested m_lhs; const RhsNested m_rhs; mutable PlainObject 
m_result; - - private: - - // discard coeff methods - void coeff(Index,Index) const; - void coeffRef(Index,Index); - void coeff(Index) const; - void coeffRef(Index); }; // here we need to overload the nested rule for products // such that the nested type is a const reference to a plain matrix +namespace internal { template -struct ei_nested, N, PlainObject> +struct nested, N, PlainObject> { typedef PlainObject const& type; }; +} template class ScaledProduct; @@ -178,7 +210,7 @@ operator*(const ProductBase& prod, typename Derived::Scalar x) { return ScaledProduct(prod.derived(), x); } template -typename ei_enable_if::ret, +typename internal::enable_if::value, const ScaledProduct >::type operator*(const ProductBase& prod, typename Derived::RealScalar x) { return ScaledProduct(prod.derived(), x); } @@ -190,20 +222,21 @@ operator*(typename Derived::Scalar x,const ProductBase& prod) { return ScaledProduct(prod.derived(), x); } template -typename ei_enable_if::ret, +typename internal::enable_if::value, const ScaledProduct >::type operator*(typename Derived::RealScalar x,const ProductBase& prod) { return ScaledProduct(prod.derived(), x); } - +namespace internal { template -struct ei_traits > - : ei_traits, +struct traits > + : traits, typename NestedProduct::_LhsNested, typename NestedProduct::_RhsNested> > { - typedef typename ei_traits::StorageKind StorageKind; + typedef typename traits::StorageKind StorageKind; }; +} template class ScaledProduct @@ -233,6 +266,8 @@ class ScaledProduct template inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { m_prod.derived().scaleAndAddTo(dst,alpha); } + + const Scalar& alpha() const { return m_alpha; } protected: const NestedProduct& m_prod; diff --git a/gtsam/3rdparty/Eigen/src/Core/Random.h b/gtsam/3rdparty/Eigen/src/Core/Random.h index 06a20fed0..b7d90103a 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Random.h +++ b/gtsam/3rdparty/Eigen/src/Core/Random.h @@ -25,15 +25,20 @@ #ifndef EIGEN_RANDOM_H #define EIGEN_RANDOM_H 
-template struct ei_scalar_random_op { - EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_random_op) +namespace internal { + +template struct scalar_random_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_random_op) template - inline const Scalar operator() (Index, Index = 0) const { return ei_random(); } + inline const Scalar operator() (Index, Index = 0) const { return random(); } }; + template -struct ei_functor_traits > +struct functor_traits > { enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false, IsRepeatable = false }; }; +} // end namespace internal + /** \returns a random matrix expression * * The parameters \a rows and \a cols are the number of rows and of columns of @@ -53,10 +58,10 @@ struct ei_functor_traits > * \sa MatrixBase::setRandom(), MatrixBase::Random(Index), MatrixBase::Random() */ template -inline const CwiseNullaryOp::Scalar>, Derived> +inline const CwiseNullaryOp::Scalar>, Derived> DenseBase::Random(Index rows, Index cols) { - return NullaryExpr(rows, cols, ei_scalar_random_op()); + return NullaryExpr(rows, cols, internal::scalar_random_op()); } /** \returns a random vector expression @@ -80,10 +85,10 @@ DenseBase::Random(Index rows, Index cols) * \sa MatrixBase::setRandom(), MatrixBase::Random(Index,Index), MatrixBase::Random() */ template -inline const CwiseNullaryOp::Scalar>, Derived> +inline const CwiseNullaryOp::Scalar>, Derived> DenseBase::Random(Index size) { - return NullaryExpr(size, ei_scalar_random_op()); + return NullaryExpr(size, internal::scalar_random_op()); } /** \returns a fixed-size random matrix or vector expression @@ -101,10 +106,10 @@ DenseBase::Random(Index size) * \sa MatrixBase::setRandom(), MatrixBase::Random(Index,Index), MatrixBase::Random(Index) */ template -inline const CwiseNullaryOp::Scalar>, Derived> +inline const CwiseNullaryOp::Scalar>, Derived> DenseBase::Random() { - return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, ei_scalar_random_op()); + return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, 
internal::scalar_random_op()); } /** Sets all coefficients in this expression to random values. @@ -131,7 +136,7 @@ inline Derived& DenseBase::setRandom() */ template EIGEN_STRONG_INLINE Derived& -DenseStorageBase::setRandom(Index size) +PlainObjectBase::setRandom(Index size) { resize(size); return setRandom(); @@ -149,7 +154,7 @@ DenseStorageBase::setRandom(Index size) */ template EIGEN_STRONG_INLINE Derived& -DenseStorageBase::setRandom(Index rows, Index cols) +PlainObjectBase::setRandom(Index rows, Index cols) { resize(rows, cols); return setRandom(); diff --git a/gtsam/3rdparty/Eigen/src/Core/Redux.h b/gtsam/3rdparty/Eigen/src/Core/Redux.h index 504a51229..f9f5a95d5 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Redux.h +++ b/gtsam/3rdparty/Eigen/src/Core/Redux.h @@ -26,6 +26,8 @@ #ifndef EIGEN_REDUX_H #define EIGEN_REDUX_H +namespace internal { + // TODO // * implement other kind of vectorization // * factorize code @@ -35,11 +37,11 @@ ***************************************************************************/ template -struct ei_redux_traits +struct redux_traits { public: enum { - PacketSize = ei_packet_traits::size, + PacketSize = packet_traits::size, InnerMaxSize = int(Derived::IsRowMajor) ? Derived::MaxColsAtCompileTime : Derived::MaxRowsAtCompileTime @@ -47,7 +49,7 @@ public: enum { MightVectorize = (int(Derived::Flags)&ActualPacketAccessBit) - && (ei_functor_traits::PacketAccess), + && (functor_traits::PacketAccess), MayLinearVectorize = MightVectorize && (int(Derived::Flags)&LinearAccessBit), MaySliceVectorize = MightVectorize && int(InnerMaxSize)>=3*PacketSize }; @@ -63,10 +65,10 @@ public: enum { Cost = ( Derived::SizeAtCompileTime == Dynamic || Derived::CoeffReadCost == Dynamic - || (Derived::SizeAtCompileTime!=1 && ei_functor_traits::Cost == Dynamic) + || (Derived::SizeAtCompileTime!=1 && functor_traits::Cost == Dynamic) ) ? 
Dynamic : Derived::SizeAtCompileTime * Derived::CoeffReadCost - + (Derived::SizeAtCompileTime-1) * ei_functor_traits::Cost, + + (Derived::SizeAtCompileTime-1) * functor_traits::Cost, UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Traversal) == int(DefaultTraversal) ? 1 : int(PacketSize)) }; @@ -85,7 +87,7 @@ public: /*** no vectorization ***/ template -struct ei_redux_novec_unroller +struct redux_novec_unroller { enum { HalfLength = Length/2 @@ -95,13 +97,13 @@ struct ei_redux_novec_unroller EIGEN_STRONG_INLINE static Scalar run(const Derived &mat, const Func& func) { - return func(ei_redux_novec_unroller::run(mat,func), - ei_redux_novec_unroller::run(mat,func)); + return func(redux_novec_unroller::run(mat,func), + redux_novec_unroller::run(mat,func)); } }; template -struct ei_redux_novec_unroller +struct redux_novec_unroller { enum { outer = Start / Derived::InnerSizeAtCompileTime, @@ -120,7 +122,7 @@ struct ei_redux_novec_unroller // to prevent false warnings regarding failed inlining though // for 0 length run() will never be called at all. 
template -struct ei_redux_novec_unroller +struct redux_novec_unroller { typedef typename Derived::Scalar Scalar; EIGEN_STRONG_INLINE static Scalar run(const Derived&, const Func&) { return Scalar(); } @@ -129,36 +131,36 @@ struct ei_redux_novec_unroller /*** vectorization ***/ template -struct ei_redux_vec_unroller +struct redux_vec_unroller { enum { - PacketSize = ei_packet_traits::size, + PacketSize = packet_traits::size, HalfLength = Length/2 }; typedef typename Derived::Scalar Scalar; - typedef typename ei_packet_traits::type PacketScalar; + typedef typename packet_traits::type PacketScalar; EIGEN_STRONG_INLINE static PacketScalar run(const Derived &mat, const Func& func) { return func.packetOp( - ei_redux_vec_unroller::run(mat,func), - ei_redux_vec_unroller::run(mat,func) ); + redux_vec_unroller::run(mat,func), + redux_vec_unroller::run(mat,func) ); } }; template -struct ei_redux_vec_unroller +struct redux_vec_unroller { enum { - index = Start * ei_packet_traits::size, + index = Start * packet_traits::size, outer = index / int(Derived::InnerSizeAtCompileTime), inner = index % int(Derived::InnerSizeAtCompileTime), alignment = (Derived::Flags & AlignedBit) ? 
Aligned : Unaligned }; typedef typename Derived::Scalar Scalar; - typedef typename ei_packet_traits::type PacketScalar; + typedef typename packet_traits::type PacketScalar; EIGEN_STRONG_INLINE static PacketScalar run(const Derived &mat, const Func&) { @@ -171,19 +173,19 @@ struct ei_redux_vec_unroller ***************************************************************************/ template::Traversal, - int Unrolling = ei_redux_traits::Unrolling + int Traversal = redux_traits::Traversal, + int Unrolling = redux_traits::Unrolling > -struct ei_redux_impl; +struct redux_impl; template -struct ei_redux_impl +struct redux_impl { typedef typename Derived::Scalar Scalar; typedef typename Derived::Index Index; static EIGEN_STRONG_INLINE Scalar run(const Derived& mat, const Func& func) { - ei_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix"); + eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix"); Scalar res; res = mat.coeffByOuterInner(0, 0); for(Index i = 1; i < mat.innerSize(); ++i) @@ -196,25 +198,25 @@ struct ei_redux_impl }; template -struct ei_redux_impl - : public ei_redux_novec_unroller +struct redux_impl + : public redux_novec_unroller {}; template -struct ei_redux_impl +struct redux_impl { typedef typename Derived::Scalar Scalar; - typedef typename ei_packet_traits::type PacketScalar; + typedef typename packet_traits::type PacketScalar; typedef typename Derived::Index Index; static Scalar run(const Derived& mat, const Func& func) { const Index size = mat.size(); - ei_assert(size && "you are using an empty matrix"); - const Index packetSize = ei_packet_traits::size; - const Index alignedStart = ei_first_aligned(mat); + eigen_assert(size && "you are using an empty matrix"); + const Index packetSize = packet_traits::size; + const Index alignedStart = first_aligned(mat); enum { - alignment = (Derived::Flags & DirectAccessBit) || (Derived::Flags & AlignedBit) + alignment = bool(Derived::Flags & DirectAccessBit) || 
bool(Derived::Flags & AlignedBit) ? Aligned : Unaligned }; const Index alignedSize = ((size-alignedStart)/packetSize)*packetSize; @@ -246,19 +248,19 @@ struct ei_redux_impl }; template -struct ei_redux_impl +struct redux_impl { typedef typename Derived::Scalar Scalar; - typedef typename ei_packet_traits::type PacketScalar; + typedef typename packet_traits::type PacketScalar; typedef typename Derived::Index Index; static Scalar run(const Derived& mat, const Func& func) { - ei_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix"); + eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix"); const Index innerSize = mat.innerSize(); const Index outerSize = mat.outerSize(); enum { - packetSize = ei_packet_traits::size + packetSize = packet_traits::size }; const Index packetedInnerSize = ((innerSize)/packetSize)*packetSize; Scalar res; @@ -277,7 +279,7 @@ struct ei_redux_impl else // too small to vectorize anything. // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize. 
{ - res = ei_redux_impl::run(mat, func); + res = redux_impl::run(mat, func); } return res; @@ -285,25 +287,31 @@ struct ei_redux_impl }; template -struct ei_redux_impl +struct redux_impl { typedef typename Derived::Scalar Scalar; - typedef typename ei_packet_traits::type PacketScalar; + typedef typename packet_traits::type PacketScalar; enum { - PacketSize = ei_packet_traits::size, + PacketSize = packet_traits::size, Size = Derived::SizeAtCompileTime, VectorizedSize = (Size / PacketSize) * PacketSize }; EIGEN_STRONG_INLINE static Scalar run(const Derived& mat, const Func& func) { - ei_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix"); - Scalar res = func.predux(ei_redux_vec_unroller::run(mat,func)); + eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix"); + Scalar res = func.predux(redux_vec_unroller::run(mat,func)); if (VectorizedSize != Size) - res = func(res,ei_redux_novec_unroller::run(mat,func)); + res = func(res,redux_novec_unroller::run(mat,func)); return res; } }; +} // end namespace internal + +/*************************************************************************** +* Part 4 : public API +***************************************************************************/ + /** \returns the result of a full redux operation on the whole matrix or vector using \a func * @@ -314,30 +322,30 @@ struct ei_redux_impl template -EIGEN_STRONG_INLINE typename ei_result_of::Scalar)>::type +EIGEN_STRONG_INLINE typename internal::result_of::Scalar)>::type DenseBase::redux(const Func& func) const { - typedef typename ei_cleantype::type ThisNested; - return ei_redux_impl + typedef typename internal::remove_all::type ThisNested; + return internal::redux_impl ::run(derived(), func); } /** \returns the minimum of all coefficients of *this */ template -EIGEN_STRONG_INLINE typename ei_traits::Scalar +EIGEN_STRONG_INLINE typename internal::traits::Scalar DenseBase::minCoeff() const { - return this->redux(Eigen::ei_scalar_min_op()); + 
return this->redux(Eigen::internal::scalar_min_op()); } /** \returns the maximum of all coefficients of *this */ template -EIGEN_STRONG_INLINE typename ei_traits::Scalar +EIGEN_STRONG_INLINE typename internal::traits::Scalar DenseBase::maxCoeff() const { - return this->redux(Eigen::ei_scalar_max_op()); + return this->redux(Eigen::internal::scalar_max_op()); } /** \returns the sum of all coefficients of *this @@ -345,12 +353,12 @@ DenseBase::maxCoeff() const * \sa trace(), prod(), mean() */ template -EIGEN_STRONG_INLINE typename ei_traits::Scalar +EIGEN_STRONG_INLINE typename internal::traits::Scalar DenseBase::sum() const { if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0)) return Scalar(0); - return this->redux(Eigen::ei_scalar_sum_op()); + return this->redux(Eigen::internal::scalar_sum_op()); } /** \returns the mean of all coefficients of *this @@ -358,10 +366,10 @@ DenseBase::sum() const * \sa trace(), prod(), sum() */ template -EIGEN_STRONG_INLINE typename ei_traits::Scalar +EIGEN_STRONG_INLINE typename internal::traits::Scalar DenseBase::mean() const { - return Scalar(this->redux(Eigen::ei_scalar_sum_op())) / Scalar(this->size()); + return Scalar(this->redux(Eigen::internal::scalar_sum_op())) / Scalar(this->size()); } /** \returns the product of all coefficients of *this @@ -372,12 +380,12 @@ DenseBase::mean() const * \sa sum(), mean(), trace() */ template -EIGEN_STRONG_INLINE typename ei_traits::Scalar +EIGEN_STRONG_INLINE typename internal::traits::Scalar DenseBase::prod() const { if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0)) return Scalar(1); - return this->redux(Eigen::ei_scalar_product_op()); + return this->redux(Eigen::internal::scalar_product_op()); } /** \returns the trace of \c *this, i.e. the sum of the coefficients on the main diagonal. 
@@ -387,7 +395,7 @@ DenseBase::prod() const * \sa diagonal(), sum() */ template -EIGEN_STRONG_INLINE typename ei_traits::Scalar +EIGEN_STRONG_INLINE typename internal::traits::Scalar MatrixBase::trace() const { return derived().diagonal().sum(); diff --git a/gtsam/3rdparty/Eigen/src/Core/Replicate.h b/gtsam/3rdparty/Eigen/src/Core/Replicate.h index 87dea0533..d2f9712db 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Replicate.h +++ b/gtsam/3rdparty/Eigen/src/Core/Replicate.h @@ -39,15 +39,17 @@ * * \sa DenseBase::replicate() */ + +namespace internal { template -struct ei_traits > - : ei_traits +struct traits > + : traits { typedef typename MatrixType::Scalar Scalar; - typedef typename ei_traits::StorageKind StorageKind; - typedef typename ei_traits::XprKind XprKind; - typedef typename ei_nested::type MatrixTypeNested; - typedef typename ei_unref::type _MatrixTypeNested; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::XprKind XprKind; + typedef typename nested::type MatrixTypeNested; + typedef typename remove_reference::type _MatrixTypeNested; enum { RowsAtCompileTime = RowFactor==Dynamic || int(MatrixType::RowsAtCompileTime)==Dynamic ? 
Dynamic @@ -65,29 +67,30 @@ struct ei_traits > CoeffReadCost = _MatrixTypeNested::CoeffReadCost }; }; +} template class Replicate - : public ei_dense_xpr_base< Replicate >::type + : public internal::dense_xpr_base< Replicate >::type { public: - typedef typename ei_dense_xpr_base::type Base; + typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Replicate) template inline explicit Replicate(const OriginalMatrixType& matrix) : m_matrix(matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor) { - EIGEN_STATIC_ASSERT((ei_is_same_type::ret), + EIGEN_STATIC_ASSERT((internal::is_same::type,OriginalMatrixType>::value), THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE) - ei_assert(RowFactor!=Dynamic && ColFactor!=Dynamic); + eigen_assert(RowFactor!=Dynamic && ColFactor!=Dynamic); } template inline Replicate(const OriginalMatrixType& matrix, int rowFactor, int colFactor) : m_matrix(matrix), m_rowFactor(rowFactor), m_colFactor(colFactor) { - EIGEN_STATIC_ASSERT((ei_is_same_type::ret), + EIGEN_STATIC_ASSERT((internal::is_same::type,OriginalMatrixType>::value), THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE) } @@ -97,10 +100,10 @@ template class Replicate inline Scalar coeff(Index row, Index col) const { // try to avoid using modulo; this is a pure optimization strategy - const Index actual_row = ei_traits::RowsAtCompileTime==1 ? 0 + const Index actual_row = internal::traits::RowsAtCompileTime==1 ? 0 : RowFactor==1 ? row : row%m_matrix.rows(); - const Index actual_col = ei_traits::ColsAtCompileTime==1 ? 0 + const Index actual_col = internal::traits::ColsAtCompileTime==1 ? 0 : ColFactor==1 ? col : col%m_matrix.cols(); @@ -109,10 +112,10 @@ template class Replicate template inline PacketScalar packet(Index row, Index col) const { - const Index actual_row = ei_traits::RowsAtCompileTime==1 ? 0 + const Index actual_row = internal::traits::RowsAtCompileTime==1 ? 0 : RowFactor==1 ? 
row : row%m_matrix.rows(); - const Index actual_col = ei_traits::ColsAtCompileTime==1 ? 0 + const Index actual_col = internal::traits::ColsAtCompileTime==1 ? 0 : ColFactor==1 ? col : col%m_matrix.cols(); @@ -122,8 +125,8 @@ template class Replicate protected: const typename MatrixType::Nested m_matrix; - const ei_variable_if_dynamic m_rowFactor; - const ei_variable_if_dynamic m_colFactor; + const internal::variable_if_dynamic m_rowFactor; + const internal::variable_if_dynamic m_colFactor; }; /** diff --git a/gtsam/3rdparty/Eigen/src/Core/ReturnByValue.h b/gtsam/3rdparty/Eigen/src/Core/ReturnByValue.h index 82f194b56..24c5a4e21 100644 --- a/gtsam/3rdparty/Eigen/src/Core/ReturnByValue.h +++ b/gtsam/3rdparty/Eigen/src/Core/ReturnByValue.h @@ -30,43 +30,50 @@ * \ingroup Core_Module * */ + +namespace internal { + template -struct ei_traits > - : public ei_traits::ReturnType> +struct traits > + : public traits::ReturnType> { enum { // We're disabling the DirectAccess because e.g. the constructor of // the Block-with-DirectAccess expression requires to have a coeffRef method. // Also, we don't want to have to implement the stride stuff. - Flags = (ei_traits::ReturnType>::Flags + Flags = (traits::ReturnType>::Flags | EvalBeforeNestingBit) & ~DirectAccessBit }; }; /* The ReturnByValue object doesn't even have a coeff() method. * So the only way that nesting it in an expression can work, is by evaluating it into a plain matrix. - * So ei_nested always gives the plain return matrix type. + * So internal::nested always gives the plain return matrix type. + * + * FIXME: I don't understand why we need this specialization: isn't this taken care of by the EvalBeforeNestingBit ?? 
*/ template -struct ei_nested, n, PlainObject> +struct nested, n, PlainObject> { - typedef typename ei_traits::ReturnType type; + typedef typename traits::ReturnType type; }; +} // end namespace internal + template class ReturnByValue - : public ei_dense_xpr_base< ReturnByValue >::type + : public internal::dense_xpr_base< ReturnByValue >::type { public: - typedef typename ei_traits::ReturnType ReturnType; + typedef typename internal::traits::ReturnType ReturnType; - typedef typename ei_dense_xpr_base::type Base; + typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(ReturnByValue) template inline void evalTo(Dest& dst) const - { static_cast(this)->evalTo(dst); } - inline Index rows() const { return static_cast(this)->rows(); } - inline Index cols() const { return static_cast(this)->cols(); } + { static_cast(this)->evalTo(dst); } + inline Index rows() const { return static_cast(this)->rows(); } + inline Index cols() const { return static_cast(this)->cols(); } #ifndef EIGEN_PARSED_BY_DOXYGEN #define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT diff --git a/gtsam/3rdparty/Eigen/src/Core/Reverse.h b/gtsam/3rdparty/Eigen/src/Core/Reverse.h index abc44bde6..600744ae7 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Reverse.h +++ b/gtsam/3rdparty/Eigen/src/Core/Reverse.h @@ -40,15 +40,18 @@ * * \sa MatrixBase::reverse(), VectorwiseOp::reverse() */ + +namespace internal { + template -struct ei_traits > - : ei_traits +struct traits > + : traits { typedef typename MatrixType::Scalar Scalar; - typedef typename ei_traits::StorageKind StorageKind; - typedef typename ei_traits::XprKind XprKind; - typedef typename ei_nested::type MatrixTypeNested; - typedef typename ei_unref::type _MatrixTypeNested; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::XprKind XprKind; + typedef typename nested::type MatrixTypeNested; + typedef typename 
remove_reference::type _MatrixTypeNested; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, @@ -65,21 +68,24 @@ struct ei_traits > }; }; -template struct ei_reverse_packet_cond +template struct reverse_packet_cond { - static inline PacketScalar run(const PacketScalar& x) { return ei_preverse(x); } + static inline PacketScalar run(const PacketScalar& x) { return preverse(x); } }; -template struct ei_reverse_packet_cond + +template struct reverse_packet_cond { static inline PacketScalar run(const PacketScalar& x) { return x; } }; +} // end namespace internal + template class Reverse - : public ei_dense_xpr_base< Reverse >::type + : public internal::dense_xpr_base< Reverse >::type { public: - typedef typename ei_dense_xpr_base::type Base; + typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Reverse) using Base::IsRowMajor; @@ -89,7 +95,7 @@ template class Reverse protected: enum { - PacketSize = ei_packet_traits::size, + PacketSize = internal::packet_traits::size, IsColMajor = !IsRowMajor, ReverseRow = (Direction == Vertical) || (Direction == BothDirections), ReverseCol = (Direction == Horizontal) || (Direction == BothDirections), @@ -99,7 +105,7 @@ template class Reverse || ((Direction == Vertical) && IsColMajor) || ((Direction == Horizontal) && IsRowMajor) }; - typedef ei_reverse_packet_cond reverse_packet; + typedef internal::reverse_packet_cond reverse_packet; public: inline Reverse(const MatrixType& matrix) : m_matrix(matrix) { } @@ -116,7 +122,7 @@ template class Reverse inline Scalar& operator()(Index row, Index col) { - ei_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); + eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); return coeffRef(row, col); } @@ -144,7 +150,7 @@ template class Reverse inline Scalar& operator()(Index index) { - ei_assert(index >= 0 && index < m_matrix.size()); + eigen_assert(index >= 0 && index < m_matrix.size()); 
return coeffRef(index); } @@ -168,13 +174,13 @@ template class Reverse template inline const PacketScalar packet(Index index) const { - return ei_preverse(m_matrix.template packet( m_matrix.size() - index - PacketSize )); + return internal::preverse(m_matrix.template packet( m_matrix.size() - index - PacketSize )); } template inline void writePacket(Index index, const PacketScalar& x) { - m_matrix.const_cast_derived().template writePacket(m_matrix.size() - index - PacketSize, ei_preverse(x)); + m_matrix.const_cast_derived().template writePacket(m_matrix.size() - index - PacketSize, internal::preverse(x)); } protected: @@ -188,7 +194,7 @@ template class Reverse * */ template -inline Reverse +inline typename DenseBase::ReverseReturnType DenseBase::reverse() { return derived(); @@ -196,7 +202,7 @@ DenseBase::reverse() /** This is the const version of reverse(). */ template -inline const Reverse +inline const typename DenseBase::ConstReverseReturnType DenseBase::reverse() const { return derived(); @@ -210,7 +216,7 @@ DenseBase::reverse() const * the following additional features: * - less error prone: doing the same operation with .reverse() requires special care: * \code m = m.reverse().eval(); \endcode - * - no temporary object is created (currently there is one created but could be avoided using swap) + * - this API allows to avoid creating a temporary (the current implementation creates a temporary, but that could be avoided using swap) * - it allows future optimizations (cache friendliness, etc.) 
* * \sa reverse() */ diff --git a/gtsam/3rdparty/Eigen/src/Core/Select.h b/gtsam/3rdparty/Eigen/src/Core/Select.h index 000c70905..d0cd66a26 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Select.h +++ b/gtsam/3rdparty/Eigen/src/Core/Select.h @@ -40,13 +40,14 @@ * \sa DenseBase::select(const DenseBase&, const DenseBase&) const */ +namespace internal { template -struct ei_traits > - : ei_traits +struct traits > + : traits { - typedef typename ei_traits::Scalar Scalar; + typedef typename traits::Scalar Scalar; typedef Dense StorageKind; - typedef typename ei_traits::XprKind XprKind; + typedef typename traits::XprKind XprKind; typedef typename ConditionMatrixType::Nested ConditionMatrixNested; typedef typename ThenMatrixType::Nested ThenMatrixNested; typedef typename ElseMatrixType::Nested ElseMatrixNested; @@ -56,19 +57,20 @@ struct ei_traits > MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime, Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & HereditaryBits, - CoeffReadCost = ei_traits::type>::CoeffReadCost - + EIGEN_SIZE_MAX(ei_traits::type>::CoeffReadCost, - ei_traits::type>::CoeffReadCost) + CoeffReadCost = traits::type>::CoeffReadCost + + EIGEN_SIZE_MAX(traits::type>::CoeffReadCost, + traits::type>::CoeffReadCost) }; }; +} template -class Select : ei_no_assignment_operator, - public ei_dense_xpr_base< Select >::type +class Select : internal::no_assignment_operator, + public internal::dense_xpr_base< Select >::type { public: - typedef typename ei_dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Select) Select(const ConditionMatrixType& conditionMatrix, @@ -76,8 +78,8 @@ class Select : ei_no_assignment_operator, const ElseMatrixType& elseMatrix) : m_condition(conditionMatrix), m_then(thenMatrix), m_else(elseMatrix) { - ei_assert(m_condition.rows() == m_then.rows() && m_condition.rows() == m_else.rows()); - ei_assert(m_condition.cols() == m_then.cols() && 
m_condition.cols() == m_else.cols()); + eigen_assert(m_condition.rows() == m_then.rows() && m_condition.rows() == m_else.rows()); + eigen_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols()); } Index rows() const { return m_condition.rows(); } diff --git a/gtsam/3rdparty/Eigen/src/Core/SelfAdjointView.h b/gtsam/3rdparty/Eigen/src/Core/SelfAdjointView.h index d72a94e15..0e9872bf5 100644 --- a/gtsam/3rdparty/Eigen/src/Core/SelfAdjointView.h +++ b/gtsam/3rdparty/Eigen/src/Core/SelfAdjointView.h @@ -40,19 +40,23 @@ * * \sa class TriangularBase, MatrixBase::selfAdjointView() */ + +namespace internal { template -struct ei_traits > : ei_traits +struct traits > : traits { - typedef typename ei_nested::type MatrixTypeNested; - typedef typename ei_unref::type _MatrixTypeNested; + typedef typename nested::type MatrixTypeNested; + typedef typename remove_all::type MatrixTypeNestedCleaned; typedef MatrixType ExpressionType; + typedef typename MatrixType::PlainObject DenseMatrixType; enum { Mode = UpLo | SelfAdjoint, - Flags = _MatrixTypeNested::Flags & (HereditaryBits) + Flags = MatrixTypeNestedCleaned::Flags & (HereditaryBits) & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit)), // FIXME these flags should be preserved - CoeffReadCost = _MatrixTypeNested::CoeffReadCost + CoeffReadCost = MatrixTypeNestedCleaned::CoeffReadCost }; }; +} template @@ -65,19 +69,21 @@ template class SelfAdjointView public: typedef TriangularBase Base; + typedef typename internal::traits::MatrixTypeNested MatrixTypeNested; + typedef typename internal::traits::MatrixTypeNestedCleaned MatrixTypeNestedCleaned; /** \brief The type of coefficients in this matrix */ - typedef typename ei_traits::Scalar Scalar; + typedef typename internal::traits::Scalar Scalar; typedef typename MatrixType::Index Index; enum { - Mode = ei_traits::Mode + Mode = internal::traits::Mode }; typedef typename MatrixType::PlainObject PlainObject; inline SelfAdjointView(const MatrixType& matrix) 
: m_matrix(matrix) - { ei_assert(ei_are_flags_consistent::ret); } + {} inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } @@ -103,10 +109,10 @@ template class SelfAdjointView } /** \internal */ - const MatrixType& _expression() const { return m_matrix; } + const MatrixTypeNestedCleaned& _expression() const { return m_matrix; } - const MatrixType& nestedExpression() const { return m_matrix; } - MatrixType& nestedExpression() { return const_cast(m_matrix); } + const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; } + MatrixTypeNestedCleaned& nestedExpression() { return *const_cast(&m_matrix); } /** Efficient self-adjoint matrix times vector/matrix product */ template @@ -129,7 +135,7 @@ template class SelfAdjointView } /** Perform a symmetric rank 2 update of the selfadjoint matrix \c *this: - * \f$ this = this + \alpha ( u v^* + v u^*) \f$ + * \f$ this = this + \alpha u v^* + conj(\alpha) v u^* \f$ * \returns a reference to \c *this * * The vectors \a u and \c v \b must be column vectors, however they can be @@ -164,27 +170,52 @@ template class SelfAdjointView /** Real part of #Scalar */ typedef typename NumTraits::Real RealScalar; /** Return type of eigenvalues() */ - typedef Matrix::ColsAtCompileTime, 1> EigenvaluesReturnType; + typedef Matrix::ColsAtCompileTime, 1> EigenvaluesReturnType; EigenvaluesReturnType eigenvalues() const; RealScalar operatorNorm() const; + + #ifdef EIGEN2_SUPPORT + template + SelfAdjointView& operator=(const MatrixBase& other) + { + enum { + OtherPart = UpLo == Upper ? StrictlyLower : StrictlyUpper + }; + m_matrix.const_cast_derived().template triangularView() = other; + m_matrix.const_cast_derived().template triangularView() = other.adjoint(); + return *this; + } + template + SelfAdjointView& operator=(const TriangularView& other) + { + enum { + OtherPart = UpLo == Upper ? 
StrictlyLower : StrictlyUpper + }; + m_matrix.const_cast_derived().template triangularView() = other.toDenseMatrix(); + m_matrix.const_cast_derived().template triangularView() = other.toDenseMatrix().adjoint(); + return *this; + } + #endif protected: - const typename MatrixType::Nested m_matrix; + const MatrixTypeNested m_matrix; }; // template -// ei_selfadjoint_matrix_product_returntype > +// internal::selfadjoint_matrix_product_returntype > // operator*(const MatrixBase& lhs, const SelfAdjointView& rhs) // { -// return ei_matrix_selfadjoint_product_returntype >(lhs.derived(),rhs); +// return internal::matrix_selfadjoint_product_returntype >(lhs.derived(),rhs); // } // selfadjoint to dense matrix +namespace internal { + template -struct ei_triangular_assignment_selector +struct triangular_assignment_selector { enum { col = (UnrollCount-1) / Derived1::RowsAtCompileTime, @@ -193,23 +224,23 @@ struct ei_triangular_assignment_selector::run(dst, src); + triangular_assignment_selector::run(dst, src); if(row == col) - dst.coeffRef(row, col) = ei_real(src.coeff(row, col)); + dst.coeffRef(row, col) = real(src.coeff(row, col)); else if(row < col) - dst.coeffRef(col, row) = ei_conj(dst.coeffRef(row, col) = src.coeff(row, col)); + dst.coeffRef(col, row) = conj(dst.coeffRef(row, col) = src.coeff(row, col)); } }; template -struct ei_triangular_assignment_selector +struct triangular_assignment_selector { inline static void run(Derived1 &, const Derived2 &) {} }; template -struct ei_triangular_assignment_selector +struct triangular_assignment_selector { enum { col = (UnrollCount-1) / Derived1::RowsAtCompileTime, @@ -218,23 +249,23 @@ struct ei_triangular_assignment_selector::run(dst, src); + triangular_assignment_selector::run(dst, src); if(row == col) - dst.coeffRef(row, col) = ei_real(src.coeff(row, col)); + dst.coeffRef(row, col) = real(src.coeff(row, col)); else if(row > col) - dst.coeffRef(col, row) = ei_conj(dst.coeffRef(row, col) = src.coeff(row, col)); + 
dst.coeffRef(col, row) = conj(dst.coeffRef(row, col) = src.coeff(row, col)); } }; template -struct ei_triangular_assignment_selector +struct triangular_assignment_selector { inline static void run(Derived1 &, const Derived2 &) {} }; template -struct ei_triangular_assignment_selector +struct triangular_assignment_selector { typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) @@ -244,7 +275,7 @@ struct ei_triangular_assignment_selector -struct ei_triangular_assignment_selector +struct triangular_assignment_selector { inline static void run(Derived1 &dst, const Derived2 &src) { @@ -262,27 +293,31 @@ struct ei_triangular_assignment_selector template -const SelfAdjointView MatrixBase::selfadjointView() const +typename MatrixBase::template ConstSelfAdjointViewReturnType::Type +MatrixBase::selfadjointView() const { return derived(); } template template -SelfAdjointView MatrixBase::selfadjointView() +typename MatrixBase::template SelfAdjointViewReturnType::Type +MatrixBase::selfadjointView() { return derived(); } diff --git a/gtsam/3rdparty/Eigen/src/Core/SelfCwiseBinaryOp.h b/gtsam/3rdparty/Eigen/src/Core/SelfCwiseBinaryOp.h index f77589747..4e9ca8874 100644 --- a/gtsam/3rdparty/Eigen/src/Core/SelfCwiseBinaryOp.h +++ b/gtsam/3rdparty/Eigen/src/Core/SelfCwiseBinaryOp.h @@ -39,28 +39,31 @@ * * \sa class SwapWrapper for a similar trick. */ + +namespace internal { template -struct ei_traits > - : ei_traits > +struct traits > + : traits > { enum { // Note that it is still a good idea to preserve the DirectAccessBit // so that assign can correctly align the data. 
- Flags = ei_traits >::Flags | (Lhs::Flags&DirectAccessBit) | (Lhs::Flags&LvalueBit), + Flags = traits >::Flags | (Lhs::Flags&DirectAccessBit) | (Lhs::Flags&LvalueBit), OuterStrideAtCompileTime = Lhs::OuterStrideAtCompileTime, InnerStrideAtCompileTime = Lhs::InnerStrideAtCompileTime }; }; +} template class SelfCwiseBinaryOp - : public ei_dense_xpr_base< SelfCwiseBinaryOp >::type + : public internal::dense_xpr_base< SelfCwiseBinaryOp >::type { public: - typedef typename ei_dense_xpr_base::type Base; + typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(SelfCwiseBinaryOp) - typedef typename ei_packet_traits::type Packet; + typedef typename internal::packet_traits::type Packet; inline SelfCwiseBinaryOp(Lhs& xpr, const BinaryOp& func = BinaryOp()) : m_matrix(xpr), m_functor(func) {} @@ -74,12 +77,22 @@ template class SelfCwiseBinaryOp // TODO make Assign use .data() inline Scalar& coeffRef(Index row, Index col) { + EIGEN_STATIC_ASSERT_LVALUE(Lhs) return m_matrix.const_cast_derived().coeffRef(row, col); } + inline const Scalar& coeffRef(Index row, Index col) const + { + return m_matrix.coeffRef(row, col); + } // note that this function is needed by assign to correctly align loads/stores // TODO make Assign use .data() inline Scalar& coeffRef(Index index) + { + EIGEN_STATIC_ASSERT_LVALUE(Lhs) + return m_matrix.const_cast_derived().coeffRef(index); + } + inline const Scalar& coeffRef(Index index) const { return m_matrix.const_cast_derived().coeffRef(index); } @@ -88,7 +101,7 @@ template class SelfCwiseBinaryOp void copyCoeff(Index row, Index col, const DenseBase& other) { OtherDerived& _other = other.const_cast_derived(); - ei_internal_assert(row >= 0 && row < rows() + eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); Scalar& tmp = m_matrix.coeffRef(row,col); tmp = m_functor(tmp, _other.coeff(row,col)); @@ -98,7 +111,7 @@ template class SelfCwiseBinaryOp void copyCoeff(Index index, const DenseBase& other) { 
OtherDerived& _other = other.const_cast_derived(); - ei_internal_assert(index >= 0 && index < m_matrix.size()); + eigen_internal_assert(index >= 0 && index < m_matrix.size()); Scalar& tmp = m_matrix.coeffRef(index); tmp = m_functor(tmp, _other.coeff(index)); } @@ -107,7 +120,7 @@ template class SelfCwiseBinaryOp void copyPacket(Index row, Index col, const DenseBase& other) { OtherDerived& _other = other.const_cast_derived(); - ei_internal_assert(row >= 0 && row < rows() + eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); m_matrix.template writePacket(row, col, m_functor.packetOp(m_matrix.template packet(row, col),_other.template packet(row, col)) ); @@ -117,7 +130,7 @@ template class SelfCwiseBinaryOp void copyPacket(Index index, const DenseBase& other) { OtherDerived& _other = other.const_cast_derived(); - ei_internal_assert(index >= 0 && index < m_matrix.size()); + eigen_internal_assert(index >= 0 && index < m_matrix.size()); m_matrix.template writePacket(index, m_functor.packetOp(m_matrix.template packet(index),_other.template packet(index)) ); } @@ -131,10 +144,10 @@ template class SelfCwiseBinaryOp EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename RhsDerived::Scalar); #ifdef EIGEN_DEBUG_ASSIGN - ei_assign_traits::debug(); + internal::assign_traits::debug(); #endif - ei_assert(rows() == rhs.rows() && cols() == rhs.cols()); - ei_assign_impl::run(*this,rhs.derived()); + eigen_assert(rows() == rhs.rows() && cols() == rhs.cols()); + internal::assign_impl::run(*this,rhs.derived()); #ifndef EIGEN_NO_DEBUG this->checkTransposeAliasing(rhs.derived()); #endif @@ -146,7 +159,7 @@ template class SelfCwiseBinaryOp // at first... 
SelfCwiseBinaryOp& operator=(const Rhs& _rhs) { - typename ei_nested::type rhs(_rhs); + typename internal::nested::type rhs(_rhs); return Base::operator=(rhs); } @@ -162,7 +175,7 @@ template inline Derived& DenseBase::operator*=(const Scalar& other) { typedef typename Derived::PlainObject PlainObject; - SelfCwiseBinaryOp, Derived, typename PlainObject::ConstantReturnType> tmp(derived()); + SelfCwiseBinaryOp, Derived, typename PlainObject::ConstantReturnType> tmp(derived()); tmp = PlainObject::Constant(rows(),cols(),other); return derived(); } @@ -170,9 +183,9 @@ inline Derived& DenseBase::operator*=(const Scalar& other) template inline Derived& DenseBase::operator/=(const Scalar& other) { - typedef typename ei_meta_if::IsInteger, - ei_scalar_quotient_op, - ei_scalar_product_op >::ret BinOp; + typedef typename internal::conditional::IsInteger, + internal::scalar_quotient_op, + internal::scalar_product_op >::type BinOp; typedef typename Derived::PlainObject PlainObject; SelfCwiseBinaryOp tmp(derived()); tmp = PlainObject::Constant(rows(),cols(), NumTraits::IsInteger ? other : Scalar(1)/other); diff --git a/gtsam/3rdparty/Eigen/src/Core/SolveTriangular.h b/gtsam/3rdparty/Eigen/src/Core/SolveTriangular.h index 960da31f3..7cbcf3d80 100644 --- a/gtsam/3rdparty/Eigen/src/Core/SolveTriangular.h +++ b/gtsam/3rdparty/Eigen/src/Core/SolveTriangular.h @@ -25,8 +25,19 @@ #ifndef EIGEN_SOLVETRIANGULAR_H #define EIGEN_SOLVETRIANGULAR_H +namespace internal { + +// Forward declarations: +// The following two routines are implemented in the products/TriangularSolver*.h files +template +struct triangular_solve_vector; + +template +struct triangular_solve_matrix; + +// small helper struct extracting some traits on the underlying solver operation template -class ei_trsolve_traits +class trsolve_traits { private: enum { @@ -43,150 +54,63 @@ class ei_trsolve_traits template::Unrolling, - int StorageOrder = (int(Lhs::Flags) & RowMajorBit) ? 
RowMajor : ColMajor, - int RhsVectors = ei_trsolve_traits::RhsVectors + int Unrolling = trsolve_traits::Unrolling, + int RhsVectors = trsolve_traits::RhsVectors > -struct ei_triangular_solver_selector; +struct triangular_solver_selector; -// forward and backward substitution, row-major, rhs is a vector -template -struct ei_triangular_solver_selector +template +struct triangular_solver_selector { typedef typename Lhs::Scalar LhsScalar; typedef typename Rhs::Scalar RhsScalar; - typedef ei_blas_traits LhsProductTraits; + typedef blas_traits LhsProductTraits; typedef typename LhsProductTraits::ExtractType ActualLhsType; - typedef typename Lhs::Index Index; - enum { - IsLower = ((Mode&Lower)==Lower) - }; - static void run(const Lhs& lhs, Rhs& other) - { - static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; - ActualLhsType actualLhs = LhsProductTraits::extract(lhs); - - const Index size = lhs.cols(); - for(Index pi=IsLower ? 0 : size; - IsLower ? pi0; - IsLower ? pi+=PanelWidth : pi-=PanelWidth) - { - Index actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth); - - Index r = IsLower ? pi : size - pi; // remaining size - if (r > 0) - { - // let's directly call the low level product function because: - // 1 - it is faster to compile - // 2 - it is slighlty faster at runtime - Index startRow = IsLower ? pi : pi-actualPanelWidth; - Index startCol = IsLower ? 
0 : pi; - - ei_general_matrix_vector_product::run( - actualPanelWidth, r, - &(actualLhs.const_cast_derived().coeffRef(startRow,startCol)), actualLhs.outerStride(), - &(other.coeffRef(startCol)), other.innerStride(), - &other.coeffRef(startRow), other.innerStride(), - RhsScalar(-1)); - } - - for(Index k=0; k0) - other.coeffRef(i) -= (lhs.row(i).segment(s,k).transpose().cwiseProduct(other.segment(s,k))).sum(); - - if(!(Mode & UnitDiag)) - other.coeffRef(i) /= lhs.coeff(i,i); - } - } - } -}; - -// forward and backward substitution, column-major, rhs is a vector -template -struct ei_triangular_solver_selector -{ - typedef typename Lhs::Scalar LhsScalar; - typedef typename Rhs::Scalar RhsScalar; - typedef ei_blas_traits LhsProductTraits; - typedef typename LhsProductTraits::ExtractType ActualLhsType; - typedef typename Lhs::Index Index; - enum { - IsLower = ((Mode&Lower)==Lower) - }; - - static void run(const Lhs& lhs, Rhs& other) - { - static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; - ActualLhsType actualLhs = LhsProductTraits::extract(lhs); - - const Index size = lhs.cols(); - for(Index pi=IsLower ? 0 : size; - IsLower ? pi0; - IsLower ? pi+=PanelWidth : pi-=PanelWidth) - { - Index actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth); - Index startBlock = IsLower ? pi : pi-actualPanelWidth; - Index endBlock = IsLower ? pi + actualPanelWidth : 0; - - for(Index k=0; k0) - other.segment(s,r) -= other.coeffRef(i) * Block(lhs, s, i, r, 1); - } - Index r = IsLower ? 
size - endBlock : startBlock; // remaining size - if (r > 0) - { - // let's directly call the low level product function because: - // 1 - it is faster to compile - // 2 - it is slighlty faster at runtime - ei_general_matrix_vector_product::run( - r, actualPanelWidth, - &(actualLhs.const_cast_derived().coeffRef(endBlock,startBlock)), actualLhs.outerStride(), - &other.coeff(startBlock), other.innerStride(), - &(other.coeffRef(endBlock, 0)), other.innerStride(), RhsScalar(-1)); - } - } - } -}; - -// transpose OnTheRight cases for vectors -template -struct ei_triangular_solver_selector -{ + typedef Map, Aligned> MappedRhs; static void run(const Lhs& lhs, Rhs& rhs) { - Transpose rhsTr(rhs); - Transpose lhsTr(lhs); - ei_triangular_solver_selector,Transpose,OnTheLeft,TriangularView::TransposeMode>::run(lhsTr,rhsTr); + ActualLhsType actualLhs = LhsProductTraits::extract(lhs); + + // FIXME find a way to allow an inner stride if packet_traits::size==1 + + bool useRhsDirectly = Rhs::InnerStrideAtCompileTime==1 || rhs.innerStride()==1; + RhsScalar* actualRhs; + if(useRhsDirectly) + { + actualRhs = &rhs.coeffRef(0); + } + else + { + actualRhs = ei_aligned_stack_new(RhsScalar,rhs.size()); + MappedRhs(actualRhs,rhs.size()) = rhs; + } + + triangular_solve_vector + ::run(actualLhs.cols(), actualLhs.data(), actualLhs.outerStride(), actualRhs); + + if(!useRhsDirectly) + { + rhs = MappedRhs(actualRhs, rhs.size()); + ei_aligned_stack_delete(RhsScalar, actualRhs, rhs.size()); + } } }; -template -struct ei_triangular_solve_matrix; - // the rhs is a matrix -template -struct ei_triangular_solver_selector +template +struct triangular_solver_selector { typedef typename Rhs::Scalar Scalar; typedef typename Rhs::Index Index; - typedef ei_blas_traits LhsProductTraits; + typedef blas_traits LhsProductTraits; typedef typename LhsProductTraits::DirectLinearAccessType ActualLhsType; static void run(const Lhs& lhs, Rhs& rhs) { const ActualLhsType actualLhs = LhsProductTraits::extract(lhs); - 
ei_triangular_solve_matrix - ::run(lhs.rows(), Side==OnTheLeft? rhs.cols() : rhs.rows(), &actualLhs.coeff(0,0), actualLhs.outerStride(), &rhs.coeffRef(0,0), rhs.outerStride()); + ::run(lhs.rows(), Side==OnTheLeft? rhs.cols() : rhs.rows(), &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &rhs.coeffRef(0,0), rhs.outerStride()); } }; @@ -196,10 +120,10 @@ struct ei_triangular_solver_selector -struct ei_triangular_solver_unroller; +struct triangular_solver_unroller; template -struct ei_triangular_solver_unroller { +struct triangular_solver_unroller { enum { IsLower = ((Mode&Lower)==Lower), I = IsLower ? Index : Size - Index - 1, @@ -208,33 +132,47 @@ struct ei_triangular_solver_unroller { static void run(const Lhs& lhs, Rhs& rhs) { if (Index>0) - rhs.coeffRef(I) -= lhs.row(I).template segment(S).transpose().cwiseProduct(rhs.template segment(S)).sum(); + rhs.coeffRef(I) -= lhs.row(I).template segment(S).transpose() + .cwiseProduct(rhs.template segment(S)).sum(); if(!(Mode & UnitDiag)) rhs.coeffRef(I) /= lhs.coeff(I,I); - ei_triangular_solver_unroller::run(lhs,rhs); + triangular_solver_unroller::run(lhs,rhs); } }; template -struct ei_triangular_solver_unroller { +struct triangular_solver_unroller { static void run(const Lhs&, Rhs&) {} }; -template -struct ei_triangular_solver_selector { +template +struct triangular_solver_selector { static void run(const Lhs& lhs, Rhs& rhs) - { ei_triangular_solver_unroller::run(lhs,rhs); } + { triangular_solver_unroller::run(lhs,rhs); } }; +template +struct triangular_solver_selector { + static void run(const Lhs& lhs, Rhs& rhs) + { + Transpose trLhs(lhs); + Transpose trRhs(rhs); + + triangular_solver_unroller,Transpose, + ((Mode&Upper)==Upper ? 
Lower : Upper) | (Mode&UnitDiag), + 0,Rhs::SizeAtCompileTime>::run(trLhs,trRhs); + } +}; + +} // end namespace internal + /*************************************************************************** * TriangularView methods ***************************************************************************/ /** "in-place" version of TriangularView::solve() where the result is written in \a other - * - * * * \warning The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here. * This function will const_cast it, so constness isn't honored here. @@ -246,17 +184,17 @@ template void TriangularView::solveInPlace(const MatrixBase& _other) const { OtherDerived& other = _other.const_cast_derived(); - ei_assert(cols() == rows()); - ei_assert( (Side==OnTheLeft && cols() == other.rows()) || (Side==OnTheRight && cols() == other.cols()) ); - ei_assert(!(Mode & ZeroDiag)); - ei_assert(Mode & (Upper|Lower)); + eigen_assert(cols() == rows()); + eigen_assert( (Side==OnTheLeft && cols() == other.rows()) || (Side==OnTheRight && cols() == other.cols()) ); + eigen_assert(!(Mode & ZeroDiag)); + eigen_assert(Mode & (Upper|Lower)); - enum { copy = ei_traits::Flags & RowMajorBit && OtherDerived::IsVectorAtCompileTime }; - typedef typename ei_meta_if::type, OtherDerived&>::ret OtherCopy; + enum { copy = internal::traits::Flags & RowMajorBit && OtherDerived::IsVectorAtCompileTime }; + typedef typename internal::conditional::type, OtherDerived&>::type OtherCopy; OtherCopy otherCopy(other); - ei_triangular_solver_selector::type, + internal::triangular_solver_selector::type, Side, Mode>::run(nestedExpression(), otherCopy); if (copy) @@ -265,43 +203,68 @@ void TriangularView::solveInPlace(const MatrixBase T.transpose().solveInPlace(M.transpose()); - * \endcode - * * \sa TriangularView::solveInPlace() */ template -template -typename ei_plain_matrix_type_column_major::type -TriangularView::solve(const MatrixBase& rhs) const +template +const 
internal::triangular_solve_retval,Other> +TriangularView::solve(const MatrixBase& other) const { - typename ei_plain_matrix_type_column_major::type res(rhs); - solveInPlace(res); - return res; + return internal::triangular_solve_retval(*this, other.derived()); } +namespace internal { + + +template +struct traits > +{ + typedef typename internal::plain_matrix_type_column_major::type ReturnType; +}; + +template struct triangular_solve_retval + : public ReturnByValue > +{ + typedef typename remove_all::type RhsNestedCleaned; + typedef ReturnByValue Base; + typedef typename Base::Index Index; + + triangular_solve_retval(const TriangularType& tri, const Rhs& rhs) + : m_triangularMatrix(tri), m_rhs(rhs) + {} + + inline Index rows() const { return m_rhs.rows(); } + inline Index cols() const { return m_rhs.cols(); } + + template inline void evalTo(Dest& dst) const + { + if(!(is_same::value && extract_data(dst) == extract_data(m_rhs))) + dst = m_rhs; + m_triangularMatrix.template solveInPlace(dst); + } + + protected: + const TriangularType& m_triangularMatrix; + const typename Rhs::Nested m_rhs; +}; + +} // namespace internal + #endif // EIGEN_SOLVETRIANGULAR_H diff --git a/gtsam/3rdparty/Eigen/src/Core/StableNorm.h b/gtsam/3rdparty/Eigen/src/Core/StableNorm.h index c3c230796..fdf113bfe 100644 --- a/gtsam/3rdparty/Eigen/src/Core/StableNorm.h +++ b/gtsam/3rdparty/Eigen/src/Core/StableNorm.h @@ -25,13 +25,14 @@ #ifndef EIGEN_STABLENORM_H #define EIGEN_STABLENORM_H +namespace internal { template -inline void ei_stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale) +inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale) { Scalar max = bl.cwiseAbs().maxCoeff(); if (max>scale) { - ssq = ssq * ei_abs2(scale/max); + ssq = ssq * abs2(scale/max); scale = max; invScale = Scalar(1)/scale; } @@ -39,6 +40,7 @@ inline void ei_stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& // then we can 
neglect this sub vector ssq += (bl*invScale).squaredNorm(); } +} /** \returns the \em l2 norm of \c *this avoiding underflow and overflow. * This version use a blockwise two passes algorithm: @@ -51,7 +53,7 @@ inline void ei_stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& * \sa norm(), blueNorm(), hypotNorm() */ template -inline typename NumTraits::Scalar>::Real +inline typename NumTraits::Scalar>::Real MatrixBase::stableNorm() const { const Index blockSize = 4096; @@ -62,12 +64,12 @@ MatrixBase::stableNorm() const Alignment = (int(Flags)&DirectAccessBit) || (int(Flags)&AlignedBit) ? 1 : 0 }; Index n = size(); - Index bi = ei_first_aligned(derived()); + Index bi = internal::first_aligned(derived()); if (bi>0) - ei_stable_norm_kernel(this->head(bi), ssq, scale, invScale); + internal::stable_norm_kernel(this->head(bi), ssq, scale, invScale); for (; bisegment(bi,std::min(blockSize, n - bi)).template forceAlignedAccessIf(), ssq, scale, invScale); - return scale * ei_sqrt(ssq); + internal::stable_norm_kernel(this->segment(bi,std::min(blockSize, n - bi)).template forceAlignedAccessIf(), ssq, scale, invScale); + return scale * internal::sqrt(ssq); } /** \returns the \em l2 norm of \c *this using the Blue's algorithm. 
@@ -80,7 +82,7 @@ MatrixBase::stableNorm() const * \sa norm(), stableNorm(), hypotNorm() */ template -inline typename NumTraits::Scalar>::Real +inline typename NumTraits::Scalar>::Real MatrixBase::blueNorm() const { static Index nmax = -1; @@ -116,7 +118,7 @@ MatrixBase::blueNorm() const overfl = rbig*s2m; // overflow boundary for abig eps = RealScalar(std::pow(double(ibeta), 1-it)); - relerr = ei_sqrt(eps); // tolerance for neglecting asml + relerr = internal::sqrt(eps); // tolerance for neglecting asml abig = RealScalar(1.0/eps - 1.0); if (RealScalar(nbig)>abig) nmax = int(abig); // largest safe n else nmax = nbig; @@ -128,23 +130,23 @@ MatrixBase::blueNorm() const RealScalar abig = RealScalar(0); for(Index j=0; j ab2) abig += ei_abs2(ax*s2m); - else if(ax < b1) asml += ei_abs2(ax*s1m); - else amed += ei_abs2(ax); + RealScalar ax = internal::abs(coeff(j)); + if(ax > ab2) abig += internal::abs2(ax*s2m); + else if(ax < b1) asml += internal::abs2(ax*s1m); + else amed += internal::abs2(ax); } if(abig > RealScalar(0)) { - abig = ei_sqrt(abig); + abig = internal::sqrt(abig); if(abig > overfl) { - ei_assert(false && "overflow"); + eigen_assert(false && "overflow"); return rbig; } if(amed > RealScalar(0)) { abig = abig/s2m; - amed = ei_sqrt(amed); + amed = internal::sqrt(amed); } else return abig/s2m; @@ -153,20 +155,20 @@ MatrixBase::blueNorm() const { if (amed > RealScalar(0)) { - abig = ei_sqrt(amed); - amed = ei_sqrt(asml) / s1m; + abig = internal::sqrt(amed); + amed = internal::sqrt(asml) / s1m; } else - return ei_sqrt(asml)/s1m; + return internal::sqrt(asml)/s1m; } else - return ei_sqrt(amed); + return internal::sqrt(amed); asml = std::min(abig, amed); abig = std::max(abig, amed); if(asml <= abig*relerr) return abig; else - return abig * ei_sqrt(RealScalar(1) + ei_abs2(asml/abig)); + return abig * internal::sqrt(RealScalar(1) + internal::abs2(asml/abig)); } /** \returns the \em l2 norm of \c *this avoiding undeflow and overflow. 
@@ -175,10 +177,10 @@ MatrixBase::blueNorm() const * \sa norm(), stableNorm() */ template -inline typename NumTraits::Scalar>::Real +inline typename NumTraits::Scalar>::Real MatrixBase::hypotNorm() const { - return this->cwiseAbs().redux(ei_scalar_hypot_op()); + return this->cwiseAbs().redux(internal::scalar_hypot_op()); } #endif // EIGEN_STABLENORM_H diff --git a/gtsam/3rdparty/Eigen/src/Core/Stride.h b/gtsam/3rdparty/Eigen/src/Core/Stride.h index 47515b548..0430f1116 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Stride.h +++ b/gtsam/3rdparty/Eigen/src/Core/Stride.h @@ -51,7 +51,7 @@ * \include Map_general_stride.cpp * Output: \verbinclude Map_general_stride.out * - * \sa class InnerStride, class OuterStride + * \sa class InnerStride, class OuterStride, \ref TopicStorageOrders */ template class Stride @@ -67,14 +67,14 @@ class Stride Stride() : m_outer(OuterStrideAtCompileTime), m_inner(InnerStrideAtCompileTime) { - ei_assert(InnerStrideAtCompileTime != Dynamic && OuterStrideAtCompileTime != Dynamic); + eigen_assert(InnerStrideAtCompileTime != Dynamic && OuterStrideAtCompileTime != Dynamic); } /** Constructor allowing to pass the strides at runtime */ Stride(Index outerStride, Index innerStride) : m_outer(outerStride), m_inner(innerStride) { - ei_assert(innerStride>=0 && outerStride>=0); + eigen_assert(innerStride>=0 && outerStride>=0); } /** Copy constructor */ @@ -88,8 +88,8 @@ class Stride inline Index inner() const { return m_inner.value(); } protected: - ei_variable_if_dynamic m_outer; - ei_variable_if_dynamic m_inner; + internal::variable_if_dynamic m_outer; + internal::variable_if_dynamic m_inner; }; /** \brief Convenience specialization of Stride to specify only an inner stride diff --git a/gtsam/3rdparty/Eigen/src/Core/Swap.h b/gtsam/3rdparty/Eigen/src/Core/Swap.h index 086d7f32c..5fb032866 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Swap.h +++ b/gtsam/3rdparty/Eigen/src/Core/Swap.h @@ -32,17 +32,19 @@ * * \brief Internal helper class for swapping two 
expressions */ +namespace internal { template -struct ei_traits > : ei_traits {}; +struct traits > : traits {}; +} template class SwapWrapper - : public ei_dense_xpr_base >::type + : public internal::dense_xpr_base >::type { public: - typedef typename ei_dense_xpr_base::type Base; + typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(SwapWrapper) - typedef typename ei_packet_traits::type Packet; + typedef typename internal::packet_traits::type Packet; inline SwapWrapper(ExpressionType& xpr) : m_expression(xpr) {} @@ -61,11 +63,21 @@ template class SwapWrapper return m_expression.const_cast_derived().coeffRef(index); } + inline Scalar& coeffRef(Index row, Index col) const + { + return m_expression.coeffRef(row, col); + } + + inline Scalar& coeffRef(Index index) const + { + return m_expression.coeffRef(index); + } + template void copyCoeff(Index row, Index col, const DenseBase& other) { OtherDerived& _other = other.const_cast_derived(); - ei_internal_assert(row >= 0 && row < rows() + eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); Scalar tmp = m_expression.coeff(row, col); m_expression.coeffRef(row, col) = _other.coeff(row, col); @@ -76,7 +88,7 @@ template class SwapWrapper void copyCoeff(Index index, const DenseBase& other) { OtherDerived& _other = other.const_cast_derived(); - ei_internal_assert(index >= 0 && index < m_expression.size()); + eigen_internal_assert(index >= 0 && index < m_expression.size()); Scalar tmp = m_expression.coeff(index); m_expression.coeffRef(index) = _other.coeff(index); _other.coeffRef(index) = tmp; @@ -86,7 +98,7 @@ template class SwapWrapper void copyPacket(Index row, Index col, const DenseBase& other) { OtherDerived& _other = other.const_cast_derived(); - ei_internal_assert(row >= 0 && row < rows() + eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); Packet tmp = m_expression.template packet(row, col); m_expression.template writePacket(row, col, @@ 
-99,7 +111,7 @@ template class SwapWrapper void copyPacket(Index index, const DenseBase& other) { OtherDerived& _other = other.const_cast_derived(); - ei_internal_assert(index >= 0 && index < m_expression.size()); + eigen_internal_assert(index >= 0 && index < m_expression.size()); Packet tmp = m_expression.template packet(index); m_expression.template writePacket(index, _other.template packet(index) @@ -111,18 +123,4 @@ template class SwapWrapper ExpressionType& m_expression; }; -/** swaps *this with the expression \a other. - * - * \note \a other is only marked for internal reasons, but of course - * it gets const-casted. One reason is that one will often call swap - * on temporary objects (hence non-const references are forbidden). - * Another reason is that lazyAssign takes a const argument anyway. - */ -template -template -void DenseBase::swap(DenseBase EIGEN_REF_TO_TEMPORARY other) -{ - (SwapWrapper(derived())).lazyAssign(other); -} - #endif // EIGEN_SWAP_H diff --git a/gtsam/3rdparty/Eigen/src/Core/Transpose.h b/gtsam/3rdparty/Eigen/src/Core/Transpose.h index de8ae0a5b..b521f9319 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Transpose.h +++ b/gtsam/3rdparty/Eigen/src/Core/Transpose.h @@ -39,37 +39,43 @@ * * \sa MatrixBase::transpose(), MatrixBase::adjoint() */ + +namespace internal { template -struct ei_traits > : ei_traits +struct traits > : traits { typedef typename MatrixType::Scalar Scalar; - typedef typename ei_nested::type MatrixTypeNested; - typedef typename ei_unref::type _MatrixTypeNested; - typedef typename ei_traits::StorageKind StorageKind; - typedef typename ei_traits::XprKind XprKind; + typedef typename nested::type MatrixTypeNested; + typedef typename remove_reference::type MatrixTypeNestedPlain; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::XprKind XprKind; enum { RowsAtCompileTime = MatrixType::ColsAtCompileTime, ColsAtCompileTime = MatrixType::RowsAtCompileTime, MaxRowsAtCompileTime = 
MatrixType::MaxColsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxRowsAtCompileTime, - Flags = int(_MatrixTypeNested::Flags & ~NestByRefBit) ^ RowMajorBit, - CoeffReadCost = _MatrixTypeNested::CoeffReadCost, - InnerStrideAtCompileTime = ei_inner_stride_at_compile_time::ret, - OuterStrideAtCompileTime = ei_outer_stride_at_compile_time::ret + FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, + Flags0 = MatrixTypeNestedPlain::Flags & ~(LvalueBit | NestByRefBit), + Flags1 = Flags0 | FlagsLvalueBit, + Flags = Flags1 ^ RowMajorBit, + CoeffReadCost = MatrixTypeNestedPlain::CoeffReadCost, + InnerStrideAtCompileTime = inner_stride_at_compile_time::ret, + OuterStrideAtCompileTime = outer_stride_at_compile_time::ret }; }; +} template class TransposeImpl; template class Transpose - : public TransposeImpl::StorageKind> + : public TransposeImpl::StorageKind> { public: - typedef typename TransposeImpl::StorageKind>::Base Base; + typedef typename TransposeImpl::StorageKind>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(Transpose) - inline Transpose(const MatrixType& matrix) : m_matrix(matrix) {} + inline Transpose(MatrixType& matrix) : m_matrix(matrix) {} EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose) @@ -77,50 +83,73 @@ template class Transpose inline Index cols() const { return m_matrix.rows(); } /** \returns the nested expression */ - const typename ei_cleantype::type& + const typename internal::remove_all::type& nestedExpression() const { return m_matrix; } /** \returns the nested expression */ - typename ei_cleantype::type& + typename internal::remove_all::type& nestedExpression() { return m_matrix.const_cast_derived(); } protected: const typename MatrixType::Nested m_matrix; }; -template::ret> -struct ei_TransposeImpl_base +namespace internal { + +template::ret> +struct TransposeImpl_base { - typedef typename ei_dense_xpr_base >::type type; + typedef typename dense_xpr_base >::type type; }; template -struct ei_TransposeImpl_base +struct TransposeImpl_base { - typedef 
typename ei_dense_xpr_base >::type type; + typedef typename dense_xpr_base >::type type; }; +} // end namespace internal + template class TransposeImpl - : public ei_TransposeImpl_base::type + : public internal::TransposeImpl_base::type { public: - typedef typename ei_TransposeImpl_base::type Base; + typedef typename internal::TransposeImpl_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Transpose) inline Index innerStride() const { return derived().nestedExpression().innerStride(); } inline Index outerStride() const { return derived().nestedExpression().outerStride(); } - inline Scalar* data() { return derived().nestedExpression().data(); } + + typedef typename internal::conditional< + internal::is_lvalue::value, + Scalar, + const Scalar + >::type ScalarWithConstIfNotLvalue; + + inline ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); } inline const Scalar* data() const { return derived().nestedExpression().data(); } - inline Scalar& coeffRef(Index row, Index col) + inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col) { - return const_cast_derived().nestedExpression().coeffRef(col, row); + EIGEN_STATIC_ASSERT_LVALUE(MatrixType) + return derived().nestedExpression().const_cast_derived().coeffRef(col, row); } - inline Scalar& coeffRef(Index index) + inline ScalarWithConstIfNotLvalue& coeffRef(Index index) { - return const_cast_derived().nestedExpression().coeffRef(index); + EIGEN_STATIC_ASSERT_LVALUE(MatrixType) + return derived().nestedExpression().const_cast_derived().coeffRef(index); + } + + inline const Scalar& coeffRef(Index row, Index col) const + { + return derived().nestedExpression().coeffRef(col, row); + } + + inline const Scalar& coeffRef(Index index) const + { + return derived().nestedExpression().coeffRef(index); } inline const CoeffReturnType coeff(Index row, Index col) const @@ -142,7 +171,7 @@ template class TransposeImpl template inline void writePacket(Index row, Index col, const PacketScalar& x) { - 
const_cast_derived().nestedExpression().template writePacket(col, row, x); + derived().nestedExpression().const_cast_derived().template writePacket(col, row, x); } template @@ -154,7 +183,7 @@ template class TransposeImpl template inline void writePacket(Index index, const PacketScalar& x) { - const_cast_derived().nestedExpression().template writePacket(index, x); + derived().nestedExpression().const_cast_derived().template writePacket(index, x); } }; @@ -190,10 +219,10 @@ DenseBase::transpose() * * \sa transposeInPlace(), adjoint() */ template -inline const Transpose +inline const typename DenseBase::ConstTransposeReturnType DenseBase::transpose() const { - return derived(); + return ConstTransposeReturnType(derived()); } /** \returns an expression of the adjoint (i.e. conjugate transpose) of *this. @@ -214,31 +243,34 @@ DenseBase::transpose() const * m = m.adjoint().eval(); * \endcode * - * \sa adjointInPlace(), transpose(), conjugate(), class Transpose, class ei_scalar_conjugate_op */ + * \sa adjointInPlace(), transpose(), conjugate(), class Transpose, class internal::scalar_conjugate_op */ template inline const typename MatrixBase::AdjointReturnType MatrixBase::adjoint() const { - return this->transpose(); + return this->transpose(); // in the complex case, the .conjugate() is be implicit here + // due to implicit conversion to return type } /*************************************************************************** * "in place" transpose implementation ***************************************************************************/ +namespace internal { + template -struct ei_inplace_transpose_selector; +struct inplace_transpose_selector; template -struct ei_inplace_transpose_selector { // square matrix +struct inplace_transpose_selector { // square matrix static void run(MatrixType& m) { m.template triangularView().swap(m.transpose()); } }; template -struct ei_inplace_transpose_selector { // non square matrix +struct inplace_transpose_selector { // non square 
matrix static void run(MatrixType& m) { if (m.rows()==m.cols()) m.template triangularView().swap(m.transpose()); @@ -247,6 +279,8 @@ struct ei_inplace_transpose_selector { // non square matrix } }; +} // end namespace internal + /** This is the "in place" version of transpose(): it replaces \c *this by its own transpose. * Thus, doing * \code @@ -268,7 +302,7 @@ struct ei_inplace_transpose_selector { // non square matrix template inline void DenseBase::transposeInPlace() { - ei_inplace_transpose_selector::run(derived()); + internal::inplace_transpose_selector::run(derived()); } /*************************************************************************** @@ -303,45 +337,47 @@ inline void MatrixBase::adjointInPlace() // The following is to detect aliasing problems in most common cases. +namespace internal { + template -struct ei_blas_traits > - : ei_blas_traits +struct blas_traits > + : blas_traits { typedef SelfCwiseBinaryOp XprType; static inline const XprType extract(const XprType& x) { return x; } }; template -struct ei_check_transpose_aliasing_compile_time_selector +struct check_transpose_aliasing_compile_time_selector { - enum { ret = ei_blas_traits::IsTransposed != DestIsTransposed + enum { ret = blas_traits::IsTransposed != DestIsTransposed }; }; template -struct ei_check_transpose_aliasing_compile_time_selector > +struct check_transpose_aliasing_compile_time_selector > { - enum { ret = ei_blas_traits::IsTransposed != DestIsTransposed - || ei_blas_traits::IsTransposed != DestIsTransposed + enum { ret = blas_traits::IsTransposed != DestIsTransposed + || blas_traits::IsTransposed != DestIsTransposed }; }; template -struct ei_check_transpose_aliasing_run_time_selector +struct check_transpose_aliasing_run_time_selector { static bool run(const Scalar* dest, const OtherDerived& src) { - return (ei_blas_traits::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(Scalar*)ei_extract_data(src)); + return (blas_traits::IsTransposed != DestIsTransposed) && (dest!=0 
&& dest==(Scalar*)extract_data(src)); } }; template -struct ei_check_transpose_aliasing_run_time_selector > +struct check_transpose_aliasing_run_time_selector > { static bool run(const Scalar* dest, const CwiseBinaryOp& src) { - return ((ei_blas_traits::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(Scalar*)ei_extract_data(src.lhs()))) - || ((ei_blas_traits::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(Scalar*)ei_extract_data(src.rhs()))); + return ((blas_traits::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(Scalar*)extract_data(src.lhs()))) + || ((blas_traits::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(Scalar*)extract_data(src.rhs()))); } }; @@ -353,16 +389,16 @@ struct ei_check_transpose_aliasing_run_time_selector::IsTransposed,OtherDerived>::ret + = check_transpose_aliasing_compile_time_selector + ::IsTransposed,OtherDerived>::ret > struct checkTransposeAliasing_impl { static void run(const Derived& dst, const OtherDerived& other) { - ei_assert((!ei_check_transpose_aliasing_run_time_selector - ::IsTransposed,OtherDerived> - ::run(ei_extract_data(dst), other)) + eigen_assert((!check_transpose_aliasing_run_time_selector + ::IsTransposed,OtherDerived> + ::run(extract_data(dst), other)) && "aliasing detected during tranposition, use transposeInPlace() " "or evaluate the rhs into a temporary using .eval()"); @@ -377,12 +413,13 @@ struct checkTransposeAliasing_impl } }; +} // end namespace internal template template void DenseBase::checkTransposeAliasing(const OtherDerived& other) const { - checkTransposeAliasing_impl::run(derived(), other); + internal::checkTransposeAliasing_impl::run(derived(), other); } #endif diff --git a/gtsam/3rdparty/Eigen/src/Core/Transpositions.h b/gtsam/3rdparty/Eigen/src/Core/Transpositions.h index 6703b1e58..88fdfb222 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Transpositions.h +++ b/gtsam/3rdparty/Eigen/src/Core/Transpositions.h @@ -1,7 +1,7 @@ // This file is part of Eigen, a lightweight C++ 
template library // for linear algebra. // -// Copyright (C) 2010 Gael Guennebaud +// Copyright (C) 2010-2011 Gael Guennebaud // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public @@ -53,90 +53,75 @@ * * \sa class PermutationMatrix */ -template struct ei_transposition_matrix_product_retval; -template -class Transpositions +namespace internal { +template struct transposition_matrix_product_retval; +} + +template +class TranspositionsBase { + typedef internal::traits Traits; + public: - typedef Matrix IndicesType; - typedef typename IndicesType::Index Index; + typedef typename Traits::IndicesType IndicesType; + typedef typename IndicesType::Scalar Index; - inline Transpositions() {} - - /** Copy constructor. */ - template - inline Transpositions(const Transpositions& other) - : m_indices(other.indices()) {} - - #ifndef EIGEN_PARSED_BY_DOXYGEN - /** Standard copy constructor. Defined only to prevent a default copy constructor - * from hiding the other templated constructor */ - inline Transpositions(const Transpositions& other) : m_indices(other.indices()) {} - #endif - - /** Generic constructor from expression of the transposition indices. */ - template - explicit inline Transpositions(const MatrixBase& indices) : m_indices(indices) - {} + Derived& derived() { return *static_cast(this); } + const Derived& derived() const { return *static_cast(this); } /** Copies the \a other transpositions into \c *this */ - template - Transpositions& operator=(const Transpositions& other) + template + Derived& operator=(const TranspositionsBase& other) { - m_indices = other.indices(); - return *this; + indices() = other.indices(); + return derived(); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. 
*/ - Transpositions& operator=(const Transpositions& other) + Derived& operator=(const TranspositionsBase& other) { - m_indices = other.m_indices; - return *this; + indices() = other.indices(); + return derived(); } #endif - /** Constructs an uninitialized permutation matrix of given size. - */ - inline Transpositions(Index size) : m_indices(size) - {} - /** \returns the number of transpositions */ - inline Index size() const { return m_indices.size(); } + inline Index size() const { return indices().size(); } /** Direct access to the underlying index vector */ - inline const Index& coeff(Index i) const { return m_indices.coeff(i); } + inline const Index& coeff(Index i) const { return indices().coeff(i); } /** Direct access to the underlying index vector */ - inline Index& coeffRef(Index i) { return m_indices.coeffRef(i); } + inline Index& coeffRef(Index i) { return indices().coeffRef(i); } /** Direct access to the underlying index vector */ - inline const Index& operator()(Index i) const { return m_indices(i); } + inline const Index& operator()(Index i) const { return indices()(i); } /** Direct access to the underlying index vector */ - inline Index& operator()(Index i) { return m_indices(i); } + inline Index& operator()(Index i) { return indices()(i); } /** Direct access to the underlying index vector */ - inline const Index& operator[](Index i) const { return m_indices(i); } + inline const Index& operator[](Index i) const { return indices()(i); } /** Direct access to the underlying index vector */ - inline Index& operator[](Index i) { return m_indices(i); } + inline Index& operator[](Index i) { return indices()(i); } /** const version of indices(). */ - const IndicesType& indices() const { return m_indices; } + const IndicesType& indices() const { return derived().indices(); } /** \returns a reference to the stored array representing the transpositions. 
*/ - IndicesType& indices() { return m_indices; } + IndicesType& indices() { return derived().indices(); } /** Resizes to given size. */ inline void resize(int size) { - m_indices.resize(size); + indices().resize(size); } /** Sets \c *this to represents an identity transformation */ void setIdentity() { - for(int i = 0; i < m_indices.size(); ++i) - m_indices.coeffRef(i) = i; + for(int i = 0; i < indices().size(); ++i) + coeffRef(i) = i; } // FIXME: do we want such methods ? @@ -161,69 +146,238 @@ class Transpositions */ /** \returns the inverse transformation */ - inline Transpose inverse() const - { return *this; } + inline Transpose inverse() const + { return Transpose(derived()); } /** \returns the tranpose transformation */ - inline Transpose transpose() const - { return *this; } + inline Transpose transpose() const + { return Transpose(derived()); } -#ifndef EIGEN_PARSED_BY_DOXYGEN - template - Transpositions(const Transpose >& other) - : m_indices(other.size()) + protected: +}; + +namespace internal { +template +struct traits > +{ + typedef IndexType Index; + typedef Matrix IndicesType; +}; +} + +template +class Transpositions : public TranspositionsBase > +{ + typedef internal::traits Traits; + public: + + typedef TranspositionsBase Base; + typedef typename Traits::IndicesType IndicesType; + typedef typename IndicesType::Scalar Index; + + inline Transpositions() {} + + /** Copy constructor. */ + template + inline Transpositions(const TranspositionsBase& other) + : m_indices(other.indices()) {} + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** Standard copy constructor. Defined only to prevent a default copy constructor + * from hiding the other templated constructor */ + inline Transpositions(const Transpositions& other) : m_indices(other.indices()) {} + #endif + + /** Generic constructor from expression of the transposition indices. 
*/ + template + explicit inline Transpositions(const MatrixBase& indices) : m_indices(indices) + {} + + /** Copies the \a other transpositions into \c *this */ + template + Transpositions& operator=(const TranspositionsBase& other) { - Index n = size(); - Index j = size-1; - for(Index i=0; i +struct traits,_PacketAccess> > +{ + typedef IndexType Index; + typedef Map, _PacketAccess> IndicesType; +}; +} + +template +class Map,PacketAccess> + : public TranspositionsBase,PacketAccess> > +{ + typedef internal::traits Traits; + public: + + typedef TranspositionsBase Base; + typedef typename Traits::IndicesType IndicesType; + typedef typename IndicesType::Scalar Index; + + inline Map(const Index* indices) + : m_indices(indices) + {} + + inline Map(const Index* indices, Index size) + : m_indices(indices,size) + {} + + /** Copies the \a other transpositions into \c *this */ + template + Map& operator=(const TranspositionsBase& other) + { + return Base::operator=(other); + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. + */ + Map& operator=(const Map& other) + { + m_indices = other.m_indices; + return *this; + } + #endif + + /** const version of indices(). */ + const IndicesType& indices() const { return m_indices; } + + /** \returns a reference to the stored array representing the transpositions. 
*/ + IndicesType& indices() { return m_indices; } + + protected: + + IndicesType m_indices; +}; + +namespace internal { +template +struct traits > +{ + typedef typename _IndicesType::Scalar Index; + typedef _IndicesType IndicesType; +}; +} + +template +class TranspositionsWrapper + : public TranspositionsBase > +{ + typedef internal::traits Traits; + public: + + typedef TranspositionsBase Base; + typedef typename Traits::IndicesType IndicesType; + typedef typename IndicesType::Scalar Index; + + inline TranspositionsWrapper(IndicesType& indices) + : m_indices(indices) + {} + + /** Copies the \a other transpositions into \c *this */ + template + TranspositionsWrapper& operator=(const TranspositionsBase& other) + { + return Base::operator=(other); + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. + */ + TranspositionsWrapper& operator=(const TranspositionsWrapper& other) + { + m_indices = other.m_indices; + return *this; + } + #endif + + /** const version of indices(). */ + const IndicesType& indices() const { return m_indices; } + + /** \returns a reference to the stored array representing the transpositions. */ + IndicesType& indices() { return m_indices; } + + protected: + + const typename IndicesType::Nested m_indices; +}; + /** \returns the \a matrix with the \a transpositions applied to the columns. 
*/ -template -inline const ei_transposition_matrix_product_retval, Derived, OnTheRight> +template +inline const internal::transposition_matrix_product_retval operator*(const MatrixBase& matrix, - const Transpositions &transpositions) + const TranspositionsBase &transpositions) { - return ei_transposition_matrix_product_retval - , Derived, OnTheRight> - (transpositions, matrix.derived()); + return internal::transposition_matrix_product_retval + + (transpositions.derived(), matrix.derived()); } /** \returns the \a matrix with the \a transpositions applied to the rows. */ -template -inline const ei_transposition_matrix_product_retval - , Derived, OnTheLeft> -operator*(const Transpositions &transpositions, +template +inline const internal::transposition_matrix_product_retval + +operator*(const TranspositionsBase &transpositions, const MatrixBase& matrix) { - return ei_transposition_matrix_product_retval - , Derived, OnTheLeft> - (transpositions, matrix.derived()); + return internal::transposition_matrix_product_retval + + (transpositions.derived(), matrix.derived()); } +namespace internal { + template -struct ei_traits > +struct traits > { typedef typename MatrixType::PlainObject ReturnType; }; template -struct ei_transposition_matrix_product_retval - : public ReturnByValue > +struct transposition_matrix_product_retval + : public ReturnByValue > { - typedef typename ei_cleantype::type MatrixTypeNestedCleaned; + typedef typename remove_all::type MatrixTypeNestedCleaned; typedef typename TranspositionType::Index Index; - ei_transposition_matrix_product_retval(const TranspositionType& tr, const MatrixType& matrix) + transposition_matrix_product_retval(const TranspositionType& tr, const MatrixType& matrix) : m_transpositions(tr), m_matrix(matrix) {} @@ -235,7 +389,7 @@ struct ei_transposition_matrix_product_retval const int size = m_transpositions.size(); Index j = 0; - if(!(ei_is_same_type::ret && ei_extract_data(dst) == ei_extract_data(m_matrix))) + if(!(is_same::value 
&& extract_data(dst) == extract_data(m_matrix))) dst = m_matrix; for(int k=(Transposed?size-1:0) ; Transposed?k>=0:k -class Transpose > +template +class Transpose > { - typedef Transpositions TranspositionType; + typedef TranspositionsDerived TranspositionType; typedef typename TranspositionType::IndicesType IndicesType; public: @@ -269,23 +425,21 @@ class Transpose > /** \returns the \a matrix with the inverse transpositions applied to the columns. */ template friend - inline const ei_transposition_matrix_product_retval + inline const internal::transposition_matrix_product_retval operator*(const MatrixBase& matrix, const Transpose& trt) { - return ei_transposition_matrix_product_retval(trt.m_transpositions, matrix.derived()); + return internal::transposition_matrix_product_retval(trt.m_transpositions, matrix.derived()); } /** \returns the \a matrix with the inverse transpositions applied to the rows. */ template - inline const ei_transposition_matrix_product_retval + inline const internal::transposition_matrix_product_retval operator*(const MatrixBase& matrix) const { - return ei_transposition_matrix_product_retval(m_transpositions, matrix.derived()); + return internal::transposition_matrix_product_retval(m_transpositions, matrix.derived()); } - const TranspositionType& nestedTranspositions() const { return m_transpositions; } - protected: const TranspositionType& m_transpositions; }; diff --git a/gtsam/3rdparty/Eigen/src/Core/TriangularMatrix.h b/gtsam/3rdparty/Eigen/src/Core/TriangularMatrix.h index 6e4e5b2cc..f9fedcb0f 100644 --- a/gtsam/3rdparty/Eigen/src/Core/TriangularMatrix.h +++ b/gtsam/3rdparty/Eigen/src/Core/TriangularMatrix.h @@ -26,6 +26,12 @@ #ifndef EIGEN_TRIANGULARMATRIX_H #define EIGEN_TRIANGULARMATRIX_H +namespace internal { + +template struct triangular_solve_retval; + +} + /** \internal * * \class TriangularBase @@ -38,18 +44,20 @@ template class TriangularBase : public EigenBase public: enum { - Mode = ei_traits::Mode, - CoeffReadCost = 
ei_traits::CoeffReadCost, - RowsAtCompileTime = ei_traits::RowsAtCompileTime, - ColsAtCompileTime = ei_traits::ColsAtCompileTime, - MaxRowsAtCompileTime = ei_traits::MaxRowsAtCompileTime, - MaxColsAtCompileTime = ei_traits::MaxColsAtCompileTime + Mode = internal::traits::Mode, + CoeffReadCost = internal::traits::CoeffReadCost, + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + ColsAtCompileTime = internal::traits::ColsAtCompileTime, + MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime }; - typedef typename ei_traits::Scalar Scalar; - typedef typename ei_traits::StorageKind StorageKind; - typedef typename ei_traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::DenseMatrixType DenseMatrixType; + typedef DenseMatrixType DenseType; - inline TriangularBase() { ei_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); } + inline TriangularBase() { eigen_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); } inline Index rows() const { return derived().rows(); } inline Index cols() const { return derived().cols(); } @@ -88,17 +96,25 @@ template class TriangularBase : public EigenBase template void evalToLazy(MatrixBase &other) const; + DenseMatrixType toDenseMatrix() const + { + DenseMatrixType res(rows(), cols()); + evalToLazy(res); + return res; + } + protected: void check_coordinates(Index row, Index col) const { EIGEN_ONLY_USED_FOR_DEBUG(row); EIGEN_ONLY_USED_FOR_DEBUG(col); - ei_assert(col>=0 && col=0 && row=row) - || (Mode==Lower && col<=row) - || ((Mode==StrictlyUpper || Mode==UnitUpper) && col>row) - || ((Mode==StrictlyLower || Mode==UnitLower) && col=0 && col=0 && row=row) + || (mode==Lower && col<=row) + || ((mode==StrictlyUpper || mode==UnitUpper) && col>row) + || ((mode==StrictlyLower || mode==UnitLower) && col class 
TriangularBase : public EigenBase * * \sa MatrixBase::triangularView() */ +namespace internal { template -struct ei_traits > : ei_traits +struct traits > : traits { - typedef typename ei_nested::type MatrixTypeNested; - typedef typename ei_unref::type _MatrixTypeNested; + typedef typename nested::type MatrixTypeNested; + typedef typename remove_reference::type MatrixTypeNestedNonRef; + typedef typename remove_all::type MatrixTypeNestedCleaned; typedef MatrixType ExpressionType; + typedef typename MatrixType::PlainObject DenseMatrixType; enum { Mode = _Mode, - Flags = (_MatrixTypeNested::Flags & (HereditaryBits) & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit))) | Mode, - CoeffReadCost = _MatrixTypeNested::CoeffReadCost + Flags = (MatrixTypeNestedCleaned::Flags & (HereditaryBits) & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit))) | Mode, + CoeffReadCost = MatrixTypeNestedCleaned::CoeffReadCost }; }; +} template class TriangularView public: typedef TriangularBase Base; - typedef typename ei_traits::Scalar Scalar; + typedef typename internal::traits::Scalar Scalar; typedef _MatrixType MatrixType; - typedef typename MatrixType::PlainObject DenseMatrixType; + typedef typename internal::traits::DenseMatrixType DenseMatrixType; + typedef DenseMatrixType PlainObject; protected: - typedef typename MatrixType::Nested MatrixTypeNested; - typedef typename ei_cleantype::type _MatrixTypeNested; - typedef typename ei_cleantype::type MatrixConjugateReturnType; + typedef typename internal::traits::MatrixTypeNested MatrixTypeNested; + typedef typename internal::traits::MatrixTypeNestedNonRef MatrixTypeNestedNonRef; + typedef typename internal::traits::MatrixTypeNestedCleaned MatrixTypeNestedCleaned; + + typedef typename internal::remove_all::type MatrixConjugateReturnType; public: using Base::evalToLazy; - typedef typename ei_traits::StorageKind StorageKind; - typedef typename ei_traits::Index Index; + typedef typename internal::traits::StorageKind StorageKind; + 
typedef typename internal::traits::Index Index; enum { Mode = _Mode, @@ -179,7 +202,7 @@ template class TriangularView }; inline TriangularView(const MatrixType& matrix) : m_matrix(matrix) - { ei_assert(ei_are_flags_consistent::ret); } + {} inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } @@ -187,13 +210,13 @@ template class TriangularView inline Index innerStride() const { return m_matrix.innerStride(); } /** \sa MatrixBase::operator+=() */ - template TriangularView& operator+=(const Other& other) { return *this = m_matrix + other; } + template TriangularView& operator+=(const DenseBase& other) { return *this = m_matrix + other.derived(); } /** \sa MatrixBase::operator-=() */ - template TriangularView& operator-=(const Other& other) { return *this = m_matrix - other; } + template TriangularView& operator-=(const DenseBase& other) { return *this = m_matrix - other.derived(); } /** \sa MatrixBase::operator*=() */ - TriangularView& operator*=(const typename ei_traits::Scalar& other) { return *this = m_matrix * other; } + TriangularView& operator*=(const typename internal::traits::Scalar& other) { return *this = m_matrix * other; } /** \sa MatrixBase::operator/=() */ - TriangularView& operator/=(const typename ei_traits::Scalar& other) { return *this = m_matrix / other; } + TriangularView& operator/=(const typename internal::traits::Scalar& other) { return *this = m_matrix / other; } /** \sa MatrixBase::fill() */ void fill(const Scalar& value) { setConstant(value); } @@ -223,8 +246,8 @@ template class TriangularView return m_matrix.const_cast_derived().coeffRef(row, col); } - const MatrixType& nestedExpression() const { return m_matrix; } - MatrixType& nestedExpression() { return const_cast(m_matrix); } + const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; } + MatrixTypeNestedCleaned& nestedExpression() { return *const_cast(&m_matrix); } /** Assigns a triangular matrix to a triangular 
part of a dense matrix */ template @@ -258,18 +281,14 @@ template class TriangularView /** \sa MatrixBase::transpose() */ inline TriangularView,TransposeMode> transpose() - { return m_matrix.transpose(); } + { + EIGEN_STATIC_ASSERT_LVALUE(MatrixType) + return m_matrix.const_cast_derived().transpose(); + } /** \sa MatrixBase::transpose() const */ inline const TriangularView,TransposeMode> transpose() const { return m_matrix.transpose(); } - DenseMatrixType toDenseMatrix() const - { - DenseMatrixType res(rows(), cols()); - evalToLazy(res); - return res; - } - /** Efficient triangular matrix times vector/matrix product */ template TriangularProduct @@ -290,42 +309,70 @@ template class TriangularView (lhs.derived(),rhs.m_matrix); } + #ifdef EIGEN2_SUPPORT + template + struct eigen2_product_return_type + { + typedef typename TriangularView::DenseMatrixType DenseMatrixType; + typedef typename OtherDerived::PlainObject::DenseType OtherPlainObject; + typedef typename ProductReturnType::Type ProdRetType; + typedef typename ProdRetType::PlainObject type; + }; + template + const typename eigen2_product_return_type::type + operator*(const EigenBase& rhs) const + { + typename OtherDerived::PlainObject::DenseType rhsPlainObject; + rhs.evalTo(rhsPlainObject); + return this->toDenseMatrix() * rhsPlainObject; + } + template + bool isApprox(const TriangularView& other, typename NumTraits::Real precision = NumTraits::dummy_precision()) const + { + return this->toDenseMatrix().isApprox(other.toDenseMatrix(), precision); + } + template + bool isApprox(const MatrixBase& other, typename NumTraits::Real precision = NumTraits::dummy_precision()) const + { + return this->toDenseMatrix().isApprox(other, precision); + } + #endif // EIGEN2_SUPPORT - template - typename ei_plain_matrix_type_column_major::type - solve(const MatrixBase& other) const; + template + inline const internal::triangular_solve_retval + solve(const MatrixBase& other) const; template void solveInPlace(const MatrixBase& 
other) const; - template - typename ei_plain_matrix_type_column_major::type - solve(const MatrixBase& other) const + template + inline const internal::triangular_solve_retval + solve(const MatrixBase& other) const { return solve(other); } template void solveInPlace(const MatrixBase& other) const { return solveInPlace(other); } - const SelfAdjointView<_MatrixTypeNested,Mode> selfadjointView() const + const SelfAdjointView selfadjointView() const { EIGEN_STATIC_ASSERT((Mode&UnitDiag)==0,PROGRAMMING_ERROR); - return SelfAdjointView<_MatrixTypeNested,Mode>(m_matrix); + return SelfAdjointView(m_matrix); } - SelfAdjointView<_MatrixTypeNested,Mode> selfadjointView() + SelfAdjointView selfadjointView() { EIGEN_STATIC_ASSERT((Mode&UnitDiag)==0,PROGRAMMING_ERROR); - return SelfAdjointView<_MatrixTypeNested,Mode>(m_matrix); + return SelfAdjointView(m_matrix); } template - void swap(TriangularBase EIGEN_REF_TO_TEMPORARY other) + void swap(TriangularBase const & other) { TriangularView,Mode>(const_cast(m_matrix)).lazyAssign(other.derived()); } template - void swap(MatrixBase EIGEN_REF_TO_TEMPORARY other) + void swap(MatrixBase const & other) { TriangularView,Mode>(const_cast(m_matrix)).lazyAssign(other.derived()); } @@ -339,8 +386,51 @@ template class TriangularView else return m_matrix.diagonal().prod(); } - + + // TODO simplify the following: + template + EIGEN_STRONG_INLINE TriangularView& operator=(const ProductBase& other) + { + setZero(); + return assignProduct(other,1); + } + + template + EIGEN_STRONG_INLINE TriangularView& operator+=(const ProductBase& other) + { + return assignProduct(other,1); + } + + template + EIGEN_STRONG_INLINE TriangularView& operator-=(const ProductBase& other) + { + return assignProduct(other,-1); + } + + + template + EIGEN_STRONG_INLINE TriangularView& operator=(const ScaledProduct& other) + { + setZero(); + return assignProduct(other,other.alpha()); + } + + template + EIGEN_STRONG_INLINE TriangularView& operator+=(const ScaledProduct& other) 
+ { + return assignProduct(other,other.alpha()); + } + + template + EIGEN_STRONG_INLINE TriangularView& operator-=(const ScaledProduct& other) + { + return assignProduct(other,-other.alpha()); + } + protected: + + template + EIGEN_STRONG_INLINE TriangularView& assignProduct(const ProductBase& prod, const Scalar& alpha); const MatrixTypeNested m_matrix; }; @@ -349,8 +439,10 @@ template class TriangularView * Implementation of triangular evaluation/assignment ***************************************************************************/ +namespace internal { + template -struct ei_triangular_assignment_selector +struct triangular_assignment_selector { enum { col = (UnrollCount-1) / Derived1::RowsAtCompileTime, @@ -359,9 +451,9 @@ struct ei_triangular_assignment_selector inline static void run(Derived1 &dst, const Derived2 &src) { - ei_triangular_assignment_selector::run(dst, src); + triangular_assignment_selector::run(dst, src); - ei_assert( Mode == Upper || Mode == Lower + eigen_assert( Mode == Upper || Mode == Lower || Mode == StrictlyUpper || Mode == StrictlyLower || Mode == UnitUpper || Mode == UnitLower); if((Mode == Upper && row <= col) @@ -383,13 +475,13 @@ struct ei_triangular_assignment_selector // prevent buggy user code from causing an infinite recursion template -struct ei_triangular_assignment_selector +struct triangular_assignment_selector { inline static void run(Derived1 &, const Derived2 &) {} }; template -struct ei_triangular_assignment_selector +struct triangular_assignment_selector { typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) @@ -407,7 +499,7 @@ struct ei_triangular_assignment_selector -struct ei_triangular_assignment_selector +struct triangular_assignment_selector { typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) @@ -425,7 +517,7 @@ struct ei_triangular_assignment_selector -struct ei_triangular_assignment_selector +struct 
triangular_assignment_selector { typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) @@ -443,7 +535,7 @@ struct ei_triangular_assignment_selector -struct ei_triangular_assignment_selector +struct triangular_assignment_selector { typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) @@ -461,7 +553,7 @@ struct ei_triangular_assignment_selector -struct ei_triangular_assignment_selector +struct triangular_assignment_selector { typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) @@ -481,7 +573,7 @@ struct ei_triangular_assignment_selector -struct ei_triangular_assignment_selector +struct triangular_assignment_selector { typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) @@ -501,6 +593,8 @@ struct ei_triangular_assignment_selector template @@ -509,7 +603,7 @@ TriangularView::operator=(const MatrixBase& othe { if(OtherDerived::Flags & EvalBeforeAssigningBit) { - typename ei_plain_matrix_type::type other_evaluated(other.rows(), other.cols()); + typename internal::plain_matrix_type::type other_evaluated(other.rows(), other.cols()); other_evaluated.template triangularView().lazyAssign(other.derived()); lazyAssign(other_evaluated); } @@ -525,12 +619,12 @@ void TriangularView::lazyAssign(const MatrixBase { enum { unroll = MatrixType::SizeAtCompileTime != Dynamic - && ei_traits::CoeffReadCost != Dynamic - && MatrixType::SizeAtCompileTime*ei_traits::CoeffReadCost/2 <= EIGEN_UNROLLING_LIMIT + && internal::traits::CoeffReadCost != Dynamic + && MatrixType::SizeAtCompileTime*internal::traits::CoeffReadCost/2 <= EIGEN_UNROLLING_LIMIT }; - ei_assert(m_matrix.rows() == other.rows() && m_matrix.cols() == other.cols()); + eigen_assert(m_matrix.rows() == other.rows() && m_matrix.cols() == other.cols()); - ei_triangular_assignment_selector + internal::triangular_assignment_selector inline TriangularView& 
TriangularView::operator=(const TriangularBase& other) { - ei_assert(Mode == int(OtherDerived::Mode)); - if(ei_traits::Flags & EvalBeforeAssigningBit) + eigen_assert(Mode == int(OtherDerived::Mode)); + if(internal::traits::Flags & EvalBeforeAssigningBit) { typename OtherDerived::DenseMatrixType other_evaluated(other.rows(), other.cols()); other_evaluated.template triangularView().lazyAssign(other.derived().nestedExpression()); @@ -562,13 +656,13 @@ void TriangularView::lazyAssign(const TriangularBase::CoeffReadCost != Dynamic - && MatrixType::SizeAtCompileTime * ei_traits::CoeffReadCost / 2 + && internal::traits::CoeffReadCost != Dynamic + && MatrixType::SizeAtCompileTime * internal::traits::CoeffReadCost / 2 <= EIGEN_UNROLLING_LIMIT }; - ei_assert(m_matrix.rows() == other.rows() && m_matrix.cols() == other.cols()); + eigen_assert(m_matrix.rows() == other.rows() && m_matrix.cols() == other.cols()); - ei_triangular_assignment_selector + internal::triangular_assignment_selector template void TriangularBase::evalTo(MatrixBase &other) const { - if(ei_traits::Flags & EvalBeforeAssigningBit) + if(internal::traits::Flags & EvalBeforeAssigningBit) { - typename ei_plain_matrix_type::type other_evaluated(rows(), cols()); + typename internal::plain_matrix_type::type other_evaluated(rows(), cols()); evalToLazy(other_evaluated); other.derived().swap(other_evaluated); } @@ -603,14 +697,14 @@ void TriangularBase::evalToLazy(MatrixBase &other) const { enum { unroll = DenseDerived::SizeAtCompileTime != Dynamic - && ei_traits::CoeffReadCost != Dynamic - && DenseDerived::SizeAtCompileTime * ei_traits::CoeffReadCost / 2 + && internal::traits::CoeffReadCost != Dynamic + && DenseDerived::SizeAtCompileTime * internal::traits::CoeffReadCost / 2 <= EIGEN_UNROLLING_LIMIT }; - ei_assert(this->rows() == other.rows() && this->cols() == other.cols()); + other.derived().resize(this->rows(), this->cols()); - ei_triangular_assignment_selector - ::ExpressionType, Derived::Mode, + 
internal::triangular_assignment_selector + ::MatrixTypeNestedCleaned, Derived::Mode, unroll ? int(DenseDerived::SizeAtCompileTime) : Dynamic, true // clear the opposite triangular part >::run(other.derived(), derived().nestedExpression()); @@ -624,10 +718,28 @@ void TriangularBase::evalToLazy(MatrixBase &other) const * Implementation of MatrixBase methods ***************************************************************************/ +#ifdef EIGEN2_SUPPORT + +// implementation of part<>(), including the SelfAdjoint case. + +namespace internal { +template +struct eigen2_part_return_type +{ + typedef TriangularView type; +}; + +template +struct eigen2_part_return_type +{ + typedef SelfAdjointView type; +}; +} + /** \deprecated use MatrixBase::triangularView() */ template template -EIGEN_DEPRECATED const TriangularView MatrixBase::part() const +const typename internal::eigen2_part_return_type::type MatrixBase::part() const { return derived(); } @@ -635,10 +747,11 @@ EIGEN_DEPRECATED const TriangularView MatrixBase::part() /** \deprecated use MatrixBase::triangularView() */ template template -EIGEN_DEPRECATED TriangularView MatrixBase::part() +typename internal::eigen2_part_return_type::type MatrixBase::part() { return derived(); } +#endif /** * \returns an expression of a triangular view extracted from the current matrix @@ -653,7 +766,8 @@ EIGEN_DEPRECATED TriangularView MatrixBase::part() */ template template -TriangularView MatrixBase::triangularView() +typename MatrixBase::template TriangularViewReturnType::Type +MatrixBase::triangularView() { return derived(); } @@ -661,7 +775,8 @@ TriangularView MatrixBase::triangularView() /** This is the const version of MatrixBase::triangularView() */ template template -const TriangularView MatrixBase::triangularView() const +typename MatrixBase::template ConstTriangularViewReturnType::Type +MatrixBase::triangularView() const { return derived(); } @@ -669,7 +784,7 @@ const TriangularView MatrixBase::triangularView() const /** 
\returns true if *this is approximately equal to an upper triangular matrix, * within the precision given by \a prec. * - * \sa isLowerTriangular(), extract(), part(), marked() + * \sa isLowerTriangular() */ template bool MatrixBase::isUpperTriangular(RealScalar prec) const @@ -680,21 +795,21 @@ bool MatrixBase::isUpperTriangular(RealScalar prec) const Index maxi = std::min(j, rows()-1); for(Index i = 0; i <= maxi; ++i) { - RealScalar absValue = ei_abs(coeff(i,j)); + RealScalar absValue = internal::abs(coeff(i,j)); if(absValue > maxAbsOnUpperPart) maxAbsOnUpperPart = absValue; } } RealScalar threshold = maxAbsOnUpperPart * prec; for(Index j = 0; j < cols(); ++j) for(Index i = j+1; i < rows(); ++i) - if(ei_abs(coeff(i, j)) > threshold) return false; + if(internal::abs(coeff(i, j)) > threshold) return false; return true; } /** \returns true if *this is approximately equal to a lower triangular matrix, * within the precision given by \a prec. * - * \sa isUpperTriangular(), extract(), part(), marked() + * \sa isUpperTriangular() */ template bool MatrixBase::isLowerTriangular(RealScalar prec) const @@ -703,7 +818,7 @@ bool MatrixBase::isLowerTriangular(RealScalar prec) const for(Index j = 0; j < cols(); ++j) for(Index i = j; i < rows(); ++i) { - RealScalar absValue = ei_abs(coeff(i,j)); + RealScalar absValue = internal::abs(coeff(i,j)); if(absValue > maxAbsOnLowerPart) maxAbsOnLowerPart = absValue; } RealScalar threshold = maxAbsOnLowerPart * prec; @@ -711,7 +826,7 @@ bool MatrixBase::isLowerTriangular(RealScalar prec) const { Index maxi = std::min(j, rows()-1); for(Index i = 0; i < maxi; ++i) - if(ei_abs(coeff(i, j)) > threshold) return false; + if(internal::abs(coeff(i, j)) > threshold) return false; } return true; } diff --git a/gtsam/3rdparty/Eigen/src/Core/VectorBlock.h b/gtsam/3rdparty/Eigen/src/Core/VectorBlock.h index 84040bca1..858e4c786 100644 --- a/gtsam/3rdparty/Eigen/src/Core/VectorBlock.h +++ b/gtsam/3rdparty/Eigen/src/Core/VectorBlock.h @@ -56,24 +56,27 
@@ * * \sa class Block, DenseBase::segment(Index,Index,Index,Index), DenseBase::segment(Index,Index) */ + +namespace internal { template -struct ei_traits > - : public ei_traits::Flags & RowMajorBit ? 1 : Size, - ei_traits::Flags & RowMajorBit ? Size : 1> > +struct traits > + : public traits::Flags & RowMajorBit ? 1 : Size, + traits::Flags & RowMajorBit ? Size : 1> > { }; +} template class VectorBlock : public Block::Flags & RowMajorBit ? 1 : Size, - ei_traits::Flags & RowMajorBit ? Size : 1> + internal::traits::Flags & RowMajorBit ? 1 : Size, + internal::traits::Flags & RowMajorBit ? Size : 1> { typedef Block::Flags & RowMajorBit ? 1 : Size, - ei_traits::Flags & RowMajorBit ? Size : 1> Base; + internal::traits::Flags & RowMajorBit ? 1 : Size, + internal::traits::Flags & RowMajorBit ? Size : 1> Base; enum { - IsColVector = !(ei_traits::Flags & RowMajorBit) + IsColVector = !(internal::traits::Flags & RowMajorBit) }; public: EIGEN_DENSE_PUBLIC_INTERFACE(VectorBlock) @@ -82,7 +85,7 @@ template class VectorBlock /** Dynamic-size constructor */ - inline VectorBlock(const VectorType& vector, Index start, Index size) + inline VectorBlock(VectorType& vector, Index start, Index size) : Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start, IsColVector ? size : 1, IsColVector ? 1 : size) @@ -92,7 +95,7 @@ template class VectorBlock /** Fixed-size constructor */ - inline VectorBlock(const VectorType& vector, Index start) + inline VectorBlock(VectorType& vector, Index start) : Base(vector, IsColVector ? start : 0, IsColVector ? 
0 : start) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock); @@ -117,20 +120,20 @@ template class VectorBlock * \sa class Block, segment(Index) */ template -inline VectorBlock DenseBase - ::segment(Index start, Index size) +inline typename DenseBase::SegmentReturnType +DenseBase::segment(Index start, Index size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return VectorBlock(derived(), start, size); + return SegmentReturnType(derived(), start, size); } /** This is the const version of segment(Index,Index).*/ template -inline const VectorBlock +inline typename DenseBase::ConstSegmentReturnType DenseBase::segment(Index start, Index size) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return VectorBlock(derived(), start, size); + return ConstSegmentReturnType(derived(), start, size); } /** \returns a dynamic-size expression of the first coefficients of *this. @@ -149,20 +152,20 @@ DenseBase::segment(Index start, Index size) const * \sa class Block, block(Index,Index) */ template -inline VectorBlock +inline typename DenseBase::SegmentReturnType DenseBase::head(Index size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return VectorBlock(derived(), 0, size); + return SegmentReturnType(derived(), 0, size); } /** This is the const version of head(Index).*/ template -inline const VectorBlock +inline typename DenseBase::ConstSegmentReturnType DenseBase::head(Index size) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return VectorBlock(derived(), 0, size); + return ConstSegmentReturnType(derived(), 0, size); } /** \returns a dynamic-size expression of the last coefficients of *this. 
@@ -181,20 +184,20 @@ DenseBase::head(Index size) const * \sa class Block, block(Index,Index) */ template -inline VectorBlock +inline typename DenseBase::SegmentReturnType DenseBase::tail(Index size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return VectorBlock(derived(), this->size() - size, size); + return SegmentReturnType(derived(), this->size() - size, size); } /** This is the const version of tail(Index).*/ template -inline const VectorBlock +inline typename DenseBase::ConstSegmentReturnType DenseBase::tail(Index size) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return VectorBlock(derived(), this->size() - size, size); + return ConstSegmentReturnType(derived(), this->size() - size, size); } /** \returns a fixed-size expression of a segment (i.e. a vector block) in \c *this @@ -212,21 +215,21 @@ DenseBase::tail(Index size) const */ template template -inline VectorBlock +inline typename DenseBase::template FixedSegmentReturnType::Type DenseBase::segment(Index start) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return VectorBlock(derived(), start); + return typename FixedSegmentReturnType::Type(derived(), start); } /** This is the const version of segment(Index).*/ template template -inline const VectorBlock +inline typename DenseBase::template ConstFixedSegmentReturnType::Type DenseBase::segment(Index start) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return VectorBlock(derived(), start); + return typename ConstFixedSegmentReturnType::Type(derived(), start); } /** \returns a fixed-size expression of the first coefficients of *this. 
@@ -242,21 +245,21 @@ DenseBase::segment(Index start) const */ template template -inline VectorBlock +inline typename DenseBase::template FixedSegmentReturnType::Type DenseBase::head() { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return VectorBlock(derived(), 0); + return typename FixedSegmentReturnType::Type(derived(), 0); } /** This is the const version of head().*/ template template -inline const VectorBlock +inline typename DenseBase::template ConstFixedSegmentReturnType::Type DenseBase::head() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return VectorBlock(derived(), 0); + return typename ConstFixedSegmentReturnType::Type(derived(), 0); } /** \returns a fixed-size expression of the last coefficients of *this. @@ -272,21 +275,21 @@ DenseBase::head() const */ template template -inline VectorBlock +inline typename DenseBase::template FixedSegmentReturnType::Type DenseBase::tail() { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return VectorBlock(derived(), size() - Size); + return typename FixedSegmentReturnType::Type(derived(), size() - Size); } /** This is the const version of tail.*/ template template -inline const VectorBlock +inline typename DenseBase::template ConstFixedSegmentReturnType::Type DenseBase::tail() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return VectorBlock(derived(), size() - Size); + return typename ConstFixedSegmentReturnType::Type(derived(), size() - Size); } diff --git a/gtsam/3rdparty/Eigen/src/Core/VectorwiseOp.h b/gtsam/3rdparty/Eigen/src/Core/VectorwiseOp.h index 7f7ab842e..e328d94aa 100644 --- a/gtsam/3rdparty/Eigen/src/Core/VectorwiseOp.h +++ b/gtsam/3rdparty/Eigen/src/Core/VectorwiseOp.h @@ -45,16 +45,17 @@ template< typename MatrixType, typename MemberOp, int Direction> class PartialReduxExpr; +namespace internal { template -struct ei_traits > - : ei_traits +struct traits > + : traits { typedef typename MemberOp::result_type Scalar; - typedef typename ei_traits::StorageKind StorageKind; - typedef typename 
ei_traits::XprKind XprKind; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::XprKind XprKind; typedef typename MatrixType::Scalar InputScalar; - typedef typename ei_nested::type MatrixTypeNested; - typedef typename ei_cleantype::type _MatrixTypeNested; + typedef typename nested::type MatrixTypeNested; + typedef typename remove_all::type _MatrixTypeNested; enum { RowsAtCompileTime = Direction==Vertical ? 1 : MatrixType::RowsAtCompileTime, ColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::ColsAtCompileTime, @@ -70,20 +71,21 @@ struct ei_traits > typedef typename MemberOp::template Cost CostOpType; #endif enum { - CoeffReadCost = TraversalSize * ei_traits<_MatrixTypeNested>::CoeffReadCost + int(CostOpType::value) + CoeffReadCost = TraversalSize * traits<_MatrixTypeNested>::CoeffReadCost + int(CostOpType::value) }; }; +} template< typename MatrixType, typename MemberOp, int Direction> -class PartialReduxExpr : ei_no_assignment_operator, - public ei_dense_xpr_base< PartialReduxExpr >::type +class PartialReduxExpr : internal::no_assignment_operator, + public internal::dense_xpr_base< PartialReduxExpr >::type { public: - typedef typename ei_dense_xpr_base::type Base; + typedef typename internal::dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(PartialReduxExpr) - typedef typename ei_traits::MatrixTypeNested MatrixTypeNested; - typedef typename ei_traits::_MatrixTypeNested _MatrixTypeNested; + typedef typename internal::traits::MatrixTypeNested MatrixTypeNested; + typedef typename internal::traits::_MatrixTypeNested _MatrixTypeNested; PartialReduxExpr(const MatrixType& mat, const MemberOp& func = MemberOp()) : m_matrix(mat), m_functor(func) {} @@ -114,8 +116,8 @@ class PartialReduxExpr : ei_no_assignment_operator, #define EIGEN_MEMBER_FUNCTOR(MEMBER,COST) \ template \ - struct ei_member_##MEMBER { \ - EIGEN_EMPTY_STRUCT_CTOR(ei_member_##MEMBER) \ + struct member_##MEMBER { \ + EIGEN_EMPTY_STRUCT_CTOR(member_##MEMBER) \ typedef 
ResultType result_type; \ template struct Cost \ { enum { value = COST }; }; \ @@ -124,11 +126,13 @@ class PartialReduxExpr : ei_no_assignment_operator, { return mat.MEMBER(); } \ } +namespace internal { + EIGEN_MEMBER_FUNCTOR(squaredNorm, Size * NumTraits::MulCost + (Size-1)*NumTraits::AddCost); EIGEN_MEMBER_FUNCTOR(norm, (Size+5) * NumTraits::MulCost + (Size-1)*NumTraits::AddCost); EIGEN_MEMBER_FUNCTOR(stableNorm, (Size+5) * NumTraits::MulCost + (Size-1)*NumTraits::AddCost); EIGEN_MEMBER_FUNCTOR(blueNorm, (Size+5) * NumTraits::MulCost + (Size-1)*NumTraits::AddCost); -EIGEN_MEMBER_FUNCTOR(hypotNorm, (Size-1) * ei_functor_traits >::Cost ); +EIGEN_MEMBER_FUNCTOR(hypotNorm, (Size-1) * functor_traits >::Cost ); EIGEN_MEMBER_FUNCTOR(sum, (Size-1)*NumTraits::AddCost); EIGEN_MEMBER_FUNCTOR(mean, (Size-1)*NumTraits::AddCost + NumTraits::MulCost); EIGEN_MEMBER_FUNCTOR(minCoeff, (Size-1)*NumTraits::AddCost); @@ -139,20 +143,20 @@ EIGEN_MEMBER_FUNCTOR(count, (Size-1)*NumTraits::AddCost); EIGEN_MEMBER_FUNCTOR(prod, (Size-1)*NumTraits::MulCost); -/** \internal */ template -struct ei_member_redux { - typedef typename ei_result_of< +struct member_redux { + typedef typename result_of< BinaryOp(Scalar) >::type result_type; template struct Cost - { enum { value = (Size-1) * ei_functor_traits::Cost }; }; - ei_member_redux(const BinaryOp func) : m_functor(func) {} + { enum { value = (Size-1) * functor_traits::Cost }; }; + member_redux(const BinaryOp func) : m_functor(func) {} template inline result_type operator()(const DenseBase& mat) const { return mat.redux(m_functor); } const BinaryOp m_functor; }; +} /** \class VectorwiseOp * \ingroup Core_Module @@ -178,11 +182,12 @@ template class VectorwiseOp typedef typename ExpressionType::Scalar Scalar; typedef typename ExpressionType::RealScalar RealScalar; typedef typename ExpressionType::Index Index; - typedef typename ei_meta_if::ret, - ExpressionType, const ExpressionType&>::ret ExpressionTypeNested; + typedef typename 
internal::conditional::ret, + ExpressionType, ExpressionType&>::type ExpressionTypeNested; + typedef typename internal::remove_all::type ExpressionTypeNestedCleaned; template class Functor, - typename Scalar=typename ei_traits::Scalar> struct ReturnType + typename Scalar=typename internal::traits::Scalar> struct ReturnType { typedef PartialReduxExpr, @@ -193,7 +198,7 @@ template class VectorwiseOp template struct ReduxReturnType { typedef PartialReduxExpr::Scalar>, + internal::member_redux::Scalar>, Direction > Type; }; @@ -207,9 +212,9 @@ template class VectorwiseOp /** \internal * \returns the i-th subvector according to the \c Direction */ - typedef typename ei_meta_if::ret SubVector; + typename ExpressionType::RowXpr>::type SubVector; SubVector subVector(Index i) { return SubVector(m_matrix.derived(),i); @@ -241,7 +246,7 @@ template class VectorwiseOp public: - inline VectorwiseOp(const ExpressionType& matrix) : m_matrix(matrix) {} + inline VectorwiseOp(ExpressionType& matrix) : m_matrix(matrix) {} /** \internal */ inline const ExpressionType& _expression() const { return m_matrix; } @@ -265,7 +270,7 @@ template class VectorwiseOp * Output: \verbinclude PartialRedux_minCoeff.out * * \sa DenseBase::minCoeff() */ - const typename ReturnType::Type minCoeff() const + const typename ReturnType::Type minCoeff() const { return _expression(); } /** \returns a row (or column) vector expression of the largest coefficient @@ -275,7 +280,7 @@ template class VectorwiseOp * Output: \verbinclude PartialRedux_maxCoeff.out * * \sa DenseBase::maxCoeff() */ - const typename ReturnType::Type maxCoeff() const + const typename ReturnType::Type maxCoeff() const { return _expression(); } /** \returns a row (or column) vector expression of the squared norm @@ -285,7 +290,7 @@ template class VectorwiseOp * Output: \verbinclude PartialRedux_squaredNorm.out * * \sa DenseBase::squaredNorm() */ - const typename ReturnType::Type squaredNorm() const + const typename ReturnType::Type 
squaredNorm() const { return _expression(); } /** \returns a row (or column) vector expression of the norm @@ -295,7 +300,7 @@ template class VectorwiseOp * Output: \verbinclude PartialRedux_norm.out * * \sa DenseBase::norm() */ - const typename ReturnType::Type norm() const + const typename ReturnType::Type norm() const { return _expression(); } @@ -304,7 +309,7 @@ template class VectorwiseOp * blue's algorithm. * * \sa DenseBase::blueNorm() */ - const typename ReturnType::Type blueNorm() const + const typename ReturnType::Type blueNorm() const { return _expression(); } @@ -313,7 +318,7 @@ template class VectorwiseOp * underflow and overflow. * * \sa DenseBase::stableNorm() */ - const typename ReturnType::Type stableNorm() const + const typename ReturnType::Type stableNorm() const { return _expression(); } @@ -322,7 +327,7 @@ template class VectorwiseOp * underflow and overflow using a concatenation of hypot() calls. * * \sa DenseBase::hypotNorm() */ - const typename ReturnType::Type hypotNorm() const + const typename ReturnType::Type hypotNorm() const { return _expression(); } /** \returns a row (or column) vector expression of the sum @@ -332,28 +337,28 @@ template class VectorwiseOp * Output: \verbinclude PartialRedux_sum.out * * \sa DenseBase::sum() */ - const typename ReturnType::Type sum() const + const typename ReturnType::Type sum() const { return _expression(); } /** \returns a row (or column) vector expression of the mean * of each column (or row) of the referenced expression. * * \sa DenseBase::mean() */ - const typename ReturnType::Type mean() const + const typename ReturnType::Type mean() const { return _expression(); } /** \returns a row (or column) vector expression representing * whether \b all coefficients of each respective column (or row) are \c true. 
* * \sa DenseBase::all() */ - const typename ReturnType::Type all() const + const typename ReturnType::Type all() const { return _expression(); } /** \returns a row (or column) vector expression representing * whether \b at \b least one coefficient of each respective column (or row) is \c true. * * \sa DenseBase::any() */ - const typename ReturnType::Type any() const + const typename ReturnType::Type any() const { return _expression(); } /** \returns a row (or column) vector expression representing @@ -363,7 +368,7 @@ template class VectorwiseOp * Output: \verbinclude PartialRedux_count.out * * \sa DenseBase::count() */ - const PartialReduxExpr, Direction> count() const + const PartialReduxExpr, Direction> count() const { return _expression(); } /** \returns a row (or column) vector expression of the product @@ -373,7 +378,7 @@ template class VectorwiseOp * Output: \verbinclude PartialRedux_prod.out * * \sa DenseBase::prod() */ - const typename ReturnType::Type prod() const + const typename ReturnType::Type prod() const { return _expression(); } @@ -413,7 +418,7 @@ template class VectorwiseOp ExpressionType& operator=(const DenseBase& other) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) - //ei_assert((m_matrix.isNull()) == (other.isNull())); FIXME + //eigen_assert((m_matrix.isNull()) == (other.isNull())); FIXME for(Index j=0; j(m_matrix); @@ -440,10 +445,10 @@ template class VectorwiseOp } /** Returns the expression of the sum of the vector \a other to each subvector of \c *this */ - template EIGEN_STRONG_INLINE - CwiseBinaryOp, - ExpressionType, - typename ExtendedType::Type> + template EIGEN_STRONG_INLINE + CwiseBinaryOp, + const ExpressionTypeNestedCleaned, + const typename ExtendedType::Type> operator+(const DenseBase& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived); @@ -452,9 +457,9 @@ template class VectorwiseOp /** Returns the expression of the difference between each subvector of \c *this and the vector \a other */ template - CwiseBinaryOp, - 
ExpressionType, - typename ExtendedType::Type> + CwiseBinaryOp, + const ExpressionTypeNestedCleaned, + const typename ExtendedType::Type> operator-(const DenseBase& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived); @@ -463,35 +468,37 @@ template class VectorwiseOp /////////// Geometry module /////////// + #if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS Homogeneous homogeneous() const; + #endif typedef typename ExpressionType::PlainObject CrossReturnType; template const CrossReturnType cross(const MatrixBase& other) const; enum { - HNormalized_Size = Direction==Vertical ? ei_traits::RowsAtCompileTime - : ei_traits::ColsAtCompileTime, + HNormalized_Size = Direction==Vertical ? internal::traits::RowsAtCompileTime + : internal::traits::ColsAtCompileTime, HNormalized_SizeMinusOne = HNormalized_Size==Dynamic ? Dynamic : HNormalized_Size-1 }; - typedef Block::RowsAtCompileTime), + : int(internal::traits::RowsAtCompileTime), Direction==Horizontal ? int(HNormalized_SizeMinusOne) - : int(ei_traits::ColsAtCompileTime)> + : int(internal::traits::ColsAtCompileTime)> HNormalized_Block; - typedef Block::RowsAtCompileTime), - Direction==Horizontal ? 1 : int(ei_traits::ColsAtCompileTime)> + typedef Block::RowsAtCompileTime), + Direction==Horizontal ? 
1 : int(internal::traits::ColsAtCompileTime)> HNormalized_Factors; - typedef CwiseBinaryOp::Scalar>, - HNormalized_Block, - Replicate::Scalar>, + const HNormalized_Block, + const Replicate > HNormalizedReturnType; - HNormalizedReturnType hnormalized() const; + const HNormalizedReturnType hnormalized() const; protected: ExpressionTypeNested m_matrix; @@ -505,7 +512,7 @@ template class VectorwiseOp * \sa rowwise(), class VectorwiseOp */ template -inline const VectorwiseOp +inline const typename DenseBase::ConstColwiseReturnType DenseBase::colwise() const { return derived(); @@ -516,7 +523,7 @@ DenseBase::colwise() const * \sa rowwise(), class VectorwiseOp */ template -inline VectorwiseOp +inline typename DenseBase::ColwiseReturnType DenseBase::colwise() { return derived(); @@ -530,7 +537,7 @@ DenseBase::colwise() * \sa colwise(), class VectorwiseOp */ template -inline const VectorwiseOp +inline const typename DenseBase::ConstRowwiseReturnType DenseBase::rowwise() const { return derived(); @@ -541,7 +548,7 @@ DenseBase::rowwise() const * \sa colwise(), class VectorwiseOp */ template -inline VectorwiseOp +inline typename DenseBase::RowwiseReturnType DenseBase::rowwise() { return derived(); diff --git a/gtsam/3rdparty/Eigen/src/Core/Visitor.h b/gtsam/3rdparty/Eigen/src/Core/Visitor.h index db6fac4b1..378ebcba1 100644 --- a/gtsam/3rdparty/Eigen/src/Core/Visitor.h +++ b/gtsam/3rdparty/Eigen/src/Core/Visitor.h @@ -25,8 +25,10 @@ #ifndef EIGEN_VISITOR_H #define EIGEN_VISITOR_H +namespace internal { + template -struct ei_visitor_impl +struct visitor_impl { enum { col = (UnrollCount-1) / Derived::RowsAtCompileTime, @@ -35,13 +37,13 @@ struct ei_visitor_impl inline static void run(const Derived &mat, Visitor& visitor) { - ei_visitor_impl::run(mat, visitor); + visitor_impl::run(mat, visitor); visitor(mat.coeff(row, col), row, col); } }; template -struct ei_visitor_impl +struct visitor_impl { inline static void run(const Derived &mat, Visitor& visitor) { @@ -50,7 +52,7 @@ struct 
ei_visitor_impl }; template -struct ei_visitor_impl +struct visitor_impl { typedef typename Derived::Index Index; inline static void run(const Derived& mat, Visitor& visitor) @@ -64,6 +66,7 @@ struct ei_visitor_impl } }; +} // end namespace internal /** Applies the visitor \a visitor to the whole coefficients of the matrix or vector. * @@ -88,19 +91,21 @@ void DenseBase::visit(Visitor& visitor) const { enum { unroll = SizeAtCompileTime != Dynamic && CoeffReadCost != Dynamic - && (SizeAtCompileTime == 1 || ei_functor_traits::Cost != Dynamic) - && SizeAtCompileTime * CoeffReadCost + (SizeAtCompileTime-1) * ei_functor_traits::Cost + && (SizeAtCompileTime == 1 || internal::functor_traits::Cost != Dynamic) + && SizeAtCompileTime * CoeffReadCost + (SizeAtCompileTime-1) * internal::functor_traits::Cost <= EIGEN_UNROLLING_LIMIT }; - return ei_visitor_impl::run(derived(), visitor); } +namespace internal { + /** \internal * \brief Base class to implement min and max visitors */ template -struct ei_coeff_visitor +struct coeff_visitor { typedef typename Derived::Index Index; typedef typename Derived::Scalar Scalar; @@ -120,7 +125,7 @@ struct ei_coeff_visitor * \sa DenseBase::minCoeff(Index*, Index*) */ template -struct ei_min_coeff_visitor : ei_coeff_visitor +struct min_coeff_visitor : coeff_visitor { typedef typename Derived::Index Index; typedef typename Derived::Scalar Scalar; @@ -136,7 +141,7 @@ struct ei_min_coeff_visitor : ei_coeff_visitor }; template -struct ei_functor_traits > { +struct functor_traits > { enum { Cost = NumTraits::AddCost }; @@ -148,7 +153,7 @@ struct ei_functor_traits > { * \sa DenseBase::maxCoeff(Index*, Index*) */ template -struct ei_max_coeff_visitor : ei_coeff_visitor +struct max_coeff_visitor : coeff_visitor { typedef typename Derived::Index Index; typedef typename Derived::Scalar Scalar; @@ -164,22 +169,25 @@ struct ei_max_coeff_visitor : ei_coeff_visitor }; template -struct ei_functor_traits > { +struct functor_traits > { enum { Cost = 
NumTraits::AddCost }; }; +} // end namespace internal + /** \returns the minimum of all coefficients of *this * and puts in *row and *col its location. * * \sa DenseBase::minCoeff(Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::minCoeff() */ template -typename ei_traits::Scalar -DenseBase::minCoeff(Index* row, Index* col) const +template +typename internal::traits::Scalar +DenseBase::minCoeff(IndexType* row, IndexType* col) const { - ei_min_coeff_visitor minVisitor; + internal::min_coeff_visitor minVisitor; this->visit(minVisitor); *row = minVisitor.row; if (col) *col = minVisitor.col; @@ -189,14 +197,15 @@ DenseBase::minCoeff(Index* row, Index* col) const /** \returns the minimum of all coefficients of *this * and puts in *index its location. * - * \sa DenseBase::minCoeff(Index*,Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::minCoeff() + * \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::minCoeff() */ template -typename ei_traits::Scalar -DenseBase::minCoeff(Index* index) const +template +typename internal::traits::Scalar +DenseBase::minCoeff(IndexType* index) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - ei_min_coeff_visitor minVisitor; + internal::min_coeff_visitor minVisitor; this->visit(minVisitor); *index = (RowsAtCompileTime==1) ? minVisitor.col : minVisitor.row; return minVisitor.res; @@ -205,13 +214,14 @@ DenseBase::minCoeff(Index* index) const /** \returns the maximum of all coefficients of *this * and puts in *row and *col its location. 
* - * \sa DenseBase::minCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::maxCoeff() + * \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::maxCoeff() */ template -typename ei_traits::Scalar -DenseBase::maxCoeff(Index* row, Index* col) const +template +typename internal::traits::Scalar +DenseBase::maxCoeff(IndexType* row, IndexType* col) const { - ei_max_coeff_visitor maxVisitor; + internal::max_coeff_visitor maxVisitor; this->visit(maxVisitor); *row = maxVisitor.row; if (col) *col = maxVisitor.col; @@ -221,14 +231,15 @@ DenseBase::maxCoeff(Index* row, Index* col) const /** \returns the maximum of all coefficients of *this * and puts in *index its location. * - * \sa DenseBase::maxCoeff(Index*,Index*), DenseBase::minCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::maxCoeff() + * \sa DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::maxCoeff() */ template -typename ei_traits::Scalar -DenseBase::maxCoeff(Index* index) const +template +typename internal::traits::Scalar +DenseBase::maxCoeff(IndexType* index) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - ei_max_coeff_visitor maxVisitor; + internal::max_coeff_visitor maxVisitor; this->visit(maxVisitor); *index = (RowsAtCompileTime==1) ? 
maxVisitor.col : maxVisitor.row; return maxVisitor.res; diff --git a/gtsam/3rdparty/Eigen/src/Core/arch/AltiVec/Complex.h b/gtsam/3rdparty/Eigen/src/Core/arch/AltiVec/Complex.h index ecada02f4..f8adf1b63 100644 --- a/gtsam/3rdparty/Eigen/src/Core/arch/AltiVec/Complex.h +++ b/gtsam/3rdparty/Eigen/src/Core/arch/AltiVec/Complex.h @@ -25,13 +25,15 @@ #ifndef EIGEN_COMPLEX_ALTIVEC_H #define EIGEN_COMPLEX_ALTIVEC_H -static Packet4ui ei_p4ui_CONJ_XOR = vec_mergeh((Packet4ui)ei_p4i_ZERO, (Packet4ui)ei_p4f_ZERO_);//{ 0x00000000, 0x80000000, 0x00000000, 0x80000000 }; -static Packet16uc ei_p16uc_COMPLEX_RE = vec_sld((Packet16uc) vec_splat((Packet4ui)ei_p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)ei_p16uc_FORWARD, 2), 8);//{ 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 }; -static Packet16uc ei_p16uc_COMPLEX_IM = vec_sld((Packet16uc) vec_splat((Packet4ui)ei_p16uc_FORWARD, 1), (Packet16uc) vec_splat((Packet4ui)ei_p16uc_FORWARD, 3), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 }; -static Packet16uc ei_p16uc_COMPLEX_REV = vec_sld(ei_p16uc_REVERSE, ei_p16uc_REVERSE, 8);//{ 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 }; -static Packet16uc ei_p16uc_COMPLEX_REV2 = vec_sld(ei_p16uc_FORWARD, ei_p16uc_FORWARD, 8);//{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }; -static Packet16uc ei_p16uc_PSET_HI = (Packet16uc) vec_mergeh((Packet4ui) vec_splat((Packet4ui)ei_p16uc_FORWARD, 0), (Packet4ui) vec_splat((Packet4ui)ei_p16uc_FORWARD, 1));//{ 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 }; -static Packet16uc ei_p16uc_PSET_LO = (Packet16uc) vec_mergeh((Packet4ui) vec_splat((Packet4ui)ei_p16uc_FORWARD, 2), (Packet4ui) vec_splat((Packet4ui)ei_p16uc_FORWARD, 3));//{ 8,9,10,11, 12,13,14,15, 8,9,10,11, 12,13,14,15 }; +namespace internal { + +static Packet4ui p4ui_CONJ_XOR = vec_mergeh((Packet4ui)p4i_ZERO, (Packet4ui)p4f_ZERO_);//{ 0x00000000, 0x80000000, 0x00000000, 0x80000000 }; +static Packet16uc p16uc_COMPLEX_RE = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) 
vec_splat((Packet4ui)p16uc_FORWARD, 2), 8);//{ 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 }; +static Packet16uc p16uc_COMPLEX_IM = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 1), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 }; +static Packet16uc p16uc_COMPLEX_REV = vec_sld(p16uc_REVERSE, p16uc_REVERSE, 8);//{ 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 }; +static Packet16uc p16uc_COMPLEX_REV2 = vec_sld(p16uc_FORWARD, p16uc_FORWARD, 8);//{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }; +static Packet16uc p16uc_PSET_HI = (Packet16uc) vec_mergeh((Packet4ui) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet4ui) vec_splat((Packet4ui)p16uc_FORWARD, 1));//{ 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 }; +static Packet16uc p16uc_PSET_LO = (Packet16uc) vec_mergeh((Packet4ui) vec_splat((Packet4ui)p16uc_FORWARD, 2), (Packet4ui) vec_splat((Packet4ui)p16uc_FORWARD, 3));//{ 8,9,10,11, 12,13,14,15, 8,9,10,11, 12,13,14,15 }; //---------- float ---------- struct Packet2cf @@ -41,11 +43,12 @@ struct Packet2cf Packet4f v; }; -template<> struct ei_packet_traits > : ei_default_packet_traits +template<> struct packet_traits > : default_packet_traits { typedef Packet2cf type; enum { Vectorizable = 1, + AlignedOnScalar = 1, size = 2, HasAdd = 1, @@ -61,106 +64,109 @@ template<> struct ei_packet_traits > : ei_default_packet_tr }; }; -template<> struct ei_unpacket_traits { typedef std::complex type; enum {size=2}; }; +template<> struct unpacket_traits { typedef std::complex type; enum {size=2}; }; -template<> EIGEN_STRONG_INLINE Packet2cf ei_pset1(const std::complex& from) +template<> EIGEN_STRONG_INLINE Packet2cf pset1(const std::complex& from) { Packet2cf res; /* On AltiVec we cannot load 64-bit registers, so wa have to take care of alignment */ - if ((ptrdiff_t)&from % 16 == 0) { - res.v = ei_pload((const float *)&from); - res.v = vec_perm(res.v, res.v, ei_p16uc_PSET_HI); - } else { - res.v = ei_ploadu((const float *)&from); - res.v = 
vec_perm(res.v, res.v, ei_p16uc_PSET_LO); - } + if((ptrdiff_t(&from) % 16) == 0) + res.v = pload((const float *)&from); + else + res.v = ploadu((const float *)&from); + res.v = vec_perm(res.v, res.v, p16uc_PSET_HI); return res; } -template<> EIGEN_STRONG_INLINE Packet2cf ei_padd(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_add(a.v,b.v)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_psub(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_sub(a.v,b.v)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pnegate(const Packet2cf& a) { return Packet2cf(ei_psub(ei_p4f_ZERO, a.v)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pconj(const Packet2cf& a) { return Packet2cf((Packet4f)vec_xor((Packet4ui)a.v, ei_p4ui_CONJ_XOR)); } +template<> EIGEN_STRONG_INLINE Packet2cf padd(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_add(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf psub(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_sub(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(a.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) { return Packet2cf((Packet4f)vec_xor((Packet4ui)a.v, p4ui_CONJ_XOR)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pmul(const Packet2cf& a, const Packet2cf& b) +template<> EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) { Packet4f v1, v2; // Permute and multiply the real parts of a and b - v1 = vec_perm(a.v, a.v, ei_p16uc_COMPLEX_RE); + v1 = vec_perm(a.v, a.v, p16uc_COMPLEX_RE); // Get the imaginary parts of a - v2 = vec_perm(a.v, a.v, ei_p16uc_COMPLEX_IM); + v2 = vec_perm(a.v, a.v, p16uc_COMPLEX_IM); // multiply a_re * b - v1 = vec_madd(v1, b.v, ei_p4f_ZERO); + v1 = vec_madd(v1, b.v, p4f_ZERO); // multiply a_im * b and get the conjugate result - v2 = vec_madd(v2, b.v, ei_p4f_ZERO); - v2 = (Packet4f) vec_xor((Packet4ui)v2, ei_p4ui_CONJ_XOR); + v2 = vec_madd(v2, b.v, 
p4f_ZERO); + v2 = (Packet4f) vec_xor((Packet4ui)v2, p4ui_CONJ_XOR); // permute back to a proper order - v2 = vec_perm(v2, v2, ei_p16uc_COMPLEX_REV); + v2 = vec_perm(v2, v2, p16uc_COMPLEX_REV); return Packet2cf(vec_add(v1, v2)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pand (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_and(a.v,b.v)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_por (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_or(a.v,b.v)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pxor (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_xor(a.v,b.v)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pandnot(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_and(a.v, vec_nor(b.v,b.v))); } +template<> EIGEN_STRONG_INLINE Packet2cf pand (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_and(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf por (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_or(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pxor (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_xor(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pandnot(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_and(a.v, vec_nor(b.v,b.v))); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pload >(const std::complex* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(ei_pload((const float*)from)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_ploadu >(const std::complex* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ei_ploadu((const float*)from)); } +template<> EIGEN_STRONG_INLINE Packet2cf pload (const std::complex* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload((const float*)from)); } +template<> EIGEN_STRONG_INLINE Packet2cf ploadu(const std::complex* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu((const float*)from)); } -template<> EIGEN_STRONG_INLINE void ei_pstore >(std::complex * to, const Packet2cf& 
from) { EIGEN_DEBUG_ALIGNED_STORE ei_pstore((float*)to, from.v); } -template<> EIGEN_STRONG_INLINE void ei_pstoreu >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE ei_pstoreu((float*)to, from.v); } +template<> EIGEN_STRONG_INLINE Packet2cf ploaddup(const std::complex* from) +{ + return pset1(*from); +} -template<> EIGEN_STRONG_INLINE void ei_prefetch >(const std::complex * addr) { vec_dstt((float *)addr, DST_CTRL(2,2,32), DST_CHAN); } +template<> EIGEN_STRONG_INLINE void pstore >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); } +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); } -template<> EIGEN_STRONG_INLINE std::complex ei_pfirst(const Packet2cf& a) +template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex * addr) { vec_dstt((float *)addr, DST_CTRL(2,2,32), DST_CHAN); } + +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet2cf& a) { std::complex EIGEN_ALIGN16 res[2]; - ei_pstore((float *)&res, a.v); + pstore((float *)&res, a.v); return res[0]; } -template<> EIGEN_STRONG_INLINE Packet2cf ei_preverse(const Packet2cf& a) +template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) { Packet4f rev_a; - rev_a = vec_perm(a.v, a.v, ei_p16uc_COMPLEX_REV2); + rev_a = vec_perm(a.v, a.v, p16uc_COMPLEX_REV2); return Packet2cf(rev_a); } -template<> EIGEN_STRONG_INLINE std::complex ei_predux(const Packet2cf& a) +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet2cf& a) { Packet4f b; b = (Packet4f) vec_sld(a.v, a.v, 8); - b = ei_padd(a.v, b); - return ei_pfirst(Packet2cf(sum)); + b = padd(a.v, b); + return pfirst(Packet2cf(b)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_preduxp(const Packet2cf* vecs) +template<> EIGEN_STRONG_INLINE Packet2cf preduxp(const Packet2cf* vecs) { Packet4f b1, b2; b1 = (Packet4f) vec_sld(vecs[0].v, vecs[1].v, 8); b2 = (Packet4f) 
vec_sld(vecs[1].v, vecs[0].v, 8); b2 = (Packet4f) vec_sld(b2, b2, 8); - b2 = ei_padd(b1, b2); + b2 = padd(b1, b2); return Packet2cf(b2); } -template<> EIGEN_STRONG_INLINE std::complex ei_predux_mul(const Packet2cf& a) +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet2cf& a) { Packet4f b; Packet2cf prod; b = (Packet4f) vec_sld(a.v, a.v, 8); - prod = ei_pmul(a, Packet2cf(b)); + prod = pmul(a, Packet2cf(b)); - return ei_pfirst(prod); + return pfirst(prod); } template -struct ei_palign_impl +struct palign_impl { EIGEN_STRONG_INLINE static void run(Packet2cf& first, const Packet2cf& second) { @@ -171,45 +177,52 @@ struct ei_palign_impl } }; -template<> struct ei_conj_helper +template<> struct conj_helper { EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const - { return ei_padd(pmul(x,y),c); } + { return padd(pmul(x,y),c); } EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const { - return ei_pmul(a, ei_pconj(b)); + return internal::pmul(a, pconj(b)); } }; -template<> struct ei_conj_helper +template<> struct conj_helper { EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const - { return ei_padd(pmul(x,y),c); } + { return padd(pmul(x,y),c); } EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const { - return ei_pmul(ei_pconj(a), b); + return internal::pmul(pconj(a), b); } }; -template<> struct ei_conj_helper +template<> struct conj_helper { EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const - { return ei_padd(pmul(x,y),c); } + { return padd(pmul(x,y),c); } EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const { - return ei_pconj(ei_pmul(a, b)); + return pconj(internal::pmul(a, b)); } }; -template<> EIGEN_STRONG_INLINE Packet2cf ei_pdiv(const Packet2cf& a, const Packet2cf& b) +template<> EIGEN_STRONG_INLINE Packet2cf pdiv(const 
Packet2cf& a, const Packet2cf& b) { // TODO optimize it for AltiVec - Packet2cf res = ei_conj_helper().pmul(a,b); - Packet4f s = vec_madd(b.v, b.v, ei_p4f_ZERO); - return Packet2cf(ei_pdiv(res.v, vec_add(s,vec_perm(s, s, ei_p16uc_COMPLEX_REV)))); + Packet2cf res = conj_helper().pmul(a,b); + Packet4f s = vec_madd(b.v, b.v, p4f_ZERO); + return Packet2cf(pdiv(res.v, vec_add(s,vec_perm(s, s, p16uc_COMPLEX_REV)))); } +template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip(const Packet2cf& x) +{ + return Packet2cf(vec_perm(x.v, x.v, p16uc_COMPLEX_REV)); +} + +} // end namespace internal + #endif // EIGEN_COMPLEX_ALTIVEC_H diff --git a/gtsam/3rdparty/Eigen/src/Core/arch/AltiVec/PacketMath.h b/gtsam/3rdparty/Eigen/src/Core/arch/AltiVec/PacketMath.h index 8205beae5..dc34ebbd6 100644 --- a/gtsam/3rdparty/Eigen/src/Core/arch/AltiVec/PacketMath.h +++ b/gtsam/3rdparty/Eigen/src/Core/arch/AltiVec/PacketMath.h @@ -25,6 +25,8 @@ #ifndef EIGEN_PACKET_MATH_ALTIVEC_H #define EIGEN_PACKET_MATH_ALTIVEC_H +namespace internal { + #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 4 #endif @@ -33,10 +35,6 @@ #define EIGEN_HAS_FUSE_CJMADD 1 #endif -#ifndef EIGEN_TUNE_FOR_CPU_CACHE_SIZE -#define EIGEN_TUNE_FOR_CPU_CACHE_SIZE 8*256*256 -#endif - // NOTE Altivec has 32 registers, but Eigen only accepts a value of 8 or 16 #ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS #define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16 @@ -53,38 +51,39 @@ typedef __vector unsigned char Packet16uc; // and it doesn't really work to declare them global, so we define macros instead #define _EIGEN_DECLARE_CONST_FAST_Packet4f(NAME,X) \ - Packet4f ei_p4f_##NAME = (Packet4f) vec_splat_s32(X) + Packet4f p4f_##NAME = (Packet4f) vec_splat_s32(X) #define _EIGEN_DECLARE_CONST_FAST_Packet4i(NAME,X) \ - Packet4i ei_p4i_##NAME = vec_splat_s32(X) + Packet4i p4i_##NAME = vec_splat_s32(X) #define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \ - Packet4f ei_p4f_##NAME = ei_pset1(X) + Packet4f 
p4f_##NAME = pset1(X) #define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \ - Packet4f ei_p4f_##NAME = vreinterpretq_f32_u32(ei_pset1(X)) + Packet4f p4f_##NAME = vreinterpretq_f32_u32(pset1(X)) #define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \ - Packet4i ei_p4i_##NAME = ei_pset1(X) + Packet4i p4i_##NAME = pset1(X) #define DST_CHAN 1 #define DST_CTRL(size, count, stride) (((size) << 24) | ((count) << 16) | (stride)) // Define global static constants: -static Packet4f ei_p4f_COUNTDOWN = { 3.0, 2.0, 1.0, 0.0 }; -static Packet4i ei_p4i_COUNTDOWN = { 3, 2, 1, 0 }; -static Packet16uc ei_p16uc_REVERSE = {12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3}; -static Packet16uc ei_p16uc_FORWARD = vec_lvsl(0, (float*)0); +static Packet4f p4f_COUNTDOWN = { 3.0, 2.0, 1.0, 0.0 }; +static Packet4i p4i_COUNTDOWN = { 3, 2, 1, 0 }; +static Packet16uc p16uc_REVERSE = {12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3}; +static Packet16uc p16uc_FORWARD = vec_lvsl(0, (float*)0); +static Packet16uc p16uc_DUPLICATE = {0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7}; static _EIGEN_DECLARE_CONST_FAST_Packet4f(ZERO, 0); static _EIGEN_DECLARE_CONST_FAST_Packet4i(ZERO, 0); static _EIGEN_DECLARE_CONST_FAST_Packet4i(ONE,1); static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS16,-16); static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS1,-1); -static Packet4f ei_p4f_ONE = vec_ctf(ei_p4i_ONE, 0); -static Packet4f ei_p4f_ZERO_ = (Packet4f) vec_sl((Packet4ui)ei_p4i_MINUS1, (Packet4ui)ei_p4i_MINUS1); +static Packet4f p4f_ONE = vec_ctf(p4i_ONE, 0); +static Packet4f p4f_ZERO_ = (Packet4f) vec_sl((Packet4ui)p4i_MINUS1, (Packet4ui)p4i_MINUS1); -template<> struct ei_packet_traits : ei_default_packet_traits +template<> struct packet_traits : default_packet_traits { typedef Packet4f type; enum { @@ -100,7 +99,7 @@ template<> struct ei_packet_traits : ei_default_packet_traits HasSqrt = 0 }; }; -template<> struct ei_packet_traits : ei_default_packet_traits +template<> struct packet_traits : default_packet_traits { typedef Packet4i type; enum { @@ 
-111,8 +110,8 @@ template<> struct ei_packet_traits : ei_default_packet_traits }; }; -template<> struct ei_unpacket_traits { typedef float type; enum {size=4}; }; -template<> struct ei_unpacket_traits { typedef int type; enum {size=4}; }; +template<> struct unpacket_traits { typedef float type; enum {size=4}; }; +template<> struct unpacket_traits { typedef int type; enum {size=4}; }; /* inline std::ostream & operator <<(std::ostream & s, const Packet4f & v) { @@ -158,7 +157,7 @@ inline std::ostream & operator <<(std::ostream & s, const Packetbi & v) return s; } */ -template<> EIGEN_STRONG_INLINE Packet4f ei_pset1(const float& from) { +template<> EIGEN_STRONG_INLINE Packet4f pset1(const float& from) { // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html float EIGEN_ALIGN16 af[4]; af[0] = from; @@ -167,7 +166,7 @@ template<> EIGEN_STRONG_INLINE Packet4f ei_pset1(const float& from) { return vc; } -template<> EIGEN_STRONG_INLINE Packet4i ei_pset1(const int& from) { +template<> EIGEN_STRONG_INLINE Packet4i pset1(const int& from) { int EIGEN_ALIGN16 ai[4]; ai[0] = from; Packet4i vc = vec_ld(0, ai); @@ -175,22 +174,22 @@ template<> EIGEN_STRONG_INLINE Packet4i ei_pset1(const int& from) return vc; } -template<> EIGEN_STRONG_INLINE Packet4f ei_plset(const float& a) { return vec_add(ei_pset1(a), ei_p4f_COUNTDOWN); } -template<> EIGEN_STRONG_INLINE Packet4i ei_plset(const int& a) { return vec_add(ei_pset1(a), ei_p4i_COUNTDOWN); } +template<> EIGEN_STRONG_INLINE Packet4f plset(const float& a) { return vec_add(pset1(a), p4f_COUNTDOWN); } +template<> EIGEN_STRONG_INLINE Packet4i plset(const int& a) { return vec_add(pset1(a), p4i_COUNTDOWN); } -template<> EIGEN_STRONG_INLINE Packet4f ei_padd(const Packet4f& a, const Packet4f& b) { return vec_add(a,b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_padd(const Packet4i& a, const Packet4i& b) { return vec_add(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f padd(const Packet4f& a, const Packet4f& b) { return 
vec_add(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i padd(const Packet4i& a, const Packet4i& b) { return vec_add(a,b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_psub(const Packet4f& a, const Packet4f& b) { return vec_sub(a,b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_psub(const Packet4i& a, const Packet4i& b) { return vec_sub(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f psub(const Packet4f& a, const Packet4f& b) { return vec_sub(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i psub(const Packet4i& a, const Packet4i& b) { return vec_sub(a,b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pnegate(const Packet4f& a) { return ei_psub(ei_p4f_ZERO, a); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pnegate(const Packet4i& a) { return ei_psub(ei_p4i_ZERO, a); } +template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return psub(p4f_ZERO, a); } +template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return psub(p4i_ZERO, a); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pmul(const Packet4f& a, const Packet4f& b) { return vec_madd(a,b,ei_p4f_ZERO); } +template<> EIGEN_STRONG_INLINE Packet4f pmul(const Packet4f& a, const Packet4f& b) { return vec_madd(a,b,p4f_ZERO); } /* Commented out: it's actually slower than processing it scalar * -template<> EIGEN_STRONG_INLINE Packet4i ei_pmul(const Packet4i& a, const Packet4i& b) +template<> EIGEN_STRONG_INLINE Packet4i pmul(const Packet4i& a, const Packet4i& b) { // Detailed in: http://freevec.org/content/32bit_signed_integer_multiplication_altivec //Set up constants, variables @@ -201,21 +200,21 @@ template<> EIGEN_STRONG_INLINE Packet4i ei_pmul(const Packet4i& a, con b1 = vec_abs(b); // Get the signs using xor - Packet4bi sgn = (Packet4bi) vec_cmplt(vec_xor(a, b), ei_p4i_ZERO); + Packet4bi sgn = (Packet4bi) vec_cmplt(vec_xor(a, b), p4i_ZERO); // Do the multiplication for the asbolute values. 
- bswap = (Packet4i) vec_rl((Packet4ui) b1, (Packet4ui) ei_p4i_MINUS16 ); + bswap = (Packet4i) vec_rl((Packet4ui) b1, (Packet4ui) p4i_MINUS16 ); low_prod = vec_mulo((Packet8i) a1, (Packet8i)b1); - high_prod = vec_msum((Packet8i) a1, (Packet8i) bswap, ei_p4i_ZERO); - high_prod = (Packet4i) vec_sl((Packet4ui) high_prod, (Packet4ui) ei_p4i_MINUS16); + high_prod = vec_msum((Packet8i) a1, (Packet8i) bswap, p4i_ZERO); + high_prod = (Packet4i) vec_sl((Packet4ui) high_prod, (Packet4ui) p4i_MINUS16); prod = vec_add( low_prod, high_prod ); // NOR the product and select only the negative elements according to the sign mask prod_ = vec_nor(prod, prod); - prod_ = vec_sel(ei_p4i_ZERO, prod_, sgn); + prod_ = vec_sel(p4i_ZERO, prod_, sgn); // Add 1 to the result to get the negative numbers - v1sel = vec_sel(ei_p4i_ZERO, ei_p4i_ONE, sgn); + v1sel = vec_sel(p4i_ZERO, p4i_ONE, sgn); prod_ = vec_add(prod_, v1sel); // Merge the results back to the final vector. @@ -224,7 +223,7 @@ template<> EIGEN_STRONG_INLINE Packet4i ei_pmul(const Packet4i& a, con return prod; } */ -template<> EIGEN_STRONG_INLINE Packet4f ei_pdiv(const Packet4f& a, const Packet4f& b) +template<> EIGEN_STRONG_INLINE Packet4f pdiv(const Packet4f& a, const Packet4f& b) { Packet4f t, y_0, y_1, res; @@ -232,45 +231,45 @@ template<> EIGEN_STRONG_INLINE Packet4f ei_pdiv(const Packet4f& a, con y_0 = vec_re(b); // Do one Newton-Raphson iteration to get the needed accuracy - t = vec_nmsub(y_0, b, ei_p4f_ONE); + t = vec_nmsub(y_0, b, p4f_ONE); y_1 = vec_madd(y_0, t, y_0); - res = vec_madd(a, y_1, ei_p4f_ZERO); + res = vec_madd(a, y_1, p4f_ZERO); return res; } -template<> EIGEN_STRONG_INLINE Packet4i ei_pdiv(const Packet4i& /*a*/, const Packet4i& /*b*/) -{ ei_assert(false && "packet integer division are not supported by AltiVec"); - return ei_pset1(0); +template<> EIGEN_STRONG_INLINE Packet4i pdiv(const Packet4i& /*a*/, const Packet4i& /*b*/) +{ eigen_assert(false && "packet integer division are not supported by AltiVec"); + 
return pset1(0); } // for some weird raisons, it has to be overloaded for packet of integers -template<> EIGEN_STRONG_INLINE Packet4f ei_pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vec_madd(a, b, c); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return ei_padd(ei_pmul(a,b), c); } +template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vec_madd(a, b, c); } +template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pmin(const Packet4f& a, const Packet4f& b) { return vec_min(a, b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pmin(const Packet4i& a, const Packet4i& b) { return vec_min(a, b); } +template<> EIGEN_STRONG_INLINE Packet4f pmin(const Packet4f& a, const Packet4f& b) { return vec_min(a, b); } +template<> EIGEN_STRONG_INLINE Packet4i pmin(const Packet4i& a, const Packet4i& b) { return vec_min(a, b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pmax(const Packet4f& a, const Packet4f& b) { return vec_max(a, b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pmax(const Packet4i& a, const Packet4i& b) { return vec_max(a, b); } +template<> EIGEN_STRONG_INLINE Packet4f pmax(const Packet4f& a, const Packet4f& b) { return vec_max(a, b); } +template<> EIGEN_STRONG_INLINE Packet4i pmax(const Packet4i& a, const Packet4i& b) { return vec_max(a, b); } // Logical Operations are not supported for float, so we have to reinterpret casts using NEON intrinsics -template<> EIGEN_STRONG_INLINE Packet4f ei_pand(const Packet4f& a, const Packet4f& b) { return vec_and(a, b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pand(const Packet4i& a, const Packet4i& b) { return vec_and(a, b); } +template<> EIGEN_STRONG_INLINE Packet4f pand(const Packet4f& a, const Packet4f& b) { return vec_and(a, b); } +template<> 
EIGEN_STRONG_INLINE Packet4i pand(const Packet4i& a, const Packet4i& b) { return vec_and(a, b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_por(const Packet4f& a, const Packet4f& b) { return vec_or(a, b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_por(const Packet4i& a, const Packet4i& b) { return vec_or(a, b); } +template<> EIGEN_STRONG_INLINE Packet4f por(const Packet4f& a, const Packet4f& b) { return vec_or(a, b); } +template<> EIGEN_STRONG_INLINE Packet4i por(const Packet4i& a, const Packet4i& b) { return vec_or(a, b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pxor(const Packet4f& a, const Packet4f& b) { return vec_xor(a, b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pxor(const Packet4i& a, const Packet4i& b) { return vec_xor(a, b); } +template<> EIGEN_STRONG_INLINE Packet4f pxor(const Packet4f& a, const Packet4f& b) { return vec_xor(a, b); } +template<> EIGEN_STRONG_INLINE Packet4i pxor(const Packet4i& a, const Packet4i& b) { return vec_xor(a, b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pandnot(const Packet4f& a, const Packet4f& b) { return vec_and(a, vec_nor(b, b)); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pandnot(const Packet4i& a, const Packet4i& b) { return vec_and(a, vec_nor(b, b)); } +template<> EIGEN_STRONG_INLINE Packet4f pandnot(const Packet4f& a, const Packet4f& b) { return vec_and(a, vec_nor(b, b)); } +template<> EIGEN_STRONG_INLINE Packet4i pandnot(const Packet4i& a, const Packet4i& b) { return vec_and(a, vec_nor(b, b)); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pload(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vec_ld(0, from); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pload(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return vec_ld(0, from); } +template<> EIGEN_STRONG_INLINE Packet4f pload(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vec_ld(0, from); } +template<> EIGEN_STRONG_INLINE Packet4i pload(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return vec_ld(0, from); } -template<> EIGEN_STRONG_INLINE 
Packet4f ei_ploadu(const float* from) +template<> EIGEN_STRONG_INLINE Packet4f ploadu(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html @@ -282,7 +281,7 @@ template<> EIGEN_STRONG_INLINE Packet4f ei_ploadu(const float* from) return (Packet4f) vec_perm(MSQ, LSQ, mask); // align the data } -template<> EIGEN_STRONG_INLINE Packet4i ei_ploadu(const int* from) +template<> EIGEN_STRONG_INLINE Packet4i ploadu(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html @@ -294,10 +293,25 @@ template<> EIGEN_STRONG_INLINE Packet4i ei_ploadu(const int* from) return (Packet4i) vec_perm(MSQ, LSQ, mask); // align the data } -template<> EIGEN_STRONG_INLINE void ei_pstore(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vec_st(from, 0, to); } -template<> EIGEN_STRONG_INLINE void ei_pstore(int* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vec_st(from, 0, to); } +template<> EIGEN_STRONG_INLINE Packet4f ploaddup(const float* from) +{ + Packet4f p; + if((ptrdiff_t(&from) % 16) == 0) p = pload(from); + else p = ploadu(from); + return vec_perm(p, p, p16uc_DUPLICATE); +} +template<> EIGEN_STRONG_INLINE Packet4i ploaddup(const int* from) +{ + Packet4i p; + if((ptrdiff_t(&from) % 16) == 0) p = pload(from); + else p = ploadu(from); + return vec_perm(p, p, p16uc_DUPLICATE); +} -template<> EIGEN_STRONG_INLINE void ei_pstoreu(float* to, const Packet4f& from) +template<> EIGEN_STRONG_INLINE void pstore(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vec_st(from, 0, to); } +template<> EIGEN_STRONG_INLINE void pstore(int* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vec_st(from, 0, to); } + +template<> EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html @@ -315,7 +329,7 @@ template<> EIGEN_STRONG_INLINE void 
ei_pstoreu(float* to, const Packet4f vec_st( LSQ, 15, (unsigned char *)to ); // Store the LSQ part first vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part } -template<> EIGEN_STRONG_INLINE void ei_pstoreu(int* to, const Packet4i& from) +template<> EIGEN_STRONG_INLINE void pstoreu(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html @@ -334,29 +348,29 @@ template<> EIGEN_STRONG_INLINE void ei_pstoreu(int* to, const Packet4i vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part } -template<> EIGEN_STRONG_INLINE void ei_prefetch(const float* addr) { vec_dstt(addr, DST_CTRL(2,2,32), DST_CHAN); } -template<> EIGEN_STRONG_INLINE void ei_prefetch(const int* addr) { vec_dstt(addr, DST_CTRL(2,2,32), DST_CHAN); } +template<> EIGEN_STRONG_INLINE void prefetch(const float* addr) { vec_dstt(addr, DST_CTRL(2,2,32), DST_CHAN); } +template<> EIGEN_STRONG_INLINE void prefetch(const int* addr) { vec_dstt(addr, DST_CTRL(2,2,32), DST_CHAN); } -template<> EIGEN_STRONG_INLINE float ei_pfirst(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vec_st(a, 0, x); return x[0]; } -template<> EIGEN_STRONG_INLINE int ei_pfirst(const Packet4i& a) { int EIGEN_ALIGN16 x[4]; vec_st(a, 0, x); return x[0]; } +template<> EIGEN_STRONG_INLINE float pfirst(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vec_st(a, 0, x); return x[0]; } +template<> EIGEN_STRONG_INLINE int pfirst(const Packet4i& a) { int EIGEN_ALIGN16 x[4]; vec_st(a, 0, x); return x[0]; } -template<> EIGEN_STRONG_INLINE Packet4f ei_preverse(const Packet4f& a) { return (Packet4f)vec_perm((Packet16uc)a,(Packet16uc)a, ei_p16uc_REVERSE); } -template<> EIGEN_STRONG_INLINE Packet4i ei_preverse(const Packet4i& a) { return (Packet4i)vec_perm((Packet16uc)a,(Packet16uc)a, ei_p16uc_REVERSE); } +template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) { return (Packet4f)vec_perm((Packet16uc)a,(Packet16uc)a, p16uc_REVERSE); } +template<> 
EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) { return (Packet4i)vec_perm((Packet16uc)a,(Packet16uc)a, p16uc_REVERSE); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pabs(const Packet4f& a) { return vec_abs(a); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pabs(const Packet4i& a) { return vec_abs(a); } +template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vec_abs(a); } +template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vec_abs(a); } -template<> EIGEN_STRONG_INLINE float ei_predux(const Packet4f& a) +template<> EIGEN_STRONG_INLINE float predux(const Packet4f& a) { Packet4f b, sum; b = (Packet4f) vec_sld(a, a, 8); sum = vec_add(a, b); b = (Packet4f) vec_sld(sum, sum, 4); sum = vec_add(sum, b); - return ei_pfirst(sum); + return pfirst(sum); } -template<> EIGEN_STRONG_INLINE Packet4f ei_preduxp(const Packet4f* vecs) +template<> EIGEN_STRONG_INLINE Packet4f preduxp(const Packet4f* vecs) { Packet4f v[4], sum[4]; @@ -384,15 +398,15 @@ template<> EIGEN_STRONG_INLINE Packet4f ei_preduxp(const Packet4f* vec return sum[0]; } -template<> EIGEN_STRONG_INLINE int ei_predux(const Packet4i& a) +template<> EIGEN_STRONG_INLINE int predux(const Packet4i& a) { Packet4i sum; - sum = vec_sums(a, ei_p4i_ZERO); - sum = vec_sld(sum, ei_p4i_ZERO, 12); - return ei_pfirst(sum); + sum = vec_sums(a, p4i_ZERO); + sum = vec_sld(sum, p4i_ZERO, 12); + return pfirst(sum); } -template<> EIGEN_STRONG_INLINE Packet4i ei_preduxp(const Packet4i* vecs) +template<> EIGEN_STRONG_INLINE Packet4i preduxp(const Packet4i* vecs) { Packet4i v[4], sum[4]; @@ -422,56 +436,56 @@ template<> EIGEN_STRONG_INLINE Packet4i ei_preduxp(const Packet4i* vec // Other reduction functions: // mul -template<> EIGEN_STRONG_INLINE float ei_predux_mul(const Packet4f& a) +template<> EIGEN_STRONG_INLINE float predux_mul(const Packet4f& a) { Packet4f prod; - prod = ei_pmul(a, (Packet4f)vec_sld(a, a, 8)); - return ei_pfirst(ei_pmul(prod, (Packet4f)vec_sld(prod, prod, 4))); + prod = 
pmul(a, (Packet4f)vec_sld(a, a, 8)); + return pfirst(pmul(prod, (Packet4f)vec_sld(prod, prod, 4))); } -template<> EIGEN_STRONG_INLINE int ei_predux_mul(const Packet4i& a) +template<> EIGEN_STRONG_INLINE int predux_mul(const Packet4i& a) { EIGEN_ALIGN16 int aux[4]; - ei_pstore(aux, a); + pstore(aux, a); return aux[0] * aux[1] * aux[2] * aux[3]; } // min -template<> EIGEN_STRONG_INLINE float ei_predux_min(const Packet4f& a) +template<> EIGEN_STRONG_INLINE float predux_min(const Packet4f& a) { Packet4f b, res; b = vec_min(a, vec_sld(a, a, 8)); res = vec_min(b, vec_sld(b, b, 4)); - return ei_pfirst(res); + return pfirst(res); } -template<> EIGEN_STRONG_INLINE int ei_predux_min(const Packet4i& a) +template<> EIGEN_STRONG_INLINE int predux_min(const Packet4i& a) { Packet4i b, res; b = vec_min(a, vec_sld(a, a, 8)); res = vec_min(b, vec_sld(b, b, 4)); - return ei_pfirst(res); + return pfirst(res); } // max -template<> EIGEN_STRONG_INLINE float ei_predux_max(const Packet4f& a) +template<> EIGEN_STRONG_INLINE float predux_max(const Packet4f& a) { Packet4f b, res; b = vec_max(a, vec_sld(a, a, 8)); res = vec_max(b, vec_sld(b, b, 4)); - return ei_pfirst(res); + return pfirst(res); } -template<> EIGEN_STRONG_INLINE int ei_predux_max(const Packet4i& a) +template<> EIGEN_STRONG_INLINE int predux_max(const Packet4i& a) { Packet4i b, res; b = vec_max(a, vec_sld(a, a, 8)); res = vec_max(b, vec_sld(b, b, 4)); - return ei_pfirst(res); + return pfirst(res); } template -struct ei_palign_impl +struct palign_impl { EIGEN_STRONG_INLINE static void run(Packet4f& first, const Packet4f& second) { @@ -481,7 +495,7 @@ struct ei_palign_impl }; template -struct ei_palign_impl +struct palign_impl { EIGEN_STRONG_INLINE static void run(Packet4i& first, const Packet4i& second) { @@ -489,4 +503,7 @@ struct ei_palign_impl first = vec_sld(first, second, Offset*4); } }; + +} // end namespace internal + #endif // EIGEN_PACKET_MATH_ALTIVEC_H diff --git a/gtsam/3rdparty/Eigen/src/Core/arch/Default/Settings.h 
b/gtsam/3rdparty/Eigen/src/Core/arch/Default/Settings.h index ca777f965..957adc8fe 100644 --- a/gtsam/3rdparty/Eigen/src/Core/arch/Default/Settings.h +++ b/gtsam/3rdparty/Eigen/src/Core/arch/Default/Settings.h @@ -46,15 +46,6 @@ #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8 #endif -/** Defines the maximal size in Bytes of blocks fitting in CPU cache. - * The current value is set to generate blocks of 256x256 for float - * - * Typically for a single-threaded application you would set that to 25% of the size of your CPU caches in bytes - */ -#ifndef EIGEN_TUNE_FOR_CPU_CACHE_SIZE -#define EIGEN_TUNE_FOR_CPU_CACHE_SIZE (sizeof(float)*512*512) -#endif - /** Defines the maximal width of the blocks used in the triangular product and solver * for vectors (level 2 blas xTRMV and xTRSV). The default is 8. */ diff --git a/gtsam/3rdparty/Eigen/src/Core/arch/NEON/Complex.h b/gtsam/3rdparty/Eigen/src/Core/arch/NEON/Complex.h index 9678040e7..a88354bd3 100644 --- a/gtsam/3rdparty/Eigen/src/Core/arch/NEON/Complex.h +++ b/gtsam/3rdparty/Eigen/src/Core/arch/NEON/Complex.h @@ -22,11 +22,13 @@ // License and a copy of the GNU General Public License along with // Eigen. If not, see . 
-#ifndef EIGEN_COMPLEX_ALTIVEC_H -#define EIGEN_COMPLEX_ALTIVEC_H +#ifndef EIGEN_COMPLEX_NEON_H +#define EIGEN_COMPLEX_NEON_H -static uint32x4_t ei_p4ui_CONJ_XOR = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 }; -static uint32x2_t ei_p2ui_CONJ_XOR = { 0x00000000, 0x80000000 }; +namespace internal { + +static uint32x4_t p4ui_CONJ_XOR = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 }; +static uint32x2_t p2ui_CONJ_XOR = { 0x00000000, 0x80000000 }; //---------- float ---------- struct Packet2cf @@ -36,7 +38,7 @@ struct Packet2cf Packet4f v; }; -template<> struct ei_packet_traits > : ei_default_packet_traits +template<> struct packet_traits > : default_packet_traits { typedef Packet2cf type; enum { @@ -56,9 +58,9 @@ template<> struct ei_packet_traits > : ei_default_packet_tr }; }; -template<> struct ei_unpacket_traits { typedef std::complex type; enum {size=2}; }; +template<> struct unpacket_traits { typedef std::complex type; enum {size=2}; }; -template<> EIGEN_STRONG_INLINE Packet2cf ei_pset1(const std::complex& from) +template<> EIGEN_STRONG_INLINE Packet2cf pset1(const std::complex& from) { float32x2_t r64; r64 = vld1_f32((float *)&from); @@ -66,15 +68,16 @@ template<> EIGEN_STRONG_INLINE Packet2cf ei_pset1(const std::complex< return Packet2cf(vcombine_f32(r64, r64)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_padd(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(ei_padd(a.v,b.v)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_psub(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(ei_psub(a.v,b.v)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pnegate(const Packet2cf& a) { return Packet2cf(ei_pnegate(a.v)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pconj(const Packet2cf& a) +template<> EIGEN_STRONG_INLINE Packet2cf padd(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(padd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf psub(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(psub(a.v,b.v)); } +template<> 
EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(a.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) { - return Packet2cf(vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a.v), ei_p4ui_CONJ_XOR))); + Packet4ui b = vreinterpretq_u32_f32(a.v); + return Packet2cf(vreinterpretq_f32_u32(veorq_u32(b, p4ui_CONJ_XOR))); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pmul(const Packet2cf& a, const Packet2cf& b) +template<> EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) { Packet4f v1, v2; float32x2_t a_lo, a_hi; @@ -88,7 +91,7 @@ template<> EIGEN_STRONG_INLINE Packet2cf ei_pmul(const Packet2cf& a, // Multiply the imag a with b v2 = vmulq_f32(v2, b.v); // Conjugate v2 - v2 = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(v2), ei_p4ui_CONJ_XOR)); + v2 = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(v2), p4ui_CONJ_XOR)); // Swap real/imag elements in v2. a_lo = vrev64_f32(vget_low_f32(v2)); a_hi = vrev64_f32(vget_high_f32(v2)); @@ -97,39 +100,41 @@ template<> EIGEN_STRONG_INLINE Packet2cf ei_pmul(const Packet2cf& a, return Packet2cf(vaddq_f32(v1, v2)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pand (const Packet2cf& a, const Packet2cf& b) +template<> EIGEN_STRONG_INLINE Packet2cf pand (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v)))); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_por (const Packet2cf& a, const Packet2cf& b) +template<> EIGEN_STRONG_INLINE Packet2cf por (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v)))); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pxor (const Packet2cf& a, const Packet2cf& b) +template<> EIGEN_STRONG_INLINE Packet2cf pxor (const Packet2cf& a, const Packet2cf& b) { return 
Packet2cf(vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v)))); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pandnot(const Packet2cf& a, const Packet2cf& b) +template<> EIGEN_STRONG_INLINE Packet2cf pandnot(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v)))); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pload >(const std::complex* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(ei_pload((const float*)from)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_ploadu >(const std::complex* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ei_ploadu((const float*)from)); } +template<> EIGEN_STRONG_INLINE Packet2cf pload(const std::complex* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload((const float*)from)); } +template<> EIGEN_STRONG_INLINE Packet2cf ploadu(const std::complex* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu((const float*)from)); } -template<> EIGEN_STRONG_INLINE void ei_pstore >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE ei_pstore((float*)to, from.v); } -template<> EIGEN_STRONG_INLINE void ei_pstoreu >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE ei_pstoreu((float*)to, from.v); } +template<> EIGEN_STRONG_INLINE Packet2cf ploaddup(const std::complex* from) { return pset1(*from); } -template<> EIGEN_STRONG_INLINE void ei_prefetch >(const std::complex * addr) { __pld((float *)addr); } +template<> EIGEN_STRONG_INLINE void pstore >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); } +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); } -template<> EIGEN_STRONG_INLINE std::complex ei_pfirst(const Packet2cf& a) +template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex * addr) { __pld((float *)addr); } + 
+template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet2cf& a) { std::complex EIGEN_ALIGN16 x[2]; vst1q_f32((float *)x, a.v); return x[0]; } -template<> EIGEN_STRONG_INLINE Packet2cf ei_preverse(const Packet2cf& a) +template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) { float32x2_t a_lo, a_hi; Packet4f a_r128; @@ -141,12 +146,12 @@ template<> EIGEN_STRONG_INLINE Packet2cf ei_preverse(const Packet2cf& a) return Packet2cf(a_r128); } -EIGEN_STRONG_INLINE Packet2cf ei_pcplxflip/**/(const Packet2cf& x) +template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip(const Packet2cf& a) { return Packet2cf(vrev64q_f32(a.v)); } -template<> EIGEN_STRONG_INLINE std::complex ei_predux(const Packet2cf& a) +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet2cf& a) { float32x2_t a1, a2; std::complex s; @@ -159,7 +164,7 @@ template<> EIGEN_STRONG_INLINE std::complex ei_predux(const Pa return s; } -template<> EIGEN_STRONG_INLINE Packet2cf ei_preduxp(const Packet2cf* vecs) +template<> EIGEN_STRONG_INLINE Packet2cf preduxp(const Packet2cf* vecs) { Packet4f sum1, sum2, sum; @@ -171,7 +176,7 @@ template<> EIGEN_STRONG_INLINE Packet2cf ei_preduxp(const Packet2cf* return Packet2cf(sum); } -template<> EIGEN_STRONG_INLINE std::complex ei_predux_mul(const Packet2cf& a) +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet2cf& a) { float32x2_t a1, a2, v1, v2, prod; std::complex s; @@ -187,7 +192,7 @@ template<> EIGEN_STRONG_INLINE std::complex ei_predux_mul(cons // Multiply the imag a with b v2 = vmul_f32(v2, a2); // Conjugate v2 - v2 = vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(v2), ei_p2ui_CONJ_XOR)); + v2 = vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(v2), p2ui_CONJ_XOR)); // Swap real/imag elements in v2. 
v2 = vrev64_f32(v2); // Add v1, v2 @@ -199,7 +204,7 @@ template<> EIGEN_STRONG_INLINE std::complex ei_predux_mul(cons } template -struct ei_palign_impl +struct palign_impl { EIGEN_STRONG_INLINE static void run(Packet2cf& first, const Packet2cf& second) { @@ -210,43 +215,43 @@ struct ei_palign_impl } }; -template<> struct ei_conj_helper +template<> struct conj_helper { EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const - { return ei_padd(pmul(x,y),c); } + { return padd(pmul(x,y),c); } EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const { - return ei_pmul(a, ei_pconj(b)); + return internal::pmul(a, pconj(b)); } }; -template<> struct ei_conj_helper +template<> struct conj_helper { EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const - { return ei_padd(pmul(x,y),c); } + { return padd(pmul(x,y),c); } EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const { - return ei_pmul(ei_pconj(a), b); + return internal::pmul(pconj(a), b); } }; -template<> struct ei_conj_helper +template<> struct conj_helper { EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const - { return ei_padd(pmul(x,y),c); } + { return padd(pmul(x,y),c); } EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const { - return ei_pconj(ei_pmul(a, b)); + return pconj(internal::pmul(a, b)); } }; -template<> EIGEN_STRONG_INLINE Packet2cf ei_pdiv(const Packet2cf& a, const Packet2cf& b) +template<> EIGEN_STRONG_INLINE Packet2cf pdiv(const Packet2cf& a, const Packet2cf& b) { // TODO optimize it for AltiVec - Packet2cf res = ei_conj_helper().pmul(a,b); + Packet2cf res = conj_helper().pmul(a,b); Packet4f s, rev_s; float32x2_t a_lo, a_hi; @@ -256,7 +261,9 @@ template<> EIGEN_STRONG_INLINE Packet2cf ei_pdiv(const Packet2cf& a, a_hi = vrev64_f32(vget_high_f32(s)); rev_s = vcombine_f32(a_lo, a_hi); - 
return Packet2cf(ei_pdiv(res.v, vaddq_f32(s,rev_s))); + return Packet2cf(pdiv(res.v, vaddq_f32(s,rev_s))); } -#endif // EIGEN_COMPLEX_ALTIVEC_H +} // end namespace internal + +#endif // EIGEN_COMPLEX_NEON_H diff --git a/gtsam/3rdparty/Eigen/src/Core/arch/NEON/PacketMath.h b/gtsam/3rdparty/Eigen/src/Core/arch/NEON/PacketMath.h index 8220ed07c..4f66b0f43 100644 --- a/gtsam/3rdparty/Eigen/src/Core/arch/NEON/PacketMath.h +++ b/gtsam/3rdparty/Eigen/src/Core/arch/NEON/PacketMath.h @@ -27,14 +27,12 @@ #ifndef EIGEN_PACKET_MATH_NEON_H #define EIGEN_PACKET_MATH_NEON_H +namespace internal { + #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8 #endif -#ifndef EIGEN_TUNE_FOR_CPU_CACHE_SIZE -#define EIGEN_TUNE_FOR_CPU_CACHE_SIZE 4*192*192 -#endif - // FIXME NEON has 16 quad registers, but since the current register allocator // is so bad, it is much better to reduce it to 8 #ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS @@ -43,21 +41,22 @@ typedef float32x4_t Packet4f; typedef int32x4_t Packet4i; +typedef uint32x4_t Packet4ui; #define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \ - const Packet4f ei_p4f_##NAME = ei_pset1(X) + const Packet4f p4f_##NAME = pset1(X) #define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \ - const Packet4f ei_p4f_##NAME = vreinterpretq_f32_u32(ei_pset1(X)) + const Packet4f p4f_##NAME = vreinterpretq_f32_u32(pset1(X)) #define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \ - const Packet4i ei_p4i_##NAME = ei_pset1(X) + const Packet4i p4i_##NAME = pset1(X) #ifndef __pld #define __pld(x) asm volatile ( " pld [%[addr]]\n" :: [addr] "r" (x) : "cc" ); #endif -template<> struct ei_packet_traits : ei_default_packet_traits +template<> struct packet_traits : default_packet_traits { typedef Packet4f type; enum { @@ -74,7 +73,7 @@ template<> struct ei_packet_traits : ei_default_packet_traits HasSqrt = 0 }; }; -template<> struct ei_packet_traits : ei_default_packet_traits +template<> struct packet_traits : default_packet_traits { 
typedef Packet4i type; enum { @@ -85,36 +84,44 @@ template<> struct ei_packet_traits : ei_default_packet_traits }; }; -template<> struct ei_unpacket_traits { typedef float type; enum {size=4}; }; -template<> struct ei_unpacket_traits { typedef int type; enum {size=4}; }; +#if EIGEN_GNUC_AT_MOST(4,4) +// workaround gcc 4.2, 4.3 and 4.4 compilatin issue +EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); } +EIGEN_STRONG_INLINE float32x2_t vld1_f32 (const float* x) { return ::vld1_f32 ((const float32_t*)x); } +EIGEN_STRONG_INLINE void vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to,from); } +EIGEN_STRONG_INLINE void vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to,from); } +#endif -template<> EIGEN_STRONG_INLINE Packet4f ei_pset1(const float& from) { return vdupq_n_f32(from); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pset1(const int& from) { return vdupq_n_s32(from); } +template<> struct unpacket_traits { typedef float type; enum {size=4}; }; +template<> struct unpacket_traits { typedef int type; enum {size=4}; }; -template<> EIGEN_STRONG_INLINE Packet4f ei_plset(const float& a) +template<> EIGEN_STRONG_INLINE Packet4f pset1(const float& from) { return vdupq_n_f32(from); } +template<> EIGEN_STRONG_INLINE Packet4i pset1(const int& from) { return vdupq_n_s32(from); } + +template<> EIGEN_STRONG_INLINE Packet4f plset(const float& a) { Packet4f countdown = { 3, 2, 1, 0 }; - return vaddq_f32(ei_pset1(a), countdown); + return vaddq_f32(pset1(a), countdown); } -template<> EIGEN_STRONG_INLINE Packet4i ei_plset(const int& a) +template<> EIGEN_STRONG_INLINE Packet4i plset(const int& a) { Packet4i countdown = { 3, 2, 1, 0 }; - return vaddq_s32(ei_pset1(a), countdown); + return vaddq_s32(pset1(a), countdown); } -template<> EIGEN_STRONG_INLINE Packet4f ei_padd(const Packet4f& a, const Packet4f& b) { return vaddq_f32(a,b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_padd(const Packet4i& a, 
const Packet4i& b) { return vaddq_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f padd(const Packet4f& a, const Packet4f& b) { return vaddq_f32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i padd(const Packet4i& a, const Packet4i& b) { return vaddq_s32(a,b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_psub(const Packet4f& a, const Packet4f& b) { return vsubq_f32(a,b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_psub(const Packet4i& a, const Packet4i& b) { return vsubq_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f psub(const Packet4f& a, const Packet4f& b) { return vsubq_f32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i psub(const Packet4i& a, const Packet4i& b) { return vsubq_s32(a,b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pnegate(const Packet4f& a) { return vnegq_f32(a); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pnegate(const Packet4i& a) { return vnegq_s32(a); } +template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return vnegq_f32(a); } +template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return vnegq_s32(a); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pmul(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pmul(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f pmul(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pmul(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pdiv(const Packet4f& a, const Packet4f& b) +template<> EIGEN_STRONG_INLINE Packet4f pdiv(const Packet4f& a, const Packet4f& b) { Packet4f inv, restep, div; @@ -135,80 +142,80 @@ template<> EIGEN_STRONG_INLINE Packet4f ei_pdiv(const Packet4f& a, con return div; } -template<> EIGEN_STRONG_INLINE Packet4i ei_pdiv(const Packet4i& /*a*/, const Packet4i& /*b*/) -{ ei_assert(false && "packet integer division are 
not supported by NEON"); - return ei_pset1(0); +template<> EIGEN_STRONG_INLINE Packet4i pdiv(const Packet4i& /*a*/, const Packet4i& /*b*/) +{ eigen_assert(false && "packet integer division are not supported by NEON"); + return pset1(0); } // for some weird raisons, it has to be overloaded for packet of integers -template<> EIGEN_STRONG_INLINE Packet4i ei_pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return ei_padd(ei_pmul(a,b), c); } +template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pmin(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pmin(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f pmin(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pmin(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pmax(const Packet4f& a, const Packet4f& b) { return vmaxq_f32(a,b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pmax(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f pmax(const Packet4f& a, const Packet4f& b) { return vmaxq_f32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pmax(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); } // Logical Operations are not supported for float, so we have to reinterpret casts using NEON intrinsics -template<> EIGEN_STRONG_INLINE Packet4f ei_pand(const Packet4f& a, const Packet4f& b) +template<> EIGEN_STRONG_INLINE Packet4f pand(const Packet4f& a, const Packet4f& b) { return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b))); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pand(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); } +template<> 
EIGEN_STRONG_INLINE Packet4i pand(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_por(const Packet4f& a, const Packet4f& b) +template<> EIGEN_STRONG_INLINE Packet4f por(const Packet4f& a, const Packet4f& b) { return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b))); } -template<> EIGEN_STRONG_INLINE Packet4i ei_por(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i por(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pxor(const Packet4f& a, const Packet4f& b) +template<> EIGEN_STRONG_INLINE Packet4f pxor(const Packet4f& a, const Packet4f& b) { return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b))); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pxor(const Packet4i& a, const Packet4i& b) { return veorq_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pxor(const Packet4i& a, const Packet4i& b) { return veorq_s32(a,b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pandnot(const Packet4f& a, const Packet4f& b) +template<> EIGEN_STRONG_INLINE Packet4f pandnot(const Packet4f& a, const Packet4f& b) { return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b))); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pandnot(const Packet4i& a, const Packet4i& b) { return vbicq_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pandnot(const Packet4i& a, const Packet4i& b) { return vbicq_s32(a,b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pload(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pload(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); } +template<> EIGEN_STRONG_INLINE Packet4f pload(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); } +template<> EIGEN_STRONG_INLINE Packet4i pload(const 
int* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); } -template<> EIGEN_STRONG_INLINE Packet4f ei_ploadu(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); } -template<> EIGEN_STRONG_INLINE Packet4i ei_ploadu(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); } +template<> EIGEN_STRONG_INLINE Packet4f ploadu(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); } +template<> EIGEN_STRONG_INLINE Packet4i ploadu(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); } -template<> EIGEN_STRONG_INLINE Packet4f ei_ploaddup(const float* from) +template<> EIGEN_STRONG_INLINE Packet4f ploaddup(const float* from) { - float32x2_t lo, ho; + float32x2_t lo, hi; lo = vdup_n_f32(*from); hi = vdup_n_f32(*from); return vcombine_f32(lo, hi); } -template<> EIGEN_STRONG_INLINE Packet4i ei_ploaddup(const float* from) +template<> EIGEN_STRONG_INLINE Packet4i ploaddup(const int* from) { - int32x2_t lo, ho; + int32x2_t lo, hi; lo = vdup_n_s32(*from); hi = vdup_n_s32(*from); return vcombine_s32(lo, hi); } -template<> EIGEN_STRONG_INLINE void ei_pstore(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to, from); } -template<> EIGEN_STRONG_INLINE void ei_pstore(int* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to, from); } +template<> EIGEN_STRONG_INLINE void pstore(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to, from); } +template<> EIGEN_STRONG_INLINE void pstore(int* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to, from); } -template<> EIGEN_STRONG_INLINE void ei_pstoreu(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); } -template<> EIGEN_STRONG_INLINE void ei_pstoreu(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); } +template<> EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); } 
+template<> EIGEN_STRONG_INLINE void pstoreu(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); } -template<> EIGEN_STRONG_INLINE void ei_prefetch(const float* addr) { __pld(addr); } -template<> EIGEN_STRONG_INLINE void ei_prefetch(const int* addr) { __pld(addr); } +template<> EIGEN_STRONG_INLINE void prefetch(const float* addr) { __pld(addr); } +template<> EIGEN_STRONG_INLINE void prefetch(const int* addr) { __pld(addr); } // FIXME only store the 2 first elements ? -template<> EIGEN_STRONG_INLINE float ei_pfirst(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; } -template<> EIGEN_STRONG_INLINE int ei_pfirst(const Packet4i& a) { int EIGEN_ALIGN16 x[4]; vst1q_s32(x, a); return x[0]; } +template<> EIGEN_STRONG_INLINE float pfirst(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; } +template<> EIGEN_STRONG_INLINE int pfirst(const Packet4i& a) { int EIGEN_ALIGN16 x[4]; vst1q_s32(x, a); return x[0]; } -template<> EIGEN_STRONG_INLINE Packet4f ei_preverse(const Packet4f& a) { +template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) { float32x2_t a_lo, a_hi; Packet4f a_r64; @@ -217,7 +224,7 @@ template<> EIGEN_STRONG_INLINE Packet4f ei_preverse(const Packet4f& a) { a_hi = vget_high_f32(a_r64); return vcombine_f32(a_hi, a_lo); } -template<> EIGEN_STRONG_INLINE Packet4i ei_preverse(const Packet4i& a) { +template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) { int32x2_t a_lo, a_hi; Packet4i a_r64; @@ -226,10 +233,10 @@ template<> EIGEN_STRONG_INLINE Packet4i ei_preverse(const Packet4i& a) { a_hi = vget_high_s32(a_r64); return vcombine_s32(a_hi, a_lo); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pabs(const Packet4f& a) { return vabsq_f32(a); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pabs(const Packet4i& a) { return vabsq_s32(a); } +template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vabsq_f32(a); } +template<> EIGEN_STRONG_INLINE Packet4i 
pabs(const Packet4i& a) { return vabsq_s32(a); } -template<> EIGEN_STRONG_INLINE float ei_predux(const Packet4f& a) +template<> EIGEN_STRONG_INLINE float predux(const Packet4f& a) { float32x2_t a_lo, a_hi, sum; float s[2]; @@ -243,7 +250,7 @@ template<> EIGEN_STRONG_INLINE float ei_predux(const Packet4f& a) return s[0]; } -template<> EIGEN_STRONG_INLINE Packet4f ei_preduxp(const Packet4f* vecs) +template<> EIGEN_STRONG_INLINE Packet4f preduxp(const Packet4f* vecs) { float32x4x2_t vtrn1, vtrn2, res1, res2; Packet4f sum1, sum2, sum; @@ -263,7 +270,7 @@ template<> EIGEN_STRONG_INLINE Packet4f ei_preduxp(const Packet4f* vec return sum; } -template<> EIGEN_STRONG_INLINE int ei_predux(const Packet4i& a) +template<> EIGEN_STRONG_INLINE int predux(const Packet4i& a) { int32x2_t a_lo, a_hi, sum; int32_t s[2]; @@ -277,7 +284,7 @@ template<> EIGEN_STRONG_INLINE int ei_predux(const Packet4i& a) return s[0]; } -template<> EIGEN_STRONG_INLINE Packet4i ei_preduxp(const Packet4i* vecs) +template<> EIGEN_STRONG_INLINE Packet4i preduxp(const Packet4i* vecs) { int32x4x2_t vtrn1, vtrn2, res1, res2; Packet4i sum1, sum2, sum; @@ -299,7 +306,7 @@ template<> EIGEN_STRONG_INLINE Packet4i ei_preduxp(const Packet4i* vec // Other reduction functions: // mul -template<> EIGEN_STRONG_INLINE float ei_predux_mul(const Packet4f& a) +template<> EIGEN_STRONG_INLINE float predux_mul(const Packet4f& a) { float32x2_t a_lo, a_hi, prod; float s[2]; @@ -315,7 +322,7 @@ template<> EIGEN_STRONG_INLINE float ei_predux_mul(const Packet4f& a) return s[0]; } -template<> EIGEN_STRONG_INLINE int ei_predux_mul(const Packet4i& a) +template<> EIGEN_STRONG_INLINE int predux_mul(const Packet4i& a) { int32x2_t a_lo, a_hi, prod; int32_t s[2]; @@ -333,7 +340,7 @@ template<> EIGEN_STRONG_INLINE int ei_predux_mul(const Packet4i& a) } // min -template<> EIGEN_STRONG_INLINE float ei_predux_min(const Packet4f& a) +template<> EIGEN_STRONG_INLINE float predux_min(const Packet4f& a) { float32x2_t a_lo, a_hi, min; float s[2]; @@ 
-346,7 +353,7 @@ template<> EIGEN_STRONG_INLINE float ei_predux_min(const Packet4f& a) return s[0]; } -template<> EIGEN_STRONG_INLINE int ei_predux_min(const Packet4i& a) +template<> EIGEN_STRONG_INLINE int predux_min(const Packet4i& a) { int32x2_t a_lo, a_hi, min; int32_t s[2]; @@ -361,7 +368,7 @@ template<> EIGEN_STRONG_INLINE int ei_predux_min(const Packet4i& a) } // max -template<> EIGEN_STRONG_INLINE float ei_predux_max(const Packet4f& a) +template<> EIGEN_STRONG_INLINE float predux_max(const Packet4f& a) { float32x2_t a_lo, a_hi, max; float s[2]; @@ -374,7 +381,7 @@ template<> EIGEN_STRONG_INLINE float ei_predux_max(const Packet4f& a) return s[0]; } -template<> EIGEN_STRONG_INLINE int ei_predux_max(const Packet4i& a) +template<> EIGEN_STRONG_INLINE int predux_max(const Packet4i& a) { int32x2_t a_lo, a_hi, max; int32_t s[2]; @@ -389,7 +396,7 @@ template<> EIGEN_STRONG_INLINE int ei_predux_max(const Packet4i& a) } template -struct ei_palign_impl +struct palign_impl { EIGEN_STRONG_INLINE static void run(Packet4f& first, const Packet4f& second) { @@ -399,7 +406,7 @@ struct ei_palign_impl }; template -struct ei_palign_impl +struct palign_impl { EIGEN_STRONG_INLINE static void run(Packet4i& first, const Packet4i& second) { @@ -407,4 +414,7 @@ struct ei_palign_impl first = vextq_s32(first, second, Offset); } }; + +} // end namespace internal + #endif // EIGEN_PACKET_MATH_NEON_H diff --git a/gtsam/3rdparty/Eigen/src/Core/arch/SSE/Complex.h b/gtsam/3rdparty/Eigen/src/Core/arch/SSE/Complex.h index 819d59364..c352bb3e6 100644 --- a/gtsam/3rdparty/Eigen/src/Core/arch/SSE/Complex.h +++ b/gtsam/3rdparty/Eigen/src/Core/arch/SSE/Complex.h @@ -25,6 +25,8 @@ #ifndef EIGEN_COMPLEX_SSE_H #define EIGEN_COMPLEX_SSE_H +namespace internal { + //---------- float ---------- struct Packet2cf { @@ -33,7 +35,7 @@ struct Packet2cf __m128 v; }; -template<> struct ei_packet_traits > : ei_default_packet_traits +template<> struct packet_traits > : default_packet_traits { typedef Packet2cf 
type; enum { @@ -54,85 +56,100 @@ template<> struct ei_packet_traits > : ei_default_packet_tr }; }; -template<> struct ei_unpacket_traits { typedef std::complex type; enum {size=2}; }; +template<> struct unpacket_traits { typedef std::complex type; enum {size=2}; }; -template<> EIGEN_STRONG_INLINE Packet2cf ei_padd(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_add_ps(a.v,b.v)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_psub(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_sub_ps(a.v,b.v)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pnegate(const Packet2cf& a) +template<> EIGEN_STRONG_INLINE Packet2cf padd(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_add_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf psub(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_sub_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000)); return Packet2cf(_mm_xor_ps(a.v,mask)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pconj(const Packet2cf& a) +template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) { const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000)); return Packet2cf(_mm_xor_ps(a.v,mask)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pmul(const Packet2cf& a, const Packet2cf& b) +template<> EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) { // TODO optimize it for SSE3 and 4 #ifdef EIGEN_VECTORIZE_SSE3 return Packet2cf(_mm_addsub_ps(_mm_mul_ps(_mm_moveldup_ps(a.v), b.v), _mm_mul_ps(_mm_movehdup_ps(a.v), - ei_vec4f_swizzle1(b.v, 1, 0, 3, 2)))); -// return Packet2cf(_mm_addsub_ps(_mm_mul_ps(ei_vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), -// _mm_mul_ps(ei_vec4f_swizzle1(a.v, 1, 1, 3, 3), -// ei_vec4f_swizzle1(b.v, 1, 0, 3, 2)))); + vec4f_swizzle1(b.v, 1, 0, 3, 2)))); +// return 
Packet2cf(_mm_addsub_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), +// _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3), +// vec4f_swizzle1(b.v, 1, 0, 3, 2)))); #else const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x00000000,0x80000000,0x00000000)); - return Packet2cf(_mm_add_ps(_mm_mul_ps(ei_vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), - _mm_xor_ps(_mm_mul_ps(ei_vec4f_swizzle1(a.v, 1, 1, 3, 3), - ei_vec4f_swizzle1(b.v, 1, 0, 3, 2)), mask))); + return Packet2cf(_mm_add_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), + _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3), + vec4f_swizzle1(b.v, 1, 0, 3, 2)), mask))); #endif } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pand (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_and_ps(a.v,b.v)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_por (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_or_ps(a.v,b.v)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pxor (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_xor_ps(a.v,b.v)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pandnot(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_andnot_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pand (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_and_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf por (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_or_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pxor (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_xor_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pandnot(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_andnot_ps(a.v,b.v)); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_pload (const std::complex* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(ei_pload(&ei_real_ref(*from))); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_ploadu(const std::complex* from) { EIGEN_DEBUG_UNALIGNED_LOAD return 
Packet2cf(ei_ploadu(&ei_real_ref(*from))); } +template<> EIGEN_STRONG_INLINE Packet2cf pload (const std::complex* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload(&real_ref(*from))); } +template<> EIGEN_STRONG_INLINE Packet2cf ploadu(const std::complex* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu(&real_ref(*from))); } -template<> EIGEN_STRONG_INLINE void ei_pstore >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE ei_pstore(&ei_real_ref(*to), from.v); } -template<> EIGEN_STRONG_INLINE void ei_pstoreu >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE ei_pstoreu(&ei_real_ref(*to), from.v); } - -template<> EIGEN_STRONG_INLINE void ei_prefetch >(const std::complex * addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); } - -template<> EIGEN_STRONG_INLINE Packet2cf ei_pset1(const std::complex& from) +template<> EIGEN_STRONG_INLINE Packet2cf pset1(const std::complex& from) { Packet2cf res; + #if EIGEN_GNUC_AT_MOST(4,2) + // workaround annoying "may be used uninitialized in this function" warning with gcc 4.2 + res.v = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)&from); + #else res.v = _mm_loadl_pi(res.v, (const __m64*)&from); + #endif return Packet2cf(_mm_movelh_ps(res.v,res.v)); } -template<> EIGEN_STRONG_INLINE std::complex ei_pfirst(const Packet2cf& a) +template<> EIGEN_STRONG_INLINE Packet2cf ploaddup(const std::complex* from) { return pset1(*from); } + +template<> EIGEN_STRONG_INLINE void pstore >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&real_ref(*to), from.v); } +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&real_ref(*to), from.v); } + +template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex * addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); } + +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet2cf& a) { + #if EIGEN_GNUC_AT_MOST(4,3) + // Workaround 
gcc 4.2 ICE - this is not performance wise ideal, but who cares... + // This workaround also fix invalid code generation with gcc 4.3 + EIGEN_ALIGN16 std::complex res[2]; + _mm_store_ps((float*)res, a.v); + return res[0]; + #else std::complex res; _mm_storel_pi((__m64*)&res, a.v); return res; + #endif } -template<> EIGEN_STRONG_INLINE Packet2cf ei_preverse(const Packet2cf& a) { return Packet2cf(_mm_castpd_ps(ei_preverse(_mm_castps_pd(a.v)))); } +template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) { return Packet2cf(_mm_castpd_ps(preverse(_mm_castps_pd(a.v)))); } -template<> EIGEN_STRONG_INLINE std::complex ei_predux(const Packet2cf& a) +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet2cf& a) { - return ei_pfirst(Packet2cf(_mm_add_ps(a.v, _mm_movehl_ps(a.v,a.v)))); + return pfirst(Packet2cf(_mm_add_ps(a.v, _mm_movehl_ps(a.v,a.v)))); } -template<> EIGEN_STRONG_INLINE Packet2cf ei_preduxp(const Packet2cf* vecs) +template<> EIGEN_STRONG_INLINE Packet2cf preduxp(const Packet2cf* vecs) { return Packet2cf(_mm_add_ps(_mm_movelh_ps(vecs[0].v,vecs[1].v), _mm_movehl_ps(vecs[1].v,vecs[0].v))); } -template<> EIGEN_STRONG_INLINE std::complex ei_predux_mul(const Packet2cf& a) +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet2cf& a) { - return ei_pfirst(ei_pmul(a, Packet2cf(_mm_movehl_ps(a.v,a.v)))); + return pfirst(pmul(a, Packet2cf(_mm_movehl_ps(a.v,a.v)))); } template -struct ei_palign_impl +struct palign_impl { EIGEN_STRONG_INLINE static void run(Packet2cf& first, const Packet2cf& second) { @@ -144,89 +161,89 @@ struct ei_palign_impl } }; -template<> struct ei_conj_helper +template<> struct conj_helper { EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const - { return ei_padd(pmul(x,y),c); } + { return padd(pmul(x,y),c); } EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const { #ifdef EIGEN_VECTORIZE_SSE3 - return ei_pmul(a, ei_pconj(b)); + return 
internal::pmul(a, pconj(b)); #else const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000)); - return Packet2cf(_mm_add_ps(_mm_xor_ps(_mm_mul_ps(ei_vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), mask), - _mm_mul_ps(ei_vec4f_swizzle1(a.v, 1, 1, 3, 3), - ei_vec4f_swizzle1(b.v, 1, 0, 3, 2)))); + return Packet2cf(_mm_add_ps(_mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), mask), + _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3), + vec4f_swizzle1(b.v, 1, 0, 3, 2)))); #endif } }; -template<> struct ei_conj_helper +template<> struct conj_helper { EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const - { return ei_padd(pmul(x,y),c); } + { return padd(pmul(x,y),c); } EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const { #ifdef EIGEN_VECTORIZE_SSE3 - return ei_pmul(ei_pconj(a), b); + return internal::pmul(pconj(a), b); #else const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000)); - return Packet2cf(_mm_add_ps(_mm_mul_ps(ei_vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), - _mm_xor_ps(_mm_mul_ps(ei_vec4f_swizzle1(a.v, 1, 1, 3, 3), - ei_vec4f_swizzle1(b.v, 1, 0, 3, 2)), mask))); + return Packet2cf(_mm_add_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), + _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3), + vec4f_swizzle1(b.v, 1, 0, 3, 2)), mask))); #endif } }; -template<> struct ei_conj_helper +template<> struct conj_helper { EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const - { return ei_padd(pmul(x,y),c); } + { return padd(pmul(x,y),c); } EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const { #ifdef EIGEN_VECTORIZE_SSE3 - return ei_pconj(ei_pmul(a, b)); + return pconj(internal::pmul(a, b)); #else const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000)); - return 
Packet2cf(_mm_sub_ps(_mm_xor_ps(_mm_mul_ps(ei_vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), mask), - _mm_mul_ps(ei_vec4f_swizzle1(a.v, 1, 1, 3, 3), - ei_vec4f_swizzle1(b.v, 1, 0, 3, 2)))); + return Packet2cf(_mm_sub_ps(_mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), mask), + _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3), + vec4f_swizzle1(b.v, 1, 0, 3, 2)))); #endif } }; -template<> struct ei_conj_helper +template<> struct conj_helper { EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet4f& x, const Packet2cf& y, const Packet2cf& c) const - { return ei_padd(c, pmul(x,y)); } + { return padd(c, pmul(x,y)); } EIGEN_STRONG_INLINE Packet2cf pmul(const Packet4f& x, const Packet2cf& y) const - { return Packet2cf(ei_pmul(x, y.v)); } + { return Packet2cf(Eigen::internal::pmul(x, y.v)); } }; -template<> struct ei_conj_helper +template<> struct conj_helper { EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet4f& y, const Packet2cf& c) const - { return ei_padd(c, pmul(x,y)); } + { return padd(c, pmul(x,y)); } EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& x, const Packet4f& y) const - { return Packet2cf(ei_pmul(x.v, y)); } + { return Packet2cf(Eigen::internal::pmul(x.v, y)); } }; -template<> EIGEN_STRONG_INLINE Packet2cf ei_pdiv(const Packet2cf& a, const Packet2cf& b) +template<> EIGEN_STRONG_INLINE Packet2cf pdiv(const Packet2cf& a, const Packet2cf& b) { // TODO optimize it for SSE3 and 4 - Packet2cf res = ei_conj_helper().pmul(a,b); + Packet2cf res = conj_helper().pmul(a,b); __m128 s = _mm_mul_ps(b.v,b.v); return Packet2cf(_mm_div_ps(res.v,_mm_add_ps(s,_mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(s), 0xb1))))); } -EIGEN_STRONG_INLINE Packet2cf ei_pcplxflip/**/(const Packet2cf& x) +EIGEN_STRONG_INLINE Packet2cf pcplxflip/**/(const Packet2cf& x) { - return Packet2cf(ei_vec4f_swizzle1(x.v, 1, 0, 3, 2)); + return Packet2cf(vec4f_swizzle1(x.v, 1, 0, 3, 2)); } @@ -238,7 +255,7 @@ struct Packet1cd __m128d v; }; -template<> struct ei_packet_traits > : 
ei_default_packet_traits +template<> struct packet_traits > : default_packet_traits { typedef Packet1cd type; enum { @@ -259,77 +276,79 @@ template<> struct ei_packet_traits > : ei_default_packet_t }; }; -template<> struct ei_unpacket_traits { typedef std::complex type; enum {size=1}; }; +template<> struct unpacket_traits { typedef std::complex type; enum {size=1}; }; -template<> EIGEN_STRONG_INLINE Packet1cd ei_padd(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_add_pd(a.v,b.v)); } -template<> EIGEN_STRONG_INLINE Packet1cd ei_psub(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_sub_pd(a.v,b.v)); } -template<> EIGEN_STRONG_INLINE Packet1cd ei_pnegate(const Packet1cd& a) { return Packet1cd(ei_pnegate(a.v)); } -template<> EIGEN_STRONG_INLINE Packet1cd ei_pconj(const Packet1cd& a) +template<> EIGEN_STRONG_INLINE Packet1cd padd(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_add_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd psub(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_sub_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate(a.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) { const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0)); return Packet1cd(_mm_xor_pd(a.v,mask)); } -template<> EIGEN_STRONG_INLINE Packet1cd ei_pmul(const Packet1cd& a, const Packet1cd& b) +template<> EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) { // TODO optimize it for SSE3 and 4 #ifdef EIGEN_VECTORIZE_SSE3 - return Packet1cd(_mm_addsub_pd(_mm_mul_pd(ei_vec2d_swizzle1(a.v, 0, 0), b.v), - _mm_mul_pd(ei_vec2d_swizzle1(a.v, 1, 1), - ei_vec2d_swizzle1(b.v, 1, 0)))); + return Packet1cd(_mm_addsub_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v), + _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1), + vec2d_swizzle1(b.v, 1, 0)))); #else const __m128d mask = 
_mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0)); - return Packet1cd(_mm_add_pd(_mm_mul_pd(ei_vec2d_swizzle1(a.v, 0, 0), b.v), - _mm_xor_pd(_mm_mul_pd(ei_vec2d_swizzle1(a.v, 1, 1), - ei_vec2d_swizzle1(b.v, 1, 0)), mask))); + return Packet1cd(_mm_add_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v), + _mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 1, 1), + vec2d_swizzle1(b.v, 1, 0)), mask))); #endif } -template<> EIGEN_STRONG_INLINE Packet1cd ei_pand (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_and_pd(a.v,b.v)); } -template<> EIGEN_STRONG_INLINE Packet1cd ei_por (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_or_pd(a.v,b.v)); } -template<> EIGEN_STRONG_INLINE Packet1cd ei_pxor (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_xor_pd(a.v,b.v)); } -template<> EIGEN_STRONG_INLINE Packet1cd ei_pandnot(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_andnot_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd pand (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_and_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd por (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_or_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd pxor (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_xor_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd pandnot(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_andnot_pd(a.v,b.v)); } // FIXME force unaligned load, this is a temporary fix -template<> EIGEN_STRONG_INLINE Packet1cd ei_pload (const std::complex* from) -{ EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(ei_pload((const double*)from)); } -template<> EIGEN_STRONG_INLINE Packet1cd ei_ploadu(const std::complex* from) -{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ei_ploadu((const double*)from)); } -template<> EIGEN_STRONG_INLINE Packet1cd ei_pset1(const std::complex& from) -{ /* here we really have to use unaligned loads :( */ return ei_ploadu(&from); } 
+template<> EIGEN_STRONG_INLINE Packet1cd pload (const std::complex* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload((const double*)from)); } +template<> EIGEN_STRONG_INLINE Packet1cd ploadu(const std::complex* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu((const double*)from)); } +template<> EIGEN_STRONG_INLINE Packet1cd pset1(const std::complex& from) +{ /* here we really have to use unaligned loads :( */ return ploadu(&from); } + +template<> EIGEN_STRONG_INLINE Packet1cd ploaddup(const std::complex* from) { return pset1(*from); } // FIXME force unaligned store, this is a temporary fix -template<> EIGEN_STRONG_INLINE void ei_pstore >(std::complex * to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE ei_pstore((double*)to, from.v); } -template<> EIGEN_STRONG_INLINE void ei_pstoreu >(std::complex * to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE ei_pstoreu((double*)to, from.v); } +template<> EIGEN_STRONG_INLINE void pstore >(std::complex * to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); } +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); } -template<> EIGEN_STRONG_INLINE void ei_prefetch >(const std::complex * addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); } +template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex * addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); } -template<> EIGEN_STRONG_INLINE std::complex ei_pfirst(const Packet1cd& a) +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet1cd& a) { EIGEN_ALIGN16 double res[2]; _mm_store_pd(res, a.v); return std::complex(res[0],res[1]); } -template<> EIGEN_STRONG_INLINE Packet1cd ei_preverse(const Packet1cd& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; } -template<> EIGEN_STRONG_INLINE std::complex ei_predux(const Packet1cd& a) +template<> 
EIGEN_STRONG_INLINE std::complex predux(const Packet1cd& a) { - return ei_pfirst(a); + return pfirst(a); } -template<> EIGEN_STRONG_INLINE Packet1cd ei_preduxp(const Packet1cd* vecs) +template<> EIGEN_STRONG_INLINE Packet1cd preduxp(const Packet1cd* vecs) { return vecs[0]; } -template<> EIGEN_STRONG_INLINE std::complex ei_predux_mul(const Packet1cd& a) +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet1cd& a) { - return ei_pfirst(a); + return pfirst(a); } template -struct ei_palign_impl +struct palign_impl { EIGEN_STRONG_INLINE static void run(Packet1cd& /*first*/, const Packet1cd& /*second*/) { @@ -338,89 +357,91 @@ struct ei_palign_impl } }; -template<> struct ei_conj_helper +template<> struct conj_helper { EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const - { return ei_padd(pmul(x,y),c); } + { return padd(pmul(x,y),c); } EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const { #ifdef EIGEN_VECTORIZE_SSE3 - return ei_pmul(a, ei_pconj(b)); + return internal::pmul(a, pconj(b)); #else const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0)); - return Packet1cd(_mm_add_pd(_mm_xor_pd(_mm_mul_pd(ei_vec2d_swizzle1(a.v, 0, 0), b.v), mask), - _mm_mul_pd(ei_vec2d_swizzle1(a.v, 1, 1), - ei_vec2d_swizzle1(b.v, 1, 0)))); + return Packet1cd(_mm_add_pd(_mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v), mask), + _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1), + vec2d_swizzle1(b.v, 1, 0)))); #endif } }; -template<> struct ei_conj_helper +template<> struct conj_helper { EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const - { return ei_padd(pmul(x,y),c); } + { return padd(pmul(x,y),c); } EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const { #ifdef EIGEN_VECTORIZE_SSE3 - return ei_pmul(ei_pconj(a), b); + return internal::pmul(pconj(a), b); #else const __m128d mask = 
_mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0)); - return Packet1cd(_mm_add_pd(_mm_mul_pd(ei_vec2d_swizzle1(a.v, 0, 0), b.v), - _mm_xor_pd(_mm_mul_pd(ei_vec2d_swizzle1(a.v, 1, 1), - ei_vec2d_swizzle1(b.v, 1, 0)), mask))); + return Packet1cd(_mm_add_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v), + _mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 1, 1), + vec2d_swizzle1(b.v, 1, 0)), mask))); #endif } }; -template<> struct ei_conj_helper +template<> struct conj_helper { EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const - { return ei_padd(pmul(x,y),c); } + { return padd(pmul(x,y),c); } EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const { #ifdef EIGEN_VECTORIZE_SSE3 - return ei_pconj(ei_pmul(a, b)); + return pconj(internal::pmul(a, b)); #else const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0)); - return Packet1cd(_mm_sub_pd(_mm_xor_pd(_mm_mul_pd(ei_vec2d_swizzle1(a.v, 0, 0), b.v), mask), - _mm_mul_pd(ei_vec2d_swizzle1(a.v, 1, 1), - ei_vec2d_swizzle1(b.v, 1, 0)))); + return Packet1cd(_mm_sub_pd(_mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v), mask), + _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1), + vec2d_swizzle1(b.v, 1, 0)))); #endif } }; -template<> struct ei_conj_helper +template<> struct conj_helper { EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet2d& x, const Packet1cd& y, const Packet1cd& c) const - { return ei_padd(c, pmul(x,y)); } + { return padd(c, pmul(x,y)); } EIGEN_STRONG_INLINE Packet1cd pmul(const Packet2d& x, const Packet1cd& y) const - { return Packet1cd(ei_pmul(x, y.v)); } + { return Packet1cd(Eigen::internal::pmul(x, y.v)); } }; -template<> struct ei_conj_helper +template<> struct conj_helper { EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet2d& y, const Packet1cd& c) const - { return ei_padd(c, pmul(x,y)); } + { return padd(c, pmul(x,y)); } EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& x, const Packet2d& y) const - { 
return Packet1cd(ei_pmul(x.v, y)); } + { return Packet1cd(Eigen::internal::pmul(x.v, y)); } }; -template<> EIGEN_STRONG_INLINE Packet1cd ei_pdiv(const Packet1cd& a, const Packet1cd& b) +template<> EIGEN_STRONG_INLINE Packet1cd pdiv(const Packet1cd& a, const Packet1cd& b) { // TODO optimize it for SSE3 and 4 - Packet1cd res = ei_conj_helper().pmul(a,b); + Packet1cd res = conj_helper().pmul(a,b); __m128d s = _mm_mul_pd(b.v,b.v); return Packet1cd(_mm_div_pd(res.v, _mm_add_pd(s,_mm_shuffle_pd(s, s, 0x1)))); } -EIGEN_STRONG_INLINE Packet1cd ei_pcplxflip/**/(const Packet1cd& x) +EIGEN_STRONG_INLINE Packet1cd pcplxflip/**/(const Packet1cd& x) { - return Packet1cd(ei_preverse(x.v)); + return Packet1cd(preverse(x.v)); } +} // end namespace internal + #endif // EIGEN_COMPLEX_SSE_H diff --git a/gtsam/3rdparty/Eigen/src/Core/arch/SSE/MathFunctions.h b/gtsam/3rdparty/Eigen/src/Core/arch/SSE/MathFunctions.h index cb73fd205..9d56d8218 100644 --- a/gtsam/3rdparty/Eigen/src/Core/arch/SSE/MathFunctions.h +++ b/gtsam/3rdparty/Eigen/src/Core/arch/SSE/MathFunctions.h @@ -30,8 +30,10 @@ #ifndef EIGEN_MATH_FUNCTIONS_SSE_H #define EIGEN_MATH_FUNCTIONS_SSE_H +namespace internal { + template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED -Packet4f ei_plog(const Packet4f& _x) +Packet4f plog(const Packet4f& _x) { Packet4f x = _x; _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f); @@ -64,15 +66,15 @@ Packet4f ei_plog(const Packet4f& _x) Packet4f invalid_mask = _mm_cmple_ps(x, _mm_setzero_ps()); - x = ei_pmax(x, ei_p4f_min_norm_pos); /* cut off denormalized stuff */ + x = pmax(x, p4f_min_norm_pos); /* cut off denormalized stuff */ emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23); /* keep only the fractional part */ - x = _mm_and_ps(x, ei_p4f_inv_mant_mask); - x = _mm_or_ps(x, ei_p4f_half); + x = _mm_and_ps(x, p4f_inv_mant_mask); + x = _mm_or_ps(x, p4f_half); - emm0 = _mm_sub_epi32(emm0, ei_p4i_0x7f); - Packet4f e = ei_padd(_mm_cvtepi32_ps(emm0), ei_p4f_1); + emm0 = _mm_sub_epi32(emm0, 
p4i_0x7f); + Packet4f e = padd(_mm_cvtepi32_ps(emm0), p4f_1); /* part2: if( x < SQRTHF ) { @@ -80,38 +82,38 @@ Packet4f ei_plog(const Packet4f& _x) x = x + x - 1.0; } else { x = x - 1.0; } */ - Packet4f mask = _mm_cmplt_ps(x, ei_p4f_cephes_SQRTHF); + Packet4f mask = _mm_cmplt_ps(x, p4f_cephes_SQRTHF); Packet4f tmp = _mm_and_ps(x, mask); - x = ei_psub(x, ei_p4f_1); - e = ei_psub(e, _mm_and_ps(ei_p4f_1, mask)); - x = ei_padd(x, tmp); + x = psub(x, p4f_1); + e = psub(e, _mm_and_ps(p4f_1, mask)); + x = padd(x, tmp); - Packet4f x2 = ei_pmul(x,x); - Packet4f x3 = ei_pmul(x2,x); + Packet4f x2 = pmul(x,x); + Packet4f x3 = pmul(x2,x); Packet4f y, y1, y2; - y = ei_pmadd(ei_p4f_cephes_log_p0, x, ei_p4f_cephes_log_p1); - y1 = ei_pmadd(ei_p4f_cephes_log_p3, x, ei_p4f_cephes_log_p4); - y2 = ei_pmadd(ei_p4f_cephes_log_p6, x, ei_p4f_cephes_log_p7); - y = ei_pmadd(y , x, ei_p4f_cephes_log_p2); - y1 = ei_pmadd(y1, x, ei_p4f_cephes_log_p5); - y2 = ei_pmadd(y2, x, ei_p4f_cephes_log_p8); - y = ei_pmadd(y, x3, y1); - y = ei_pmadd(y, x3, y2); - y = ei_pmul(y, x3); + y = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1); + y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4); + y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7); + y = pmadd(y , x, p4f_cephes_log_p2); + y1 = pmadd(y1, x, p4f_cephes_log_p5); + y2 = pmadd(y2, x, p4f_cephes_log_p8); + y = pmadd(y, x3, y1); + y = pmadd(y, x3, y2); + y = pmul(y, x3); - y1 = ei_pmul(e, ei_p4f_cephes_log_q1); - tmp = ei_pmul(x2, ei_p4f_half); - y = ei_padd(y, y1); - x = ei_psub(x, tmp); - y2 = ei_pmul(e, ei_p4f_cephes_log_q2); - x = ei_padd(x, y); - x = ei_padd(x, y2); + y1 = pmul(e, p4f_cephes_log_q1); + tmp = pmul(x2, p4f_half); + y = padd(y, y1); + x = psub(x, tmp); + y2 = pmul(e, p4f_cephes_log_q2); + x = padd(x, y); + x = padd(x, y2); return _mm_or_ps(x, invalid_mask); // negative arg will be NAN } template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED -Packet4f ei_pexp(const Packet4f& _x) +Packet4f pexp(const Packet4f& _x) 
{ Packet4f x = _x; _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f); @@ -137,40 +139,40 @@ Packet4f ei_pexp(const Packet4f& _x) Packet4i emm0; // clamp x - x = ei_pmax(ei_pmin(x, ei_p4f_exp_hi), ei_p4f_exp_lo); + x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo); /* express exp(x) as exp(g + n*log(2)) */ - fx = ei_pmadd(x, ei_p4f_cephes_LOG2EF, ei_p4f_half); + fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half); /* how to perform a floorf with SSE: just below */ emm0 = _mm_cvttps_epi32(fx); tmp = _mm_cvtepi32_ps(emm0); /* if greater, substract 1 */ Packet4f mask = _mm_cmpgt_ps(tmp, fx); - mask = _mm_and_ps(mask, ei_p4f_1); - fx = ei_psub(tmp, mask); + mask = _mm_and_ps(mask, p4f_1); + fx = psub(tmp, mask); - tmp = ei_pmul(fx, ei_p4f_cephes_exp_C1); - Packet4f z = ei_pmul(fx, ei_p4f_cephes_exp_C2); - x = ei_psub(x, tmp); - x = ei_psub(x, z); + tmp = pmul(fx, p4f_cephes_exp_C1); + Packet4f z = pmul(fx, p4f_cephes_exp_C2); + x = psub(x, tmp); + x = psub(x, z); - z = ei_pmul(x,x); + z = pmul(x,x); - Packet4f y = ei_p4f_cephes_exp_p0; - y = ei_pmadd(y, x, ei_p4f_cephes_exp_p1); - y = ei_pmadd(y, x, ei_p4f_cephes_exp_p2); - y = ei_pmadd(y, x, ei_p4f_cephes_exp_p3); - y = ei_pmadd(y, x, ei_p4f_cephes_exp_p4); - y = ei_pmadd(y, x, ei_p4f_cephes_exp_p5); - y = ei_pmadd(y, z, x); - y = ei_padd(y, ei_p4f_1); + Packet4f y = p4f_cephes_exp_p0; + y = pmadd(y, x, p4f_cephes_exp_p1); + y = pmadd(y, x, p4f_cephes_exp_p2); + y = pmadd(y, x, p4f_cephes_exp_p3); + y = pmadd(y, x, p4f_cephes_exp_p4); + y = pmadd(y, x, p4f_cephes_exp_p5); + y = pmadd(y, z, x); + y = padd(y, p4f_1); /* build 2^n */ emm0 = _mm_cvttps_epi32(fx); - emm0 = _mm_add_epi32(emm0, ei_p4i_0x7f); + emm0 = _mm_add_epi32(emm0, p4i_0x7f); emm0 = _mm_slli_epi32(emm0, 23); - return ei_pmul(y, _mm_castsi128_ps(emm0)); + return pmul(y, _mm_castsi128_ps(emm0)); } /* evaluation of 4 sines at onces, using SSE2 intrinsics. 
@@ -186,7 +188,7 @@ Packet4f ei_pexp(const Packet4f& _x) */ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED -Packet4f ei_psin(const Packet4f& _x) +Packet4f psin(const Packet4f& _x) { Packet4f x = _x; _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f); @@ -215,24 +217,24 @@ Packet4f ei_psin(const Packet4f& _x) Packet4i emm0, emm2; sign_bit = x; /* take the absolute value */ - x = ei_pabs(x); + x = pabs(x); /* take the modulo */ /* extract the sign bit (upper one) */ - sign_bit = _mm_and_ps(sign_bit, ei_p4f_sign_mask); + sign_bit = _mm_and_ps(sign_bit, p4f_sign_mask); /* scale by 4/Pi */ - y = ei_pmul(x, ei_p4f_cephes_FOPI); + y = pmul(x, p4f_cephes_FOPI); /* store the integer part of y in mm0 */ emm2 = _mm_cvttps_epi32(y); /* j=(j+1) & (~1) (see the cephes sources) */ - emm2 = _mm_add_epi32(emm2, ei_p4i_1); - emm2 = _mm_and_si128(emm2, ei_p4i_not1); + emm2 = _mm_add_epi32(emm2, p4i_1); + emm2 = _mm_and_si128(emm2, p4i_not1); y = _mm_cvtepi32_ps(emm2); /* get the swap sign flag */ - emm0 = _mm_and_si128(emm2, ei_p4i_4); + emm0 = _mm_and_si128(emm2, p4i_4); emm0 = _mm_slli_epi32(emm0, 29); /* get the polynom selection mask there is one polynom for 0 <= x <= Pi/4 @@ -240,7 +242,7 @@ Packet4f ei_psin(const Packet4f& _x) Both branches will be computed. 
*/ - emm2 = _mm_and_si128(emm2, ei_p4i_2); + emm2 = _mm_and_si128(emm2, p4i_2); emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128()); Packet4f swap_sign_bit = _mm_castsi128_ps(emm0); @@ -249,33 +251,33 @@ Packet4f ei_psin(const Packet4f& _x) /* The magic pass: "Extended precision modular arithmetic" x = ((x - y * DP1) - y * DP2) - y * DP3; */ - xmm1 = ei_pmul(y, ei_p4f_minus_cephes_DP1); - xmm2 = ei_pmul(y, ei_p4f_minus_cephes_DP2); - xmm3 = ei_pmul(y, ei_p4f_minus_cephes_DP3); - x = ei_padd(x, xmm1); - x = ei_padd(x, xmm2); - x = ei_padd(x, xmm3); + xmm1 = pmul(y, p4f_minus_cephes_DP1); + xmm2 = pmul(y, p4f_minus_cephes_DP2); + xmm3 = pmul(y, p4f_minus_cephes_DP3); + x = padd(x, xmm1); + x = padd(x, xmm2); + x = padd(x, xmm3); /* Evaluate the first polynom (0 <= x <= Pi/4) */ - y = ei_p4f_coscof_p0; + y = p4f_coscof_p0; Packet4f z = _mm_mul_ps(x,x); - y = ei_pmadd(y, z, ei_p4f_coscof_p1); - y = ei_pmadd(y, z, ei_p4f_coscof_p2); - y = ei_pmul(y, z); - y = ei_pmul(y, z); - Packet4f tmp = ei_pmul(z, ei_p4f_half); - y = ei_psub(y, tmp); - y = ei_padd(y, ei_p4f_1); + y = pmadd(y, z, p4f_coscof_p1); + y = pmadd(y, z, p4f_coscof_p2); + y = pmul(y, z); + y = pmul(y, z); + Packet4f tmp = pmul(z, p4f_half); + y = psub(y, tmp); + y = padd(y, p4f_1); /* Evaluate the second polynom (Pi/4 <= x <= 0) */ - Packet4f y2 = ei_p4f_sincof_p0; - y2 = ei_pmadd(y2, z, ei_p4f_sincof_p1); - y2 = ei_pmadd(y2, z, ei_p4f_sincof_p2); - y2 = ei_pmul(y2, z); - y2 = ei_pmul(y2, x); - y2 = ei_padd(y2, x); + Packet4f y2 = p4f_sincof_p0; + y2 = pmadd(y2, z, p4f_sincof_p1); + y2 = pmadd(y2, z, p4f_sincof_p2); + y2 = pmul(y2, z); + y2 = pmul(y2, x); + y2 = padd(y2, x); /* select the correct result from the two polynoms */ y2 = _mm_and_ps(poly_mask, y2); @@ -285,9 +287,9 @@ Packet4f ei_psin(const Packet4f& _x) return _mm_xor_ps(y, sign_bit); } -/* almost the same as ei_psin */ +/* almost the same as psin */ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED -Packet4f ei_pcos(const 
Packet4f& _x) +Packet4f pcos(const Packet4f& _x) { Packet4f x = _x; _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f); @@ -312,25 +314,25 @@ Packet4f ei_pcos(const Packet4f& _x) Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, y; Packet4i emm0, emm2; - x = ei_pabs(x); + x = pabs(x); /* scale by 4/Pi */ - y = ei_pmul(x, ei_p4f_cephes_FOPI); + y = pmul(x, p4f_cephes_FOPI); /* get the integer part of y */ emm2 = _mm_cvttps_epi32(y); /* j=(j+1) & (~1) (see the cephes sources) */ - emm2 = _mm_add_epi32(emm2, ei_p4i_1); - emm2 = _mm_and_si128(emm2, ei_p4i_not1); + emm2 = _mm_add_epi32(emm2, p4i_1); + emm2 = _mm_and_si128(emm2, p4i_not1); y = _mm_cvtepi32_ps(emm2); - emm2 = _mm_sub_epi32(emm2, ei_p4i_2); + emm2 = _mm_sub_epi32(emm2, p4i_2); /* get the swap sign flag */ - emm0 = _mm_andnot_si128(emm2, ei_p4i_4); + emm0 = _mm_andnot_si128(emm2, p4i_4); emm0 = _mm_slli_epi32(emm0, 29); /* get the polynom selection mask */ - emm2 = _mm_and_si128(emm2, ei_p4i_2); + emm2 = _mm_and_si128(emm2, p4i_2); emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128()); Packet4f sign_bit = _mm_castsi128_ps(emm0); @@ -338,31 +340,31 @@ Packet4f ei_pcos(const Packet4f& _x) /* The magic pass: "Extended precision modular arithmetic" x = ((x - y * DP1) - y * DP2) - y * DP3; */ - xmm1 = ei_pmul(y, ei_p4f_minus_cephes_DP1); - xmm2 = ei_pmul(y, ei_p4f_minus_cephes_DP2); - xmm3 = ei_pmul(y, ei_p4f_minus_cephes_DP3); - x = ei_padd(x, xmm1); - x = ei_padd(x, xmm2); - x = ei_padd(x, xmm3); + xmm1 = pmul(y, p4f_minus_cephes_DP1); + xmm2 = pmul(y, p4f_minus_cephes_DP2); + xmm3 = pmul(y, p4f_minus_cephes_DP3); + x = padd(x, xmm1); + x = padd(x, xmm2); + x = padd(x, xmm3); /* Evaluate the first polynom (0 <= x <= Pi/4) */ - y = ei_p4f_coscof_p0; - Packet4f z = ei_pmul(x,x); + y = p4f_coscof_p0; + Packet4f z = pmul(x,x); - y = ei_pmadd(y,z,ei_p4f_coscof_p1); - y = ei_pmadd(y,z,ei_p4f_coscof_p2); - y = ei_pmul(y, z); - y = ei_pmul(y, z); - Packet4f tmp = _mm_mul_ps(z, ei_p4f_half); - y = ei_psub(y, tmp); - y = ei_padd(y, 
ei_p4f_1); + y = pmadd(y,z,p4f_coscof_p1); + y = pmadd(y,z,p4f_coscof_p2); + y = pmul(y, z); + y = pmul(y, z); + Packet4f tmp = _mm_mul_ps(z, p4f_half); + y = psub(y, tmp); + y = padd(y, p4f_1); /* Evaluate the second polynom (Pi/4 <= x <= 0) */ - Packet4f y2 = ei_p4f_sincof_p0; - y2 = ei_pmadd(y2, z, ei_p4f_sincof_p1); - y2 = ei_pmadd(y2, z, ei_p4f_sincof_p2); - y2 = ei_pmul(y2, z); - y2 = ei_pmadd(y2, x, x); + Packet4f y2 = p4f_sincof_p0; + y2 = pmadd(y2, z, p4f_sincof_p1); + y2 = pmadd(y2, z, p4f_sincof_p2); + y2 = pmul(y2, z); + y2 = pmadd(y2, x, x); /* select the correct result from the two polynoms */ y2 = _mm_and_ps(poly_mask, y2); @@ -376,16 +378,18 @@ Packet4f ei_pcos(const Packet4f& _x) // This is based on Quake3's fast inverse square root. // For detail see here: http://www.beyond3d.com/content/articles/8/ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED -Packet4f ei_psqrt(const Packet4f& _x) +Packet4f psqrt(const Packet4f& _x) { - Packet4f half = ei_pmul(_x, ei_pset1(.5f)); + Packet4f half = pmul(_x, pset1(.5f)); /* select only the inverse sqrt of non-zero inputs */ - Packet4f non_zero_mask = _mm_cmpgt_ps(_x, ei_pset1(std::numeric_limits::epsilon())); + Packet4f non_zero_mask = _mm_cmpgt_ps(_x, pset1(std::numeric_limits::epsilon())); Packet4f x = _mm_and_ps(non_zero_mask, _mm_rsqrt_ps(_x)); - x = ei_pmul(x, ei_psub(ei_pset1(1.5f), ei_pmul(half, ei_pmul(x,x)))); - return ei_pmul(_x,x); + x = pmul(x, psub(pset1(1.5f), pmul(half, pmul(x,x)))); + return pmul(_x,x); } +} // end namespace internal + #endif // EIGEN_MATH_FUNCTIONS_SSE_H diff --git a/gtsam/3rdparty/Eigen/src/Core/arch/SSE/PacketMath.h b/gtsam/3rdparty/Eigen/src/Core/arch/SSE/PacketMath.h index a7206e77d..908e27368 100644 --- a/gtsam/3rdparty/Eigen/src/Core/arch/SSE/PacketMath.h +++ b/gtsam/3rdparty/Eigen/src/Core/arch/SSE/PacketMath.h @@ -25,6 +25,8 @@ #ifndef EIGEN_PACKET_MATH_SSE_H #define EIGEN_PACKET_MATH_SSE_H +namespace internal { + #ifndef 
EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8 #endif @@ -37,36 +39,36 @@ typedef __m128 Packet4f; typedef __m128i Packet4i; typedef __m128d Packet2d; -template<> struct ei_is_arithmetic<__m128> { enum { ret = true }; }; -template<> struct ei_is_arithmetic<__m128i> { enum { ret = true }; }; -template<> struct ei_is_arithmetic<__m128d> { enum { ret = true }; }; +template<> struct is_arithmetic<__m128> { enum { value = true }; }; +template<> struct is_arithmetic<__m128i> { enum { value = true }; }; +template<> struct is_arithmetic<__m128d> { enum { value = true }; }; -#define ei_vec4f_swizzle1(v,p,q,r,s) \ +#define vec4f_swizzle1(v,p,q,r,s) \ (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p))))) -#define ei_vec4i_swizzle1(v,p,q,r,s) \ +#define vec4i_swizzle1(v,p,q,r,s) \ (_mm_shuffle_epi32( v, ((s)<<6|(r)<<4|(q)<<2|(p)))) -#define ei_vec2d_swizzle1(v,p,q) \ +#define vec2d_swizzle1(v,p,q) \ (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), ((q*2+1)<<6|(q*2)<<4|(p*2+1)<<2|(p*2))))) -#define ei_vec4f_swizzle2(a,b,p,q,r,s) \ +#define vec4f_swizzle2(a,b,p,q,r,s) \ (_mm_shuffle_ps( (a), (b), ((s)<<6|(r)<<4|(q)<<2|(p)))) -#define ei_vec4i_swizzle2(a,b,p,q,r,s) \ +#define vec4i_swizzle2(a,b,p,q,r,s) \ (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), ((s)<<6|(r)<<4|(q)<<2|(p)))))) #define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \ - const Packet4f ei_p4f_##NAME = ei_pset1(X) + const Packet4f p4f_##NAME = pset1(X) #define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \ - const Packet4f ei_p4f_##NAME = _mm_castsi128_ps(ei_pset1(X)) + const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1(X)) #define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \ - const Packet4i ei_p4i_##NAME = ei_pset1(X) + const Packet4i p4i_##NAME = pset1(X) -template<> struct ei_packet_traits : ei_default_packet_traits +template<> struct packet_traits : default_packet_traits { typedef Packet4f type; enum { @@ 
-82,7 +84,7 @@ template<> struct ei_packet_traits : ei_default_packet_traits HasSqrt = 1 }; }; -template<> struct ei_packet_traits : ei_default_packet_traits +template<> struct packet_traits : default_packet_traits { typedef Packet2d type; enum { @@ -93,7 +95,7 @@ template<> struct ei_packet_traits : ei_default_packet_traits HasDiv = 1 }; }; -template<> struct ei_packet_traits : ei_default_packet_traits +template<> struct packet_traits : default_packet_traits { typedef Packet4i type; enum { @@ -104,125 +106,124 @@ template<> struct ei_packet_traits : ei_default_packet_traits }; }; -template<> struct ei_unpacket_traits { typedef float type; enum {size=4}; }; -template<> struct ei_unpacket_traits { typedef double type; enum {size=2}; }; -template<> struct ei_unpacket_traits { typedef int type; enum {size=4}; }; +template<> struct unpacket_traits { typedef float type; enum {size=4}; }; +template<> struct unpacket_traits { typedef double type; enum {size=2}; }; +template<> struct unpacket_traits { typedef int type; enum {size=4}; }; -#ifdef __GNUC__ -// Sometimes GCC implements _mm_set1_p* using multiple moves, -// that is inefficient :( (e.g., see ei_gemm_pack_rhs) -template<> EIGEN_STRONG_INLINE Packet4f ei_pset1(const float& from) { - Packet4f res = _mm_set_ss(from); - return ei_vec4f_swizzle1(res,0,0,0,0); -} -template<> EIGEN_STRONG_INLINE Packet2d ei_pset1(const double& from) { - // NOTE the SSE3 intrinsic _mm_loaddup_pd is never faster but sometimes much slower - Packet2d res = _mm_set_sd(from); - return ei_vec2d_swizzle1(res, 0, 0); -} -#else -template<> EIGEN_STRONG_INLINE Packet4f ei_pset1(const float& from) { return _mm_set1_ps(from); } -template<> EIGEN_STRONG_INLINE Packet2d ei_pset1(const double& from) { return _mm_set1_pd(from); } -#endif -template<> EIGEN_STRONG_INLINE Packet4i ei_pset1(const int& from) { return _mm_set1_epi32(from); } +template<> EIGEN_STRONG_INLINE Packet4f pset1(const float& from) { return _mm_set1_ps(from); } +template<> 
EIGEN_STRONG_INLINE Packet2d pset1(const double& from) { return _mm_set1_pd(from); } +template<> EIGEN_STRONG_INLINE Packet4i pset1(const int& from) { return _mm_set1_epi32(from); } -template<> EIGEN_STRONG_INLINE Packet4f ei_plset(const float& a) { return _mm_add_ps(ei_pset1(a), _mm_set_ps(3,2,1,0)); } -template<> EIGEN_STRONG_INLINE Packet2d ei_plset(const double& a) { return _mm_add_pd(ei_pset1(a),_mm_set_pd(1,0)); } -template<> EIGEN_STRONG_INLINE Packet4i ei_plset(const int& a) { return _mm_add_epi32(ei_pset1(a),_mm_set_epi32(3,2,1,0)); } +template<> EIGEN_STRONG_INLINE Packet4f plset(const float& a) { return _mm_add_ps(pset1(a), _mm_set_ps(3,2,1,0)); } +template<> EIGEN_STRONG_INLINE Packet2d plset(const double& a) { return _mm_add_pd(pset1(a),_mm_set_pd(1,0)); } +template<> EIGEN_STRONG_INLINE Packet4i plset(const int& a) { return _mm_add_epi32(pset1(a),_mm_set_epi32(3,2,1,0)); } -template<> EIGEN_STRONG_INLINE Packet4f ei_padd(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); } -template<> EIGEN_STRONG_INLINE Packet2d ei_padd(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_padd(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f padd(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d padd(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i padd(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_psub(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); } -template<> EIGEN_STRONG_INLINE Packet2d ei_psub(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_psub(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f psub(const Packet4f& a, 
const Packet4f& b) { return _mm_sub_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d psub(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i psub(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pnegate(const Packet4f& a) +template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000)); return _mm_xor_ps(a,mask); } -template<> EIGEN_STRONG_INLINE Packet2d ei_pnegate(const Packet2d& a) +template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000)); return _mm_xor_pd(a,mask); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pnegate(const Packet4i& a) +template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { - return ei_psub(_mm_setr_epi32(0,0,0,0), a); + return psub(_mm_setr_epi32(0,0,0,0), a); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pmul(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); } -template<> EIGEN_STRONG_INLINE Packet2d ei_pmul(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pmul(const Packet4i& a, const Packet4i& b) +template<> EIGEN_STRONG_INLINE Packet4f pmul(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d pmul(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pmul(const Packet4i& a, const Packet4i& b) { #ifdef EIGEN_VECTORIZE_SSE4_1 return _mm_mullo_epi32(a,b); #else // this version is slightly faster than 4 scalar products - return ei_vec4i_swizzle1( - ei_vec4i_swizzle2( + return vec4i_swizzle1( + vec4i_swizzle2( _mm_mul_epu32(a,b), - _mm_mul_epu32(ei_vec4i_swizzle1(a,1,0,3,2), - ei_vec4i_swizzle1(b,1,0,3,2)), + 
_mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2), + vec4i_swizzle1(b,1,0,3,2)), 0,2,0,2), 0,2,1,3); #endif } -template<> EIGEN_STRONG_INLINE Packet4f ei_pdiv(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); } -template<> EIGEN_STRONG_INLINE Packet2d ei_pdiv(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pdiv(const Packet4i& /*a*/, const Packet4i& /*b*/) -{ ei_assert(false && "packet integer division are not supported by SSE"); - return ei_pset1(0); +template<> EIGEN_STRONG_INLINE Packet4f pdiv(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d pdiv(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pdiv(const Packet4i& /*a*/, const Packet4i& /*b*/) +{ eigen_assert(false && "packet integer division are not supported by SSE"); + return pset1(0); } // for some weird raisons, it has to be overloaded for packet of integers -template<> EIGEN_STRONG_INLINE Packet4i ei_pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return ei_padd(ei_pmul(a,b), c); } +template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pmin(const Packet4f& a, const Packet4f& b) { return _mm_min_ps(a,b); } -template<> EIGEN_STRONG_INLINE Packet2d ei_pmin(const Packet2d& a, const Packet2d& b) { return _mm_min_pd(a,b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pmin(const Packet4i& a, const Packet4i& b) +template<> EIGEN_STRONG_INLINE Packet4f pmin(const Packet4f& a, const Packet4f& b) { return _mm_min_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d pmin(const Packet2d& a, const Packet2d& b) { return _mm_min_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pmin(const Packet4i& a, const Packet4i& b) { // after some bench, this version *is* faster than a scalar implementation 
Packet4i mask = _mm_cmplt_epi32(a,b); return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b)); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pmax(const Packet4f& a, const Packet4f& b) { return _mm_max_ps(a,b); } -template<> EIGEN_STRONG_INLINE Packet2d ei_pmax(const Packet2d& a, const Packet2d& b) { return _mm_max_pd(a,b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pmax(const Packet4i& a, const Packet4i& b) +template<> EIGEN_STRONG_INLINE Packet4f pmax(const Packet4f& a, const Packet4f& b) { return _mm_max_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d pmax(const Packet2d& a, const Packet2d& b) { return _mm_max_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pmax(const Packet4i& a, const Packet4i& b) { // after some bench, this version *is* faster than a scalar implementation Packet4i mask = _mm_cmpgt_epi32(a,b); return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b)); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pand(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); } -template<> EIGEN_STRONG_INLINE Packet2d ei_pand(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pand(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f pand(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d pand(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pand(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_por(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); } -template<> EIGEN_STRONG_INLINE Packet2d ei_por(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_por(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f por(const 
Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d por(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i por(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pxor(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); } -template<> EIGEN_STRONG_INLINE Packet2d ei_pxor(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pxor(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f pxor(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d pxor(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pxor(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pandnot(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(a,b); } -template<> EIGEN_STRONG_INLINE Packet2d ei_pandnot(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(a,b); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pandnot(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f pandnot(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d pandnot(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pandnot(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(a,b); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pload(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); } -template<> EIGEN_STRONG_INLINE Packet2d ei_pload(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pload(const 
int* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast(from)); } +template<> EIGEN_STRONG_INLINE Packet4f pload(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); } +template<> EIGEN_STRONG_INLINE Packet2d pload(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); } +template<> EIGEN_STRONG_INLINE Packet4i pload(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast(from)); } #if defined(_MSC_VER) - template<> EIGEN_STRONG_INLINE Packet4f ei_ploadu(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_ps(from); } - template<> EIGEN_STRONG_INLINE Packet2d ei_ploadu(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_pd(from); } - template<> EIGEN_STRONG_INLINE Packet4i ei_ploadu(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(reinterpret_cast(from)); } + template<> EIGEN_STRONG_INLINE Packet4f ploadu(const float* from) { + EIGEN_DEBUG_UNALIGNED_LOAD + #if (_MSC_VER==1600) + // NOTE Some version of MSVC10 generates bad code when using _mm_loadu_ps + // (i.e., it does not generate an unaligned load!! + // TODO On most architectures this version should also be faster than a single _mm_loadu_ps + // so we could also enable it for MSVC08 but first we have to make this later does not generate crap when doing so... + __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from)); + res = _mm_loadh_pi(res, (const __m64*)(from+2)); + return res; + #else + return _mm_loadu_ps(from); + #endif + } + template<> EIGEN_STRONG_INLINE Packet2d ploadu(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_pd(from); } + template<> EIGEN_STRONG_INLINE Packet4i ploadu(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(reinterpret_cast(from)); } #else // Fast unaligned loads. 
Note that here we cannot directly use intrinsics: this would // require pointer casting to incompatible pointer types and leads to invalid code @@ -230,97 +231,133 @@ template<> EIGEN_STRONG_INLINE Packet4i ei_pload(const int* from) // a correct instruction dependency. // TODO: do the same for MSVC (ICC is compatible) // NOTE: with the code below, MSVC's compiler crashes! -template<> EIGEN_STRONG_INLINE Packet4f ei_ploadu(const float* from) + +#if defined(__GNUC__) && defined(__i386__) + // bug 195: gcc/i386 emits weird x87 fldl/fstpl instructions for _mm_load_sd + #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1 +#elif defined(__clang__) + // bug 201: Segfaults in __mm_loadh_pd with clang 2.8 + #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1 +#else + #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 0 +#endif + +template<> EIGEN_STRONG_INLINE Packet4f ploadu(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD +#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS + return _mm_loadu_ps(from); +#else __m128d res; res = _mm_load_sd((const double*)(from)) ; res = _mm_loadh_pd(res, (const double*)(from+2)) ; return _mm_castpd_ps(res); +#endif } -template<> EIGEN_STRONG_INLINE Packet2d ei_ploadu(const double* from) +template<> EIGEN_STRONG_INLINE Packet2d ploadu(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD +#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS + return _mm_loadu_pd(from); +#else __m128d res; res = _mm_load_sd(from) ; res = _mm_loadh_pd(res,from+1); return res; +#endif } -template<> EIGEN_STRONG_INLINE Packet4i ei_ploadu(const int* from) +template<> EIGEN_STRONG_INLINE Packet4i ploadu(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD +#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS + return _mm_loadu_si128(reinterpret_cast(from)); +#else __m128d res; res = _mm_load_sd((const double*)(from)) ; res = _mm_loadh_pd(res, (const double*)(from+2)) ; return _mm_castpd_si128(res); +#endif } #endif -template<> EIGEN_STRONG_INLINE Packet4f ei_ploaddup(const float* from) +template<> EIGEN_STRONG_INLINE Packet4f 
ploaddup(const float* from) { - return ei_vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd((const double*)from)), 0, 0, 1, 1); + return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd((const double*)from)), 0, 0, 1, 1); } -template<> EIGEN_STRONG_INLINE Packet2d ei_ploaddup(const double* from) -{ return ei_pset1(from[0]); } -template<> EIGEN_STRONG_INLINE Packet4i ei_ploaddup(const int* from) +template<> EIGEN_STRONG_INLINE Packet2d ploaddup(const double* from) +{ return pset1(from[0]); } +template<> EIGEN_STRONG_INLINE Packet4i ploaddup(const int* from) { Packet4i tmp; tmp = _mm_loadl_epi64(reinterpret_cast(from)); - return ei_vec4i_swizzle1(tmp, 0, 0, 1, 1); + return vec4i_swizzle1(tmp, 0, 0, 1, 1); } -template<> EIGEN_STRONG_INLINE void ei_pstore(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); } -template<> EIGEN_STRONG_INLINE void ei_pstore(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); } -template<> EIGEN_STRONG_INLINE void ei_pstore(int* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast(to), from); } +template<> EIGEN_STRONG_INLINE void pstore(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); } +template<> EIGEN_STRONG_INLINE void pstore(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); } +template<> EIGEN_STRONG_INLINE void pstore(int* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast(to), from); } -template<> EIGEN_STRONG_INLINE void ei_pstoreu(double* to, const Packet2d& from) { +template<> EIGEN_STRONG_INLINE void pstoreu(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storel_pd((to), from); _mm_storeh_pd((to+1), from); } -template<> EIGEN_STRONG_INLINE void ei_pstoreu(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE ei_pstoreu((double*)to, _mm_castps_pd(from)); } -template<> EIGEN_STRONG_INLINE void ei_pstoreu(int* to, 
const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE ei_pstoreu((double*)to, _mm_castsi128_pd(from)); } +template<> EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, _mm_castps_pd(from)); } +template<> EIGEN_STRONG_INLINE void pstoreu(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, _mm_castsi128_pd(from)); } -template<> EIGEN_STRONG_INLINE void ei_prefetch(const float* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); } -template<> EIGEN_STRONG_INLINE void ei_prefetch(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); } -template<> EIGEN_STRONG_INLINE void ei_prefetch(const int* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); } +// some compilers might be tempted to perform multiple moves instead of using a vector path. +template<> EIGEN_STRONG_INLINE void pstore1(float* to, const float& a) +{ + Packet4f pa = _mm_set_ss(a); + pstore(to, vec4f_swizzle1(pa,0,0,0,0)); +} +// some compilers might be tempted to perform multiple moves instead of using a vector path. +template<> EIGEN_STRONG_INLINE void pstore1(double* to, const double& a) +{ + Packet2d pa = _mm_set_sd(a); + pstore(to, vec2d_swizzle1(pa,0,0)); +} -#if defined(_MSC_VER) && (_MSC_VER <= 1500) && defined(_WIN64) && !defined(__INTEL_COMPILER) -// The temporary variable fixes an internal compilation error. +template<> EIGEN_STRONG_INLINE void prefetch(const float* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); } +template<> EIGEN_STRONG_INLINE void prefetch(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); } +template<> EIGEN_STRONG_INLINE void prefetch(const int* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); } + +#if defined(_MSC_VER) && defined(_WIN64) && !defined(__INTEL_COMPILER) +// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010 // Direct of the struct members fixed bug #62. 
-template<> EIGEN_STRONG_INLINE float ei_pfirst(const Packet4f& a) { return a.m128_f32[0]; } -template<> EIGEN_STRONG_INLINE double ei_pfirst(const Packet2d& a) { return a.m128d_f64[0]; } -template<> EIGEN_STRONG_INLINE int ei_pfirst(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; } -#elif defined(_MSC_VER) && (_MSC_VER <= 1500) && !defined(__INTEL_COMPILER) -// The temporary variable fixes an internal compilation error. -template<> EIGEN_STRONG_INLINE float ei_pfirst(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; } -template<> EIGEN_STRONG_INLINE double ei_pfirst(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; } -template<> EIGEN_STRONG_INLINE int ei_pfirst(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; } +template<> EIGEN_STRONG_INLINE float pfirst(const Packet4f& a) { return a.m128_f32[0]; } +template<> EIGEN_STRONG_INLINE double pfirst(const Packet2d& a) { return a.m128d_f64[0]; } +template<> EIGEN_STRONG_INLINE int pfirst(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; } +#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER) +// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010 +template<> EIGEN_STRONG_INLINE float pfirst(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; } +template<> EIGEN_STRONG_INLINE double pfirst(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; } +template<> EIGEN_STRONG_INLINE int pfirst(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; } #else -template<> EIGEN_STRONG_INLINE float ei_pfirst(const Packet4f& a) { return _mm_cvtss_f32(a); } -template<> EIGEN_STRONG_INLINE double ei_pfirst(const Packet2d& a) { return _mm_cvtsd_f64(a); } -template<> EIGEN_STRONG_INLINE int ei_pfirst(const Packet4i& a) { return _mm_cvtsi128_si32(a); } +template<> EIGEN_STRONG_INLINE float pfirst(const Packet4f& a) { return _mm_cvtss_f32(a); } +template<> EIGEN_STRONG_INLINE double pfirst(const Packet2d& a) { 
return _mm_cvtsd_f64(a); } +template<> EIGEN_STRONG_INLINE int pfirst(const Packet4i& a) { return _mm_cvtsi128_si32(a); } #endif -template<> EIGEN_STRONG_INLINE Packet4f ei_preverse(const Packet4f& a) +template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) { return _mm_shuffle_ps(a,a,0x1B); } -template<> EIGEN_STRONG_INLINE Packet2d ei_preverse(const Packet2d& a) +template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) { return _mm_shuffle_pd(a,a,0x1); } -template<> EIGEN_STRONG_INLINE Packet4i ei_preverse(const Packet4i& a) +template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) { return _mm_shuffle_epi32(a,0x1B); } -template<> EIGEN_STRONG_INLINE Packet4f ei_pabs(const Packet4f& a) +template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF)); return _mm_and_ps(a,mask); } -template<> EIGEN_STRONG_INLINE Packet2d ei_pabs(const Packet2d& a) +template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) { const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF)); return _mm_and_pd(a,mask); } -template<> EIGEN_STRONG_INLINE Packet4i ei_pabs(const Packet4i& a) +template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { #ifdef EIGEN_VECTORIZE_SSSE3 return _mm_abs_epi32(a); @@ -330,7 +367,7 @@ template<> EIGEN_STRONG_INLINE Packet4i ei_pabs(const Packet4i& a) #endif } -EIGEN_STRONG_INLINE void ei_punpackp(Packet4f* vecs) +EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs) { vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55)); vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA)); @@ -340,47 +377,47 @@ EIGEN_STRONG_INLINE void ei_punpackp(Packet4f* vecs) #ifdef EIGEN_VECTORIZE_SSE3 // TODO implement SSE2 versions as well as integer versions -template<> EIGEN_STRONG_INLINE Packet4f ei_preduxp(const Packet4f* vecs) +template<> 
EIGEN_STRONG_INLINE Packet4f preduxp(const Packet4f* vecs) { return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3])); } -template<> EIGEN_STRONG_INLINE Packet2d ei_preduxp(const Packet2d* vecs) +template<> EIGEN_STRONG_INLINE Packet2d preduxp(const Packet2d* vecs) { return _mm_hadd_pd(vecs[0], vecs[1]); } // SSSE3 version: -// EIGEN_STRONG_INLINE Packet4i ei_preduxp(const Packet4i* vecs) +// EIGEN_STRONG_INLINE Packet4i preduxp(const Packet4i* vecs) // { // return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3])); // } -template<> EIGEN_STRONG_INLINE float ei_predux(const Packet4f& a) +template<> EIGEN_STRONG_INLINE float predux(const Packet4f& a) { Packet4f tmp0 = _mm_hadd_ps(a,a); - return ei_pfirst(_mm_hadd_ps(tmp0, tmp0)); + return pfirst(_mm_hadd_ps(tmp0, tmp0)); } -template<> EIGEN_STRONG_INLINE double ei_predux(const Packet2d& a) { return ei_pfirst(_mm_hadd_pd(a, a)); } +template<> EIGEN_STRONG_INLINE double predux(const Packet2d& a) { return pfirst(_mm_hadd_pd(a, a)); } // SSSE3 version: -// EIGEN_STRONG_INLINE float ei_predux(const Packet4i& a) +// EIGEN_STRONG_INLINE float predux(const Packet4i& a) // { // Packet4i tmp0 = _mm_hadd_epi32(a,a); -// return ei_pfirst(_mm_hadd_epi32(tmp0, tmp0)); +// return pfirst(_mm_hadd_epi32(tmp0, tmp0)); // } #else // SSE2 versions -template<> EIGEN_STRONG_INLINE float ei_predux(const Packet4f& a) +template<> EIGEN_STRONG_INLINE float predux(const Packet4f& a) { Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a)); - return ei_pfirst(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1))); + return pfirst(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1))); } -template<> EIGEN_STRONG_INLINE double ei_predux(const Packet2d& a) +template<> EIGEN_STRONG_INLINE double predux(const Packet2d& a) { - return ei_pfirst(_mm_add_sd(a, _mm_unpackhi_pd(a,a))); + return pfirst(_mm_add_sd(a, _mm_unpackhi_pd(a,a))); } -template<> EIGEN_STRONG_INLINE Packet4f ei_preduxp(const Packet4f* vecs) +template<> 
EIGEN_STRONG_INLINE Packet4f preduxp(const Packet4f* vecs) { Packet4f tmp0, tmp1, tmp2; tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]); @@ -394,19 +431,19 @@ template<> EIGEN_STRONG_INLINE Packet4f ei_preduxp(const Packet4f* vec return _mm_add_ps(tmp0, tmp2); } -template<> EIGEN_STRONG_INLINE Packet2d ei_preduxp(const Packet2d* vecs) +template<> EIGEN_STRONG_INLINE Packet2d preduxp(const Packet2d* vecs) { return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1])); } #endif // SSE3 -template<> EIGEN_STRONG_INLINE int ei_predux(const Packet4i& a) +template<> EIGEN_STRONG_INLINE int predux(const Packet4i& a) { Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a)); - return ei_pfirst(tmp) + ei_pfirst(_mm_shuffle_epi32(tmp, 1)); + return pfirst(tmp) + pfirst(_mm_shuffle_epi32(tmp, 1)); } -template<> EIGEN_STRONG_INLINE Packet4i ei_preduxp(const Packet4i* vecs) +template<> EIGEN_STRONG_INLINE Packet4i preduxp(const Packet4i* vecs) { Packet4i tmp0, tmp1, tmp2; tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]); @@ -423,69 +460,69 @@ template<> EIGEN_STRONG_INLINE Packet4i ei_preduxp(const Packet4i* vec // Other reduction functions: // mul -template<> EIGEN_STRONG_INLINE float ei_predux_mul(const Packet4f& a) +template<> EIGEN_STRONG_INLINE float predux_mul(const Packet4f& a) { Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a)); - return ei_pfirst(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1))); + return pfirst(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1))); } -template<> EIGEN_STRONG_INLINE double ei_predux_mul(const Packet2d& a) +template<> EIGEN_STRONG_INLINE double predux_mul(const Packet2d& a) { - return ei_pfirst(_mm_mul_sd(a, _mm_unpackhi_pd(a,a))); + return pfirst(_mm_mul_sd(a, _mm_unpackhi_pd(a,a))); } -template<> EIGEN_STRONG_INLINE int ei_predux_mul(const Packet4i& a) +template<> EIGEN_STRONG_INLINE int predux_mul(const Packet4i& a) { // after some experiments, it is seems this is the fastest way to implement it - // for GCC (eg., reusing ei_pmul is 
very slow !) + // for GCC (eg., reusing pmul is very slow !) // TODO try to call _mm_mul_epu32 directly EIGEN_ALIGN16 int aux[4]; - ei_pstore(aux, a); + pstore(aux, a); return (aux[0] * aux[1]) * (aux[2] * aux[3]);; } // min -template<> EIGEN_STRONG_INLINE float ei_predux_min(const Packet4f& a) +template<> EIGEN_STRONG_INLINE float predux_min(const Packet4f& a) { Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a)); - return ei_pfirst(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1))); + return pfirst(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1))); } -template<> EIGEN_STRONG_INLINE double ei_predux_min(const Packet2d& a) +template<> EIGEN_STRONG_INLINE double predux_min(const Packet2d& a) { - return ei_pfirst(_mm_min_sd(a, _mm_unpackhi_pd(a,a))); + return pfirst(_mm_min_sd(a, _mm_unpackhi_pd(a,a))); } -template<> EIGEN_STRONG_INLINE int ei_predux_min(const Packet4i& a) +template<> EIGEN_STRONG_INLINE int predux_min(const Packet4i& a) { // after some experiments, it is seems this is the fastest way to implement it - // for GCC (eg., it does not like using std::min after the ei_pstore !!) + // for GCC (eg., it does not like using std::min after the pstore !!) 
EIGEN_ALIGN16 int aux[4]; - ei_pstore(aux, a); + pstore(aux, a); register int aux0 = aux[0] EIGEN_STRONG_INLINE float ei_predux_max(const Packet4f& a) +template<> EIGEN_STRONG_INLINE float predux_max(const Packet4f& a) { Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a)); - return ei_pfirst(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1))); + return pfirst(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1))); } -template<> EIGEN_STRONG_INLINE double ei_predux_max(const Packet2d& a) +template<> EIGEN_STRONG_INLINE double predux_max(const Packet2d& a) { - return ei_pfirst(_mm_max_sd(a, _mm_unpackhi_pd(a,a))); + return pfirst(_mm_max_sd(a, _mm_unpackhi_pd(a,a))); } -template<> EIGEN_STRONG_INLINE int ei_predux_max(const Packet4i& a) +template<> EIGEN_STRONG_INLINE int predux_max(const Packet4i& a) { // after some experiments, it is seems this is the fastest way to implement it - // for GCC (eg., it does not like using std::min after the ei_pstore !!) + // for GCC (eg., it does not like using std::min after the pstore !!) EIGEN_ALIGN16 int aux[4]; - ei_pstore(aux, a); + pstore(aux, a); register int aux0 = aux[0]>aux[1] ? aux[0] : aux[1]; register int aux2 = aux[2]>aux[3] ? aux[2] : aux[3]; return aux0>aux2 ? 
aux0 : aux2; } #if (defined __GNUC__) -// template <> EIGEN_STRONG_INLINE Packet4f ei_pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) +// template <> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) // { // Packet4f res = b; // asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c)); @@ -502,7 +539,7 @@ template<> EIGEN_STRONG_INLINE int ei_predux_max(const Packet4i& a) #ifdef EIGEN_VECTORIZE_SSSE3 // SSSE3 versions template -struct ei_palign_impl +struct palign_impl { EIGEN_STRONG_INLINE static void run(Packet4f& first, const Packet4f& second) { @@ -512,7 +549,7 @@ struct ei_palign_impl }; template -struct ei_palign_impl +struct palign_impl { EIGEN_STRONG_INLINE static void run(Packet4i& first, const Packet4i& second) { @@ -522,7 +559,7 @@ struct ei_palign_impl }; template -struct ei_palign_impl +struct palign_impl { EIGEN_STRONG_INLINE static void run(Packet2d& first, const Packet2d& second) { @@ -533,7 +570,7 @@ struct ei_palign_impl #else // SSE2 versions template -struct ei_palign_impl +struct palign_impl { EIGEN_STRONG_INLINE static void run(Packet4f& first, const Packet4f& second) { @@ -556,7 +593,7 @@ struct ei_palign_impl }; template -struct ei_palign_impl +struct palign_impl { EIGEN_STRONG_INLINE static void run(Packet4i& first, const Packet4i& second) { @@ -579,7 +616,7 @@ struct ei_palign_impl }; template -struct ei_palign_impl +struct palign_impl { EIGEN_STRONG_INLINE static void run(Packet2d& first, const Packet2d& second) { @@ -592,4 +629,6 @@ struct ei_palign_impl }; #endif +} // end namespace internal + #endif // EIGEN_PACKET_MATH_SSE_H diff --git a/gtsam/3rdparty/Eigen/src/Core/products/CoeffBasedProduct.h b/gtsam/3rdparty/Eigen/src/Core/products/CoeffBasedProduct.h index d2e693861..dc20f7e1e 100644 --- a/gtsam/3rdparty/Eigen/src/Core/products/CoeffBasedProduct.h +++ b/gtsam/3rdparty/Eigen/src/Core/products/CoeffBasedProduct.h @@ -26,6 +26,8 @@ #ifndef 
EIGEN_COEFFBASED_PRODUCT_H #define EIGEN_COEFFBASED_PRODUCT_H +namespace internal { + /********************************************************************************* * Coefficient based product implementation. * It is designed for the following use cases: @@ -40,22 +42,22 @@ */ template -struct ei_product_coeff_impl; +struct product_coeff_impl; template -struct ei_product_packet_impl; +struct product_packet_impl; template -struct ei_traits > +struct traits > { typedef MatrixXpr XprKind; - typedef typename ei_cleantype::type _LhsNested; - typedef typename ei_cleantype::type _RhsNested; - typedef typename ei_scalar_product_traits::ReturnType Scalar; - typedef typename ei_promote_storage_type::StorageKind, - typename ei_traits<_RhsNested>::StorageKind>::ret StorageKind; - typedef typename ei_promote_index_type::Index, - typename ei_traits<_RhsNested>::Index>::type Index; + typedef typename remove_all::type _LhsNested; + typedef typename remove_all::type _RhsNested; + typedef typename scalar_product_traits::ReturnType Scalar; + typedef typename promote_storage_type::StorageKind, + typename traits<_RhsNested>::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits<_RhsNested>::Index>::type Index; enum { LhsCoeffReadCost = _LhsNested::CoeffReadCost, @@ -73,18 +75,18 @@ struct ei_traits > LhsRowMajor = LhsFlags & RowMajorBit, RhsRowMajor = RhsFlags & RowMajorBit, - SameType = ei_is_same_type::ret, + SameType = is_same::value, CanVectorizeRhs = RhsRowMajor && (RhsFlags & PacketAccessBit) && (ColsAtCompileTime == Dynamic - || ( (ColsAtCompileTime % ei_packet_traits::size) == 0 + || ( (ColsAtCompileTime % packet_traits::size) == 0 && (RhsFlags&AlignedBit) ) ), CanVectorizeLhs = (!LhsRowMajor) && (LhsFlags & PacketAccessBit) && (RowsAtCompileTime == Dynamic - || ( (RowsAtCompileTime % ei_packet_traits::size) == 0 + || ( (RowsAtCompileTime % packet_traits::size) == 0 && (LhsFlags&AlignedBit) ) ), @@ -96,6 +98,7 @@ struct ei_traits > 
Flags = ((unsigned int)(LhsFlags | RhsFlags) & HereditaryBits & ~RowMajorBit) | (EvalToRowMajor ? RowMajorBit : 0) | NestingFlags + | (LhsFlags & RhsFlags & AlignedBit) // TODO enable vectorization for mixed types | (SameType && (CanVectorizeLhs || CanVectorizeRhs) ? PacketAccessBit : 0), @@ -113,13 +116,15 @@ struct ei_traits > && (!RhsRowMajor) && (LhsFlags & RhsFlags & ActualPacketAccessBit) && (LhsFlags & RhsFlags & AlignedBit) - && (InnerSize % ei_packet_traits::size == 0) + && (InnerSize % packet_traits::size == 0) }; }; +} // end namespace internal + template class CoeffBasedProduct - : ei_no_assignment_operator, + : internal::no_assignment_operator, public MatrixBase > { public: @@ -130,19 +135,19 @@ class CoeffBasedProduct private: - typedef typename ei_traits::_LhsNested _LhsNested; - typedef typename ei_traits::_RhsNested _RhsNested; + typedef typename internal::traits::_LhsNested _LhsNested; + typedef typename internal::traits::_RhsNested _RhsNested; enum { - PacketSize = ei_packet_traits::size, - InnerSize = ei_traits::InnerSize, + PacketSize = internal::packet_traits::size, + InnerSize = internal::traits::InnerSize, Unroll = CoeffReadCost != Dynamic && CoeffReadCost <= EIGEN_UNROLLING_LIMIT, - CanVectorizeInner = ei_traits::CanVectorizeInner + CanVectorizeInner = internal::traits::CanVectorizeInner }; - typedef ei_product_coeff_impl ScalarCoeffImpl; + typedef internal::product_coeff_impl ScalarCoeffImpl; typedef CoeffBasedProduct LazyCoeffBasedProductType; @@ -158,9 +163,9 @@ class CoeffBasedProduct { // we don't allow taking products of matrices of different real types, as that wouldn't be vectorizable. // We still allow to mix T and complex. 
- EIGEN_STATIC_ASSERT((ei_is_same_type::ret), + EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) - ei_assert(lhs.cols() == rhs.rows() + eigen_assert(lhs.cols() == rhs.rows() && "invalid matrix product" && "if you wanted a coeff-wise or a dot product use the respective explicit functions"); } @@ -191,9 +196,9 @@ class CoeffBasedProduct EIGEN_STRONG_INLINE const PacketScalar packet(Index row, Index col) const { PacketScalar res; - ei_product_packet_impl + internal::product_packet_impl ::run(row, col, m_lhs, m_rhs, res); return res; } @@ -208,14 +213,14 @@ class CoeffBasedProduct const _LhsNested& lhs() const { return m_lhs; } const _RhsNested& rhs() const { return m_rhs; } - const Diagonal diagonal() const + const Diagonal diagonal() const { return reinterpret_cast(*this); } template - const Diagonal diagonal() const + const Diagonal diagonal() const { return reinterpret_cast(*this); } - const Diagonal diagonal(Index index) const + const Diagonal diagonal(Index index) const { return reinterpret_cast(*this).diagonal(index); } protected: @@ -225,10 +230,12 @@ class CoeffBasedProduct mutable PlainObject m_result; }; +namespace internal { + // here we need to overload the nested rule for products // such that the nested type is a const reference to a plain matrix template -struct ei_nested, N, PlainObject> +struct nested, N, PlainObject> { typedef PlainObject const& type; }; @@ -242,18 +249,18 @@ struct ei_nested -struct ei_product_coeff_impl +struct product_coeff_impl { typedef typename Lhs::Index Index; EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) { - ei_product_coeff_impl::run(row, col, lhs, rhs, res); + product_coeff_impl::run(row, col, lhs, rhs, res); res += lhs.coeff(row, UnrollingIndex) * rhs.coeff(UnrollingIndex, col); } }; template -struct ei_product_coeff_impl +struct product_coeff_impl { 
typedef typename Lhs::Index Index; EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) @@ -263,12 +270,12 @@ struct ei_product_coeff_impl }; template -struct ei_product_coeff_impl +struct product_coeff_impl { typedef typename Lhs::Index Index; EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar& res) { - ei_assert(lhs.cols()>0 && "you are using a non initialized matrix"); + eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix"); res = lhs.coeff(row, 0) * rhs.coeff(0, col); for(Index i = 1; i < lhs.cols(); ++i) res += lhs.coeff(row, i) * rhs.coeff(i, col); @@ -280,44 +287,44 @@ struct ei_product_coeff_impl *******************************************/ template -struct ei_product_coeff_vectorized_unroller +struct product_coeff_vectorized_unroller { typedef typename Lhs::Index Index; - enum { PacketSize = ei_packet_traits::size }; + enum { PacketSize = packet_traits::size }; EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres) { - ei_product_coeff_vectorized_unroller::run(row, col, lhs, rhs, pres); - pres = ei_padd(pres, ei_pmul( lhs.template packet(row, UnrollingIndex) , rhs.template packet(UnrollingIndex, col) )); + product_coeff_vectorized_unroller::run(row, col, lhs, rhs, pres); + pres = padd(pres, pmul( lhs.template packet(row, UnrollingIndex) , rhs.template packet(UnrollingIndex, col) )); } }; template -struct ei_product_coeff_vectorized_unroller<0, Lhs, Rhs, Packet> +struct product_coeff_vectorized_unroller<0, Lhs, Rhs, Packet> { typedef typename Lhs::Index Index; EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres) { - pres = ei_pmul(lhs.template packet(row, 0) , rhs.template packet(0, col)); + pres = pmul(lhs.template packet(row, 0) , rhs.template packet(0, col)); } }; template -struct 
ei_product_coeff_impl +struct product_coeff_impl { typedef typename Lhs::PacketScalar Packet; typedef typename Lhs::Index Index; - enum { PacketSize = ei_packet_traits::size }; + enum { PacketSize = packet_traits::size }; EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) { Packet pres; - ei_product_coeff_vectorized_unroller::run(row, col, lhs, rhs, pres); - ei_product_coeff_impl::run(row, col, lhs, rhs, res); - res = ei_predux(pres); + product_coeff_vectorized_unroller::run(row, col, lhs, rhs, pres); + product_coeff_impl::run(row, col, lhs, rhs, res); + res = predux(pres); } }; template -struct ei_product_coeff_vectorized_dyn_selector +struct product_coeff_vectorized_dyn_selector { typedef typename Lhs::Index Index; EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) @@ -329,7 +336,7 @@ struct ei_product_coeff_vectorized_dyn_selector // NOTE the 3 following specializations are because taking .col(0) on a vector is a bit slower // NOTE maybe they are now useless since we have a specialization for Block template -struct ei_product_coeff_vectorized_dyn_selector +struct product_coeff_vectorized_dyn_selector { typedef typename Lhs::Index Index; EIGEN_STRONG_INLINE static void run(Index /*row*/, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) @@ -339,7 +346,7 @@ struct ei_product_coeff_vectorized_dyn_selector }; template -struct ei_product_coeff_vectorized_dyn_selector +struct product_coeff_vectorized_dyn_selector { typedef typename Lhs::Index Index; EIGEN_STRONG_INLINE static void run(Index row, Index /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) @@ -349,7 +356,7 @@ struct ei_product_coeff_vectorized_dyn_selector }; template -struct ei_product_coeff_vectorized_dyn_selector +struct product_coeff_vectorized_dyn_selector { typedef typename Lhs::Index Index; EIGEN_STRONG_INLINE static void run(Index /*row*/, 
Index /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) @@ -359,12 +366,12 @@ struct ei_product_coeff_vectorized_dyn_selector }; template -struct ei_product_coeff_impl +struct product_coeff_impl { typedef typename Lhs::Index Index; EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) { - ei_product_coeff_vectorized_dyn_selector::run(row, col, lhs, rhs, res); + product_coeff_vectorized_dyn_selector::run(row, col, lhs, rhs, res); } }; @@ -373,71 +380,73 @@ struct ei_product_coeff_impl -struct ei_product_packet_impl +struct product_packet_impl { typedef typename Lhs::Index Index; EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) { - ei_product_packet_impl::run(row, col, lhs, rhs, res); - res = ei_pmadd(ei_pset1(lhs.coeff(row, UnrollingIndex)), rhs.template packet(UnrollingIndex, col), res); + product_packet_impl::run(row, col, lhs, rhs, res); + res = pmadd(pset1(lhs.coeff(row, UnrollingIndex)), rhs.template packet(UnrollingIndex, col), res); } }; template -struct ei_product_packet_impl +struct product_packet_impl { typedef typename Lhs::Index Index; EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) { - ei_product_packet_impl::run(row, col, lhs, rhs, res); - res = ei_pmadd(lhs.template packet(row, UnrollingIndex), ei_pset1(rhs.coeff(UnrollingIndex, col)), res); + product_packet_impl::run(row, col, lhs, rhs, res); + res = pmadd(lhs.template packet(row, UnrollingIndex), pset1(rhs.coeff(UnrollingIndex, col)), res); } }; template -struct ei_product_packet_impl +struct product_packet_impl { typedef typename Lhs::Index Index; EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) { - res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); + res = pmul(pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); } }; template -struct 
ei_product_packet_impl +struct product_packet_impl { typedef typename Lhs::Index Index; EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) { - res = ei_pmul(lhs.template packet(row, 0), ei_pset1(rhs.coeff(0, col))); + res = pmul(lhs.template packet(row, 0), pset1(rhs.coeff(0, col))); } }; template -struct ei_product_packet_impl +struct product_packet_impl { typedef typename Lhs::Index Index; EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res) { - ei_assert(lhs.cols()>0 && "you are using a non initialized matrix"); - res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); + eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix"); + res = pmul(pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); for(Index i = 1; i < lhs.cols(); ++i) - res = ei_pmadd(ei_pset1(lhs.coeff(row, i)), rhs.template packet(i, col), res); + res = pmadd(pset1(lhs.coeff(row, i)), rhs.template packet(i, col), res); } }; template -struct ei_product_packet_impl +struct product_packet_impl { typedef typename Lhs::Index Index; EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res) { - ei_assert(lhs.cols()>0 && "you are using a non initialized matrix"); - res = ei_pmul(lhs.template packet(row, 0), ei_pset1(rhs.coeff(0, col))); + eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix"); + res = pmul(lhs.template packet(row, 0), pset1(rhs.coeff(0, col))); for(Index i = 1; i < lhs.cols(); ++i) - res = ei_pmadd(lhs.template packet(row, i), ei_pset1(rhs.coeff(i, col)), res); + res = pmadd(lhs.template packet(row, i), pset1(rhs.coeff(i, col)), res); } }; +} // end namespace internal + #endif // EIGEN_COEFFBASED_PRODUCT_H diff --git a/gtsam/3rdparty/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/gtsam/3rdparty/Eigen/src/Core/products/GeneralBlockPanelKernel.h index 7e2d496fe..2116dcc74 100644 --- 
a/gtsam/3rdparty/Eigen/src/Core/products/GeneralBlockPanelKernel.h +++ b/gtsam/3rdparty/Eigen/src/Core/products/GeneralBlockPanelKernel.h @@ -25,18 +25,20 @@ #ifndef EIGEN_GENERAL_BLOCK_PANEL_H #define EIGEN_GENERAL_BLOCK_PANEL_H +namespace internal { + template -class ei_gebp_traits; +class gebp_traits; /** \internal */ -inline void ei_manage_caching_sizes(Action action, std::ptrdiff_t* l1=0, std::ptrdiff_t* l2=0) +inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1=0, std::ptrdiff_t* l2=0) { static std::ptrdiff_t m_l1CacheSize = 0; static std::ptrdiff_t m_l2CacheSize = 0; if(m_l1CacheSize==0) { - m_l1CacheSize = ei_queryL1CacheSize(); - m_l2CacheSize = ei_queryTopLevelCacheSize(); + m_l1CacheSize = queryL1CacheSize(); + m_l2CacheSize = queryTopLevelCacheSize(); if(m_l1CacheSize<=0) m_l1CacheSize = 8 * 1024; if(m_l2CacheSize<=0) m_l2CacheSize = 1 * 1024 * 1024; @@ -45,50 +47,22 @@ inline void ei_manage_caching_sizes(Action action, std::ptrdiff_t* l1=0, std::pt if(action==SetAction) { // set the cpu cache size and cache all block sizes from a global cache size in byte - ei_internal_assert(l1!=0 && l2!=0); + eigen_internal_assert(l1!=0 && l2!=0); m_l1CacheSize = *l1; m_l2CacheSize = *l2; } else if(action==GetAction) { - ei_internal_assert(l1!=0 && l2!=0); + eigen_internal_assert(l1!=0 && l2!=0); *l1 = m_l1CacheSize; *l2 = m_l2CacheSize; } else { - ei_internal_assert(false); + eigen_internal_assert(false); } } -/** \returns the currently set level 1 cpu cache size (in bytes) used to estimate the ideal blocking size parameters. - * \sa setCpuCacheSize */ -inline std::ptrdiff_t l1CacheSize() -{ - std::ptrdiff_t l1, l2; - ei_manage_caching_sizes(GetAction, &l1, &l2); - return l1; -} - -/** \returns the currently set level 2 cpu cache size (in bytes) used to estimate the ideal blocking size parameters. 
- * \sa setCpuCacheSize */ -inline std::ptrdiff_t l2CacheSize() -{ - std::ptrdiff_t l1, l2; - ei_manage_caching_sizes(GetAction, &l1, &l2); - return l2; -} - -/** Set the cpu L1 and L2 cache sizes (in bytes). - * These values are use to adjust the size of the blocks - * for the algorithms working per blocks. - * - * \sa computeProductBlockingSizes */ -inline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2) -{ - ei_manage_caching_sizes(SetAction, &l1, &l2); -} - /** \brief Computes the blocking parameters for a m x k times k x n matrix product * * \param[in,out] k Input: the third dimension of the product. Output: the blocking size along the same dimension. @@ -100,7 +74,7 @@ inline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2) * for matrix products and related algorithms. The blocking sizes depends on various * parameters: * - the L1 and L2 cache sizes, - * - the register level blocking sizes defined by ei_gebp_traits, + * - the register level blocking sizes defined by gebp_traits, * - the number of scalars that fit into a packet (when vectorization is enabled). * * \sa setCpuCacheSizes */ @@ -116,15 +90,15 @@ void computeProductBlockingSizes(std::ptrdiff_t& k, std::ptrdiff_t& m, std::ptrd // stay in L1 cache. std::ptrdiff_t l1, l2; - typedef ei_gebp_traits Traits; + typedef gebp_traits Traits; enum { kdiv = KcFactor * 2 * Traits::nr * Traits::RhsProgress * sizeof(RhsScalar), - mr = ei_gebp_traits::mr, + mr = gebp_traits::mr, mr_mask = (0xffffffff/mr)*mr }; - ei_manage_caching_sizes(GetAction, &l1, &l2); + manage_caching_sizes(GetAction, &l1, &l2); k = std::min(k, l1/kdiv); std::ptrdiff_t _m = k>0 ? 
l2/(4 * sizeof(LhsScalar) * k) : 0; if(_m struct ei_gebp_madd_selector { + template struct gebp_madd_selector { EIGEN_STRONG_INLINE EIGEN_ALWAYS_INLINE_ATTRIB static void run(const CJ& cj, A& a, B& b, C& c, T& /*t*/) { c = cj.pmadd(a,b,c); } }; - template struct ei_gebp_madd_selector { + template struct gebp_madd_selector { EIGEN_STRONG_INLINE EIGEN_ALWAYS_INLINE_ATTRIB static void run(const CJ& cj, T& a, T& b, T& c, T& t) { - t = b; t = cj.pmul(a,t); c = ei_padd(c,t); + t = b; t = cj.pmul(a,t); c = padd(c,t); } }; template - EIGEN_STRONG_INLINE void ei_gebp_madd(const CJ& cj, A& a, B& b, C& c, T& t) + EIGEN_STRONG_INLINE void gebp_madd(const CJ& cj, A& a, B& b, C& c, T& t) { - ei_gebp_madd_selector::run(cj,a,b,c,t); + gebp_madd_selector::run(cj,a,b,c,t); } - #define MADD(CJ,A,B,C,T) ei_gebp_madd(CJ,A,B,C,T); -// #define MADD(CJ,A,B,C,T) T = B; T = CJ.pmul(A,T); C = ei_padd(C,T); + #define MADD(CJ,A,B,C,T) gebp_madd(CJ,A,B,C,T); +// #define MADD(CJ,A,B,C,T) T = B; T = CJ.pmul(A,T); C = padd(C,T); #endif /* Vectorization logic @@ -178,20 +152,20 @@ inline void computeProductBlockingSizes(std::ptrdiff_t& k, std::ptrdiff_t& m, st * real*cplx : load lhs as (a0,a0,a1,a1), and mul as usual */ template -class ei_gebp_traits +class gebp_traits { public: typedef _LhsScalar LhsScalar; typedef _RhsScalar RhsScalar; - typedef typename ei_scalar_product_traits::ReturnType ResScalar; + typedef typename scalar_product_traits::ReturnType ResScalar; enum { ConjLhs = _ConjLhs, ConjRhs = _ConjRhs, - Vectorizable = ei_packet_traits::Vectorizable && ei_packet_traits::Vectorizable, - LhsPacketSize = Vectorizable ? ei_packet_traits::size : 1, - RhsPacketSize = Vectorizable ? ei_packet_traits::size : 1, - ResPacketSize = Vectorizable ? ei_packet_traits::size : 1, + Vectorizable = packet_traits::Vectorizable && packet_traits::Vectorizable, + LhsPacketSize = Vectorizable ? packet_traits::size : 1, + RhsPacketSize = Vectorizable ? packet_traits::size : 1, + ResPacketSize = Vectorizable ? 
packet_traits::size : 1, NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS, @@ -207,67 +181,67 @@ public: RhsProgress = RhsPacketSize }; - typedef typename ei_packet_traits::type _LhsPacket; - typedef typename ei_packet_traits::type _RhsPacket; - typedef typename ei_packet_traits::type _ResPacket; + typedef typename packet_traits::type _LhsPacket; + typedef typename packet_traits::type _RhsPacket; + typedef typename packet_traits::type _ResPacket; - typedef typename ei_meta_if::ret LhsPacket; - typedef typename ei_meta_if::ret RhsPacket; - typedef typename ei_meta_if::ret ResPacket; + typedef typename conditional::type LhsPacket; + typedef typename conditional::type RhsPacket; + typedef typename conditional::type ResPacket; typedef ResPacket AccPacket; EIGEN_STRONG_INLINE void initAcc(AccPacket& p) { - p = ei_pset1(ResScalar(0)); + p = pset1(ResScalar(0)); } EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b) { for(DenseIndex k=0; k(rhs[k])); + pstore1(&b[k*RhsPacketSize], rhs[k]); } EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const { - dest = ei_pload(b); + dest = pload(b); } EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const { - dest = ei_pload(a); + dest = pload(a); } EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, AccPacket& tmp) const { - tmp = b; tmp = ei_pmul(a,tmp); c = ei_padd(c,tmp); + tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp); } EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const { - r = ei_pmadd(c,alpha,r); + r = pmadd(c,alpha,r); } protected: -// ei_conj_helper cj; -// ei_conj_helper pcj; +// conj_helper cj; +// conj_helper pcj; }; template -class ei_gebp_traits, RealScalar, _ConjLhs, false> +class gebp_traits, RealScalar, _ConjLhs, false> { public: typedef std::complex LhsScalar; typedef RealScalar RhsScalar; - typedef typename ei_scalar_product_traits::ReturnType ResScalar; + 
typedef typename scalar_product_traits::ReturnType ResScalar; enum { ConjLhs = _ConjLhs, ConjRhs = false, - Vectorizable = ei_packet_traits::Vectorizable && ei_packet_traits::Vectorizable, - LhsPacketSize = Vectorizable ? ei_packet_traits::size : 1, - RhsPacketSize = Vectorizable ? ei_packet_traits::size : 1, - ResPacketSize = Vectorizable ? ei_packet_traits::size : 1, + Vectorizable = packet_traits::Vectorizable && packet_traits::Vectorizable, + LhsPacketSize = Vectorizable ? packet_traits::size : 1, + RhsPacketSize = Vectorizable ? packet_traits::size : 1, + ResPacketSize = Vectorizable ? packet_traits::size : 1, NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS, nr = NumberOfRegisters/4, @@ -278,48 +252,48 @@ public: RhsProgress = RhsPacketSize }; - typedef typename ei_packet_traits::type _LhsPacket; - typedef typename ei_packet_traits::type _RhsPacket; - typedef typename ei_packet_traits::type _ResPacket; + typedef typename packet_traits::type _LhsPacket; + typedef typename packet_traits::type _RhsPacket; + typedef typename packet_traits::type _ResPacket; - typedef typename ei_meta_if::ret LhsPacket; - typedef typename ei_meta_if::ret RhsPacket; - typedef typename ei_meta_if::ret ResPacket; + typedef typename conditional::type LhsPacket; + typedef typename conditional::type RhsPacket; + typedef typename conditional::type ResPacket; typedef ResPacket AccPacket; EIGEN_STRONG_INLINE void initAcc(AccPacket& p) { - p = ei_pset1(ResScalar(0)); + p = pset1(ResScalar(0)); } EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b) { for(DenseIndex k=0; k(rhs[k])); + pstore1(&b[k*RhsPacketSize], rhs[k]); } EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const { - dest = ei_pload(b); + dest = pload(b); } EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const { - dest = ei_pload(a); + dest = pload(a); } EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, 
RhsPacket& tmp) const { - madd_impl(a, b, c, tmp, typename ei_meta_if::ret()); + madd_impl(a, b, c, tmp, typename conditional::type()); } - EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const ei_meta_true&) const + EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const { - tmp = b; tmp = ei_pmul(a.v,tmp); c.v = ei_padd(c.v,tmp); + tmp = b; tmp = pmul(a.v,tmp); c.v = padd(c.v,tmp); } - EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const ei_meta_false&) const + EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const { c += a * b; } @@ -330,11 +304,11 @@ public: } protected: - ei_conj_helper cj; + conj_helper cj; }; template -class ei_gebp_traits, std::complex, _ConjLhs, _ConjRhs > +class gebp_traits, std::complex, _ConjLhs, _ConjRhs > { public: typedef std::complex Scalar; @@ -345,10 +319,10 @@ public: enum { ConjLhs = _ConjLhs, ConjRhs = _ConjRhs, - Vectorizable = ei_packet_traits::Vectorizable - && ei_packet_traits::Vectorizable, - RealPacketSize = Vectorizable ? ei_packet_traits::size : 1, - ResPacketSize = Vectorizable ? ei_packet_traits::size : 1, + Vectorizable = packet_traits::Vectorizable + && packet_traits::Vectorizable, + RealPacketSize = Vectorizable ? packet_traits::size : 1, + ResPacketSize = Vectorizable ? packet_traits::size : 1, nr = 2, mr = 2 * ResPacketSize, @@ -358,25 +332,25 @@ public: RhsProgress = Vectorizable ? 
2*ResPacketSize : 1 }; - typedef typename ei_packet_traits::type RealPacket; - typedef typename ei_packet_traits::type ScalarPacket; + typedef typename packet_traits::type RealPacket; + typedef typename packet_traits::type ScalarPacket; struct DoublePacket { RealPacket first; RealPacket second; }; - typedef typename ei_meta_if::ret LhsPacket; - typedef typename ei_meta_if::ret RhsPacket; - typedef typename ei_meta_if::ret ResPacket; - typedef typename ei_meta_if::ret AccPacket; + typedef typename conditional::type LhsPacket; + typedef typename conditional::type RhsPacket; + typedef typename conditional::type ResPacket; + typedef typename conditional::type AccPacket; EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); } EIGEN_STRONG_INLINE void initAcc(DoublePacket& p) { - p.first = ei_pset1(RealScalar(0)); - p.second = ei_pset1(RealScalar(0)); + p.first = pset1(RealScalar(0)); + p.second = pset1(RealScalar(0)); } /* Unpack the rhs coeff such that each complex coefficient is spread into @@ -389,8 +363,8 @@ public: { if(Vectorizable) { - ei_pstore((RealScalar*)&b[k*ResPacketSize*2+0], ei_pset1(ei_real(rhs[k]))); - ei_pstore((RealScalar*)&b[k*ResPacketSize*2+ResPacketSize], ei_pset1(ei_imag(rhs[k]))); + pstore1((RealScalar*)&b[k*ResPacketSize*2+0], real(rhs[k])); + pstore1((RealScalar*)&b[k*ResPacketSize*2+ResPacketSize], imag(rhs[k])); } else b[k] = rhs[k]; @@ -401,20 +375,20 @@ public: EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacket& dest) const { - dest.first = ei_pload((const RealScalar*)b); - dest.second = ei_pload((const RealScalar*)(b+ResPacketSize)); + dest.first = pload((const RealScalar*)b); + dest.second = pload((const RealScalar*)(b+ResPacketSize)); } // nothing special here EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const { - dest = ei_pload((const typename ei_unpacket_traits::type*)(a)); + dest = pload((const typename unpacket_traits::type*)(a)); } EIGEN_STRONG_INLINE void madd(const LhsPacket& a, 
const RhsPacket& b, DoublePacket& c, RhsPacket& /*tmp*/) const { - c.first = ei_padd(ei_pmul(a,b.first), c.first); - c.second = ei_padd(ei_pmul(a,b.second),c.second); + c.first = padd(pmul(a,b.first), c.first); + c.second = padd(pmul(a,b.second),c.second); } EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/) const @@ -430,34 +404,34 @@ public: ResPacket tmp; if((!ConjLhs)&&(!ConjRhs)) { - tmp = ei_pcplxflip(ei_pconj(ResPacket(c.second))); - tmp = ei_padd(ResPacket(c.first),tmp); + tmp = pcplxflip(pconj(ResPacket(c.second))); + tmp = padd(ResPacket(c.first),tmp); } else if((!ConjLhs)&&(ConjRhs)) { - tmp = ei_pconj(ei_pcplxflip(ResPacket(c.second))); - tmp = ei_padd(ResPacket(c.first),tmp); + tmp = pconj(pcplxflip(ResPacket(c.second))); + tmp = padd(ResPacket(c.first),tmp); } else if((ConjLhs)&&(!ConjRhs)) { - tmp = ei_pcplxflip(ResPacket(c.second)); - tmp = ei_padd(ei_pconj(ResPacket(c.first)),tmp); + tmp = pcplxflip(ResPacket(c.second)); + tmp = padd(pconj(ResPacket(c.first)),tmp); } else if((ConjLhs)&&(ConjRhs)) { - tmp = ei_pcplxflip(ResPacket(c.second)); - tmp = ei_psub(ei_pconj(ResPacket(c.first)),tmp); + tmp = pcplxflip(ResPacket(c.second)); + tmp = psub(pconj(ResPacket(c.first)),tmp); } - r = ei_pmadd(tmp,alpha,r); + r = pmadd(tmp,alpha,r); } protected: - ei_conj_helper cj; + conj_helper cj; }; template -class ei_gebp_traits, false, _ConjRhs > +class gebp_traits, false, _ConjRhs > { public: typedef std::complex Scalar; @@ -468,11 +442,11 @@ public: enum { ConjLhs = false, ConjRhs = _ConjRhs, - Vectorizable = ei_packet_traits::Vectorizable - && ei_packet_traits::Vectorizable, - LhsPacketSize = Vectorizable ? ei_packet_traits::size : 1, - RhsPacketSize = Vectorizable ? ei_packet_traits::size : 1, - ResPacketSize = Vectorizable ? ei_packet_traits::size : 1, + Vectorizable = packet_traits::Vectorizable + && packet_traits::Vectorizable, + LhsPacketSize = Vectorizable ? 
packet_traits::size : 1, + RhsPacketSize = Vectorizable ? packet_traits::size : 1, + ResPacketSize = Vectorizable ? packet_traits::size : 1, NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS, nr = 4, @@ -483,48 +457,48 @@ public: RhsProgress = ResPacketSize }; - typedef typename ei_packet_traits::type _LhsPacket; - typedef typename ei_packet_traits::type _RhsPacket; - typedef typename ei_packet_traits::type _ResPacket; + typedef typename packet_traits::type _LhsPacket; + typedef typename packet_traits::type _RhsPacket; + typedef typename packet_traits::type _ResPacket; - typedef typename ei_meta_if::ret LhsPacket; - typedef typename ei_meta_if::ret RhsPacket; - typedef typename ei_meta_if::ret ResPacket; + typedef typename conditional::type LhsPacket; + typedef typename conditional::type RhsPacket; + typedef typename conditional::type ResPacket; typedef ResPacket AccPacket; EIGEN_STRONG_INLINE void initAcc(AccPacket& p) { - p = ei_pset1(ResScalar(0)); + p = pset1(ResScalar(0)); } EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b) { for(DenseIndex k=0; k(rhs[k])); + pstore1(&b[k*RhsPacketSize], rhs[k]); } EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const { - dest = ei_pload(b); + dest = pload(b); } EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const { - dest = ei_ploaddup(a); + dest = ploaddup(a); } EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const { - madd_impl(a, b, c, tmp, typename ei_meta_if::ret()); + madd_impl(a, b, c, tmp, typename conditional::type()); } - EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const ei_meta_true&) const + EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const { - tmp = b; tmp.v = ei_pmul(a,tmp.v); c = ei_padd(c,tmp); + tmp = b; tmp.v = pmul(a,tmp.v); c = 
padd(c,tmp); } - EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const ei_meta_false&) const + EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const { c += a * b; } @@ -535,7 +509,7 @@ public: } protected: - ei_conj_helper cj; + conj_helper cj; }; /* optimized GEneral packed Block * packed Panel product kernel @@ -546,9 +520,9 @@ protected: * |cplx |real | easy vectorization */ template -struct ei_gebp_kernel +struct gebp_kernel { - typedef ei_gebp_traits Traits; + typedef gebp_traits Traits; typedef typename Traits::ResScalar ResScalar; typedef typename Traits::LhsPacket LhsPacket; typedef typename Traits::RhsPacket RhsPacket; @@ -570,8 +544,8 @@ struct ei_gebp_kernel if(strideA==-1) strideA = depth; if(strideB==-1) strideB = depth; - ei_conj_helper cj; -// ei_conj_helper pcj; + conj_helper cj; +// conj_helper pcj; Index packet_cols = (cols/nr) * nr; const Index peeled_mc = (rows/mr)*mr; // FIXME: @@ -592,7 +566,7 @@ struct ei_gebp_kernel for(Index i=0; i(alpha); + if(nr==4) + { + ResPacket R0, R1, R2, R3, R4, R5, R6; + ResPacket alphav = pset1(alpha); - R0 = ei_ploadu(r0); - R1 = ei_ploadu(r1); - if(nr==4) R2 = ei_ploadu(r2); - if(nr==4) R3 = ei_ploadu(r3); - R4 = ei_ploadu(r0 + ResPacketSize); - R5 = ei_ploadu(r1 + ResPacketSize); - if(nr==4) R6 = ei_ploadu(r2 + ResPacketSize); - if(nr==4) R7 = ei_ploadu(r3 + ResPacketSize); + R0 = ploadu(r0); + R1 = ploadu(r1); + R2 = ploadu(r2); + R3 = ploadu(r3); + R4 = ploadu(r0 + ResPacketSize); + R5 = ploadu(r1 + ResPacketSize); + R6 = ploadu(r2 + ResPacketSize); + traits.acc(C0, alphav, R0); + pstoreu(r0, R0); + R0 = ploadu(r3 + ResPacketSize); - traits.acc(C0, alphav, R0); - traits.acc(C1, alphav, R1); - if(nr==4) traits.acc(C2, alphav, R2); - if(nr==4) traits.acc(C3, alphav, R3); - traits.acc(C4, alphav, R4); - traits.acc(C5, alphav, R5); - if(nr==4) traits.acc(C6, alphav, R6); - 
if(nr==4) traits.acc(C7, alphav, R7); + traits.acc(C1, alphav, R1); + traits.acc(C2, alphav, R2); + traits.acc(C3, alphav, R3); + traits.acc(C4, alphav, R4); + traits.acc(C5, alphav, R5); + traits.acc(C6, alphav, R6); + traits.acc(C7, alphav, R0); + + pstoreu(r1, R1); + pstoreu(r2, R2); + pstoreu(r3, R3); + pstoreu(r0 + ResPacketSize, R4); + pstoreu(r1 + ResPacketSize, R5); + pstoreu(r2 + ResPacketSize, R6); + pstoreu(r3 + ResPacketSize, R0); + } + else + { + ResPacket R0, R1, R4; + ResPacket alphav = pset1(alpha); - ei_pstoreu(r0, R0); - ei_pstoreu(r1, R1); - if(nr==4) ei_pstoreu(r2, R2); - if(nr==4) ei_pstoreu(r3, R3); - ei_pstoreu(r0 + ResPacketSize, R4); - ei_pstoreu(r1 + ResPacketSize, R5); - if(nr==4) ei_pstoreu(r2 + ResPacketSize, R6); - if(nr==4) ei_pstoreu(r3 + ResPacketSize, R7); + R0 = ploadu(r0); + R1 = ploadu(r1); + R4 = ploadu(r0 + ResPacketSize); + traits.acc(C0, alphav, R0); + pstoreu(r0, R0); + R0 = ploadu(r1 + ResPacketSize); + traits.acc(C1, alphav, R1); + traits.acc(C4, alphav, R4); + traits.acc(C5, alphav, R0); + pstoreu(r1, R1); + pstoreu(r0 + ResPacketSize, R4); + pstoreu(r1 + ResPacketSize, R0); + } + } if(rows-peeled_mc>=LhsProgress) { Index i = peeled_mc; const LhsScalar* blA = &blockA[i*strideA+offsetA*LhsProgress]; - ei_prefetch(&blA[0]); + prefetch(&blA[0]); // gets res block as register AccPacket C0, C1, C2, C3; @@ -939,32 +935,32 @@ EIGEN_ASM_COMMENT("mybegin4"); } ResPacket R0, R1, R2, R3; - ResPacket alphav = ei_pset1(alpha); + ResPacket alphav = pset1(alpha); ResScalar* r0 = &res[(j2+0)*resStride + i]; ResScalar* r1 = r0 + resStride; ResScalar* r2 = r1 + resStride; ResScalar* r3 = r2 + resStride; - R0 = ei_ploadu(r0); - R1 = ei_ploadu(r1); - if(nr==4) R2 = ei_ploadu(r2); - if(nr==4) R3 = ei_ploadu(r3); + R0 = ploadu(r0); + R1 = ploadu(r1); + if(nr==4) R2 = ploadu(r2); + if(nr==4) R3 = ploadu(r3); traits.acc(C0, alphav, R0); traits.acc(C1, alphav, R1); if(nr==4) traits.acc(C2, alphav, R2); if(nr==4) traits.acc(C3, alphav, R3); - 
ei_pstoreu(r0, R0); - ei_pstoreu(r1, R1); - if(nr==4) ei_pstoreu(r2, R2); - if(nr==4) ei_pstoreu(r3, R3); + pstoreu(r0, R0); + pstoreu(r1, R1); + if(nr==4) pstoreu(r2, R2); + if(nr==4) pstoreu(r3, R3); } for(Index i=peeled_mc2; i(blB[k])); - } + traits.unpackRhs(depth, &blockB[j2*strideB+offsetB], unpackedB); for(Index i=0; i(alpha); + ResPacket alphav = pset1(alpha); ResScalar* r0 = &res[(j2+0)*resStride + i]; - R0 = ei_ploadu(r0); - R4 = ei_ploadu(r0+ResPacketSize); + R0 = ploadu(r0); + R4 = ploadu(r0+ResPacketSize); traits.acc(C0, alphav, R0); traits.acc(C4, alphav, R4); - ei_pstoreu(r0, R0); - ei_pstoreu(r0+ResPacketSize, R4); + pstoreu(r0, R0); + pstoreu(r0+ResPacketSize, R4); } if(rows-peeled_mc>=LhsProgress) { Index i = peeled_mc; const LhsScalar* blA = &blockA[i*strideA+offsetA*LhsProgress]; - ei_prefetch(&blA[0]); + prefetch(&blA[0]); AccPacket C0; traits.initAcc(C0); @@ -1083,15 +1074,15 @@ EIGEN_ASM_COMMENT("mybegin4"); blA += LhsProgress; } - ResPacket alphav = ei_pset1(alpha); - ResPacket R0 = ei_ploadu(&res[(j2+0)*resStride + i]); + ResPacket alphav = pset1(alpha); + ResPacket R0 = ploadu(&res[(j2+0)*resStride + i]); traits.acc(C0, alphav, R0); - ei_pstoreu(&res[(j2+0)*resStride + i], R0); + pstoreu(&res[(j2+0)*resStride + i], R0); } for(Index i=peeled_mc2; i -struct ei_gemm_pack_lhs +struct gemm_pack_lhs { void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, Index stride=0, Index offset=0) { -// enum { PacketSize = ei_packet_traits::size }; - ei_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); - ei_conj_if::IsComplex && Conjugate> cj; - ei_const_blas_data_mapper lhs(_lhs,lhsStride); +// enum { PacketSize = packet_traits::size }; + eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); + conj_if::IsComplex && Conjugate> cj; + const_blas_data_mapper lhs(_lhs,lhsStride); Index count = 0; Index 
peeled_mc = (rows/Pack1)*Pack1; for(Index i=0; i -struct ei_gemm_pack_rhs +struct gemm_pack_rhs { - typedef typename ei_packet_traits::type Packet; - enum { PacketSize = ei_packet_traits::size }; + typedef typename packet_traits::type Packet; + enum { PacketSize = packet_traits::size }; void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride=0, Index offset=0) { - ei_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); - ei_conj_if::IsComplex && Conjugate> cj; + eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); + conj_if::IsComplex && Conjugate> cj; Index packet_cols = (cols/nr) * nr; Index count = 0; for(Index j2=0; j2 // this version is optimized for row major matrices template -struct ei_gemm_pack_rhs +struct gemm_pack_rhs { - enum { PacketSize = ei_packet_traits::size }; + enum { PacketSize = packet_traits::size }; void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride=0, Index offset=0) { - ei_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); - ei_conj_if::IsComplex && Conjugate> cj; + eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); + conj_if::IsComplex && Conjugate> cj; Index packet_cols = (cols/nr) * nr; Index count = 0; for(Index j2=0; j2 } }; +} // end namespace internal + +/** \returns the currently set level 1 cpu cache size (in bytes) used to estimate the ideal blocking size parameters. + * \sa setCpuCacheSize */ +inline std::ptrdiff_t l1CacheSize() +{ + std::ptrdiff_t l1, l2; + internal::manage_caching_sizes(GetAction, &l1, &l2); + return l1; +} + +/** \returns the currently set level 2 cpu cache size (in bytes) used to estimate the ideal blocking size parameters. 
+ * \sa setCpuCacheSize */ +inline std::ptrdiff_t l2CacheSize() +{ + std::ptrdiff_t l1, l2; + internal::manage_caching_sizes(GetAction, &l1, &l2); + return l2; +} + +/** Set the cpu L1 and L2 cache sizes (in bytes). + * These values are use to adjust the size of the blocks + * for the algorithms working per blocks. + * + * \sa computeProductBlockingSizes */ +inline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2) +{ + internal::manage_caching_sizes(SetAction, &l1, &l2); +} + #endif // EIGEN_GENERAL_BLOCK_PANEL_H diff --git a/gtsam/3rdparty/Eigen/src/Core/products/GeneralMatrixMatrix.h b/gtsam/3rdparty/Eigen/src/Core/products/GeneralMatrixMatrix.h index 1cdfb84d1..7736a4b29 100644 --- a/gtsam/3rdparty/Eigen/src/Core/products/GeneralMatrixMatrix.h +++ b/gtsam/3rdparty/Eigen/src/Core/products/GeneralMatrixMatrix.h @@ -25,27 +25,29 @@ #ifndef EIGEN_GENERAL_MATRIX_MATRIX_H #define EIGEN_GENERAL_MATRIX_MATRIX_H -template class ei_level3_blocking; +namespace internal { + +template class level3_blocking; /* Specialization for a row-major destination matrix => simple transposition of the product */ template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs> -struct ei_general_matrix_matrix_product +struct general_matrix_matrix_product { - typedef typename ei_scalar_product_traits::ReturnType ResScalar; + typedef typename scalar_product_traits::ReturnType ResScalar; static EIGEN_STRONG_INLINE void run( Index rows, Index cols, Index depth, const LhsScalar* lhs, Index lhsStride, const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha, - ei_level3_blocking& blocking, + level3_blocking& blocking, GemmParallelInfo* info = 0) { // transpose the product such that the result is column major - ei_general_matrix_matrix_product @@ -59,29 +61,29 @@ template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int 
RhsStorageOrder, bool ConjugateRhs> -struct ei_general_matrix_matrix_product +struct general_matrix_matrix_product { -typedef typename ei_scalar_product_traits::ReturnType ResScalar; +typedef typename scalar_product_traits::ReturnType ResScalar; static void run(Index rows, Index cols, Index depth, const LhsScalar* _lhs, Index lhsStride, const RhsScalar* _rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha, - ei_level3_blocking& blocking, + level3_blocking& blocking, GemmParallelInfo* info = 0) { - ei_const_blas_data_mapper lhs(_lhs,lhsStride); - ei_const_blas_data_mapper rhs(_rhs,rhsStride); + const_blas_data_mapper lhs(_lhs,lhsStride); + const_blas_data_mapper rhs(_rhs,rhsStride); - typedef ei_gebp_traits Traits; + typedef gebp_traits Traits; Index kc = blocking.kc(); // cache block size along the K direction Index mc = std::min(rows,blocking.mc()); // cache block size along the M direction //Index nc = blocking.nc(); // cache block size along the N direction - ei_gemm_pack_lhs pack_lhs; - ei_gemm_pack_rhs pack_rhs; - ei_gebp_kernel gebp; + gemm_pack_lhs pack_lhs; + gemm_pack_rhs pack_rhs; + gebp_kernel gebp; #ifdef EIGEN_HAS_OPENMP if(info) @@ -89,12 +91,13 @@ static void run(Index rows, Index cols, Index depth, // this is the parallel version! Index tid = omp_get_thread_num(); Index threads = omp_get_num_threads(); - - LhsScalar* blockA = ei_aligned_stack_new(LhsScalar, kc*mc); + + std::size_t sizeA = kc*mc; std::size_t sizeW = kc*Traits::WorkSpaceFactor; + LhsScalar* blockA = ei_aligned_stack_new(LhsScalar, sizeA); RhsScalar* w = ei_aligned_stack_new(RhsScalar, sizeW); RhsScalar* blockB = blocking.blockB(); - ei_internal_assert(blockB!=0); + eigen_internal_assert(blockB!=0); // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs... 
for(Index k=0; k0) while(info[j].sync!=k) {} - gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w); + gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w); } // Then keep going as usual with the remaining A' @@ -198,7 +201,7 @@ static void run(Index rows, Index cols, Index depth, } } - if(blocking.blockA()==0) ei_aligned_stack_delete(LhsScalar, blockA, kc*mc); + if(blocking.blockA()==0) ei_aligned_stack_delete(LhsScalar, blockA, sizeA); if(blocking.blockB()==0) ei_aligned_stack_delete(RhsScalar, blockB, sizeB); if(blocking.blockW()==0) ei_aligned_stack_delete(RhsScalar, blockW, sizeW); } @@ -208,18 +211,18 @@ static void run(Index rows, Index cols, Index depth, /********************************************************************************* * Specialization of GeneralProduct<> for "large" GEMM, i.e., -* implementation of the high level wrapper to ei_general_matrix_matrix_product +* implementation of the high level wrapper to general_matrix_matrix_product **********************************************************************************/ template -struct ei_traits > - : ei_traits, Lhs, Rhs> > +struct traits > + : traits, Lhs, Rhs> > {}; template -struct ei_gemm_functor +struct gemm_functor { - ei_gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, Scalar actualAlpha, + gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, Scalar actualAlpha, BlockingType& blocking) : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking) {} @@ -235,8 +238,8 @@ struct ei_gemm_functor cols = m_rhs.cols(); Gemm::run(rows, cols, m_lhs.cols(), - /*(const Scalar*)*/&(m_lhs.const_cast_derived().coeffRef(row,0)), m_lhs.outerStride(), - /*(const Scalar*)*/&(m_rhs.const_cast_derived().coeffRef(0,col)), m_rhs.outerStride(), + /*(const Scalar*)*/&m_lhs.coeffRef(row,0), 
m_lhs.outerStride(), + /*(const Scalar*)*/&m_rhs.coeffRef(0,col), m_rhs.outerStride(), (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(), m_actualAlpha, m_blocking, info); } @@ -250,10 +253,10 @@ struct ei_gemm_functor }; template class ei_gemm_blocking_space; +bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space; template -class ei_level3_blocking +class level3_blocking { typedef _LhsScalar LhsScalar; typedef _RhsScalar RhsScalar; @@ -269,7 +272,7 @@ class ei_level3_blocking public: - ei_level3_blocking() + level3_blocking() : m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0) {} @@ -283,19 +286,19 @@ class ei_level3_blocking }; template -class ei_gemm_blocking_space - : public ei_level3_blocking< - typename ei_meta_if::ret, - typename ei_meta_if::ret> +class gemm_blocking_space + : public level3_blocking< + typename conditional::type, + typename conditional::type> { enum { Transpose = StorageOrder==RowMajor, ActualRows = Transpose ? MaxCols : MaxRows, ActualCols = Transpose ? 
MaxRows : MaxCols }; - typedef typename ei_meta_if::ret LhsScalar; - typedef typename ei_meta_if::ret RhsScalar; - typedef ei_gebp_traits Traits; + typedef typename conditional::type LhsScalar; + typedef typename conditional::type RhsScalar; + typedef gebp_traits Traits; enum { SizeA = ActualRows * MaxDepth, SizeB = ActualCols * MaxDepth, @@ -308,7 +311,7 @@ class ei_gemm_blocking_spacem_mc = ActualRows; this->m_nc = ActualCols; @@ -325,17 +328,17 @@ class ei_gemm_blocking_space -class ei_gemm_blocking_space - : public ei_level3_blocking< - typename ei_meta_if::ret, - typename ei_meta_if::ret> +class gemm_blocking_space + : public level3_blocking< + typename conditional::type, + typename conditional::type> { enum { Transpose = StorageOrder==RowMajor }; - typedef typename ei_meta_if::ret LhsScalar; - typedef typename ei_meta_if::ret RhsScalar; - typedef ei_gebp_traits Traits; + typedef typename conditional::type LhsScalar; + typedef typename conditional::type RhsScalar; + typedef gebp_traits Traits; DenseIndex m_sizeA; DenseIndex m_sizeB; @@ -343,7 +346,7 @@ class ei_gemm_blocking_spacem_mc = Transpose ? cols : rows; this->m_nc = Transpose ? 
rows : cols; @@ -358,19 +361,19 @@ class ei_gemm_blocking_spacem_blockA==0) - this->m_blockA = ei_aligned_new(m_sizeA); + this->m_blockA = aligned_new(m_sizeA); } void allocateB() { if(this->m_blockB==0) - this->m_blockB = ei_aligned_new(m_sizeB); + this->m_blockB = aligned_new(m_sizeB); } void allocateW() { if(this->m_blockW==0) - this->m_blockW = ei_aligned_new(m_sizeW); + this->m_blockW = aligned_new(m_sizeW); } void allocateAll() @@ -380,14 +383,16 @@ class ei_gemm_blocking_spacem_blockA, m_sizeA); - ei_aligned_delete(this->m_blockB, m_sizeB); - ei_aligned_delete(this->m_blockW, m_sizeW); + aligned_delete(this->m_blockA, m_sizeA); + aligned_delete(this->m_blockB, m_sizeB); + aligned_delete(this->m_blockW, m_sizeW); } }; +} // end namespace internal + template class GeneralProduct : public ProductBase, Lhs, Rhs> @@ -404,13 +409,13 @@ class GeneralProduct GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) { - typedef ei_scalar_product_op BinOp; + typedef internal::scalar_product_op BinOp; EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar); } template void scaleAndAddTo(Dest& dst, Scalar alpha) const { - ei_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); + eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs); const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs); @@ -418,12 +423,12 @@ class GeneralProduct Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) * RhsBlasTraits::extractScalarFactor(m_rhs); - typedef ei_gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar, + typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? 
RowMajor : ColMajor,LhsScalar,RhsScalar, Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType; - typedef ei_gemm_functor< + typedef internal::gemm_functor< Scalar, Index, - ei_general_matrix_matrix_product< + internal::general_matrix_matrix_product< Index, LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate), RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate), @@ -432,7 +437,7 @@ class GeneralProduct BlockingType blocking(dst.rows(), dst.cols(), lhs.cols()); - ei_parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit); + internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit); } }; diff --git a/gtsam/3rdparty/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h b/gtsam/3rdparty/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h new file mode 100644 index 000000000..39495c1a2 --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h @@ -0,0 +1,227 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. 
+// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H +#define EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H + +namespace internal { + +/********************************************************************** +* This file implements a general A * B product while +* evaluating only one triangular part of the product. +* This is more general version of self adjoint product (C += A A^T) +* as the level 3 SYRK Blas routine. +**********************************************************************/ + +// forward declarations (defined at the end of this file) +template +struct tribb_kernel; + +/* Optimized matrix-matrix product evaluating only one triangular half */ +template +struct general_matrix_matrix_triangular_product; + +// as usual if the result is row major => we transpose the product +template +struct general_matrix_matrix_triangular_product +{ + typedef typename scalar_product_traits::ReturnType ResScalar; + static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* lhs, Index lhsStride, + const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha) + { + general_matrix_matrix_triangular_product + ::run(size,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha); + } +}; + +template +struct general_matrix_matrix_triangular_product +{ + typedef typename scalar_product_traits::ReturnType ResScalar; + static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* _lhs, Index lhsStride, + const RhsScalar* _rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar 
alpha) + { + const_blas_data_mapper lhs(_lhs,lhsStride); + const_blas_data_mapper rhs(_rhs,rhsStride); + + typedef gebp_traits Traits; + + Index kc = depth; // cache block size along the K direction + Index mc = size; // cache block size along the M direction + Index nc = size; // cache block size along the N direction + computeProductBlockingSizes(kc, mc, nc); + // !!! mc must be a multiple of nr: + if(mc > Traits::nr) + mc = (mc/Traits::nr)*Traits::nr; + + LhsScalar* blockA = ei_aligned_stack_new(LhsScalar, kc*mc); + std::size_t sizeW = kc*Traits::WorkSpaceFactor; + std::size_t sizeB = sizeW + kc*size; + RhsScalar* allocatedBlockB = ei_aligned_stack_new(RhsScalar, sizeB); + RhsScalar* blockB = allocatedBlockB + sizeW; + + gemm_pack_lhs pack_lhs; + gemm_pack_rhs pack_rhs; + gebp_kernel gebp; + tribb_kernel sybb; + + for(Index k2=0; k2 processed with gebp or skipped + // 2 - the actual_mc x actual_mc symmetric block => processed with a special kernel + // 3 - after the diagonal => processed with gebp or skipped + if (UpLo==Lower) + gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, std::min(size,i2), alpha, + -1, -1, 0, 0, allocatedBlockB); + + sybb(res+resStride*i2 + i2, resStride, blockA, blockB + actual_kc*i2, actual_mc, actual_kc, alpha, allocatedBlockB); + + if (UpLo==Upper) + { + Index j2 = i2+actual_mc; + gebp(res+resStride*j2+i2, resStride, blockA, blockB+actual_kc*j2, actual_mc, actual_kc, std::max(Index(0), size-j2), alpha, + -1, -1, 0, 0, allocatedBlockB); + } + } + } + ei_aligned_stack_delete(LhsScalar, blockA, kc*mc); + ei_aligned_stack_delete(RhsScalar, allocatedBlockB, sizeB); + } +}; + +// Optimized packed Block * packed Block product kernel evaluating only one given triangular part +// This kernel is built on top of the gebp kernel: +// - the current destination block is processed per panel of actual_mc x BlockSize +// where BlockSize is set to the minimal value allowing gebp to be as fast as possible +// - then, as usual, each panel is 
split into three parts along the diagonal, +// the sub blocks above and below the diagonal are processed as usual, +// while the triangular block overlapping the diagonal is evaluated into a +// small temporary buffer which is then accumulated into the result using a +// triangular traversal. +template +struct tribb_kernel +{ + typedef gebp_traits Traits; + typedef typename Traits::ResScalar ResScalar; + + enum { + BlockSize = EIGEN_PLAIN_ENUM_MAX(mr,nr) + }; + void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index size, Index depth, ResScalar alpha, RhsScalar* workspace) + { + gebp_kernel gebp_kernel; + Matrix buffer; + + // let's process the block per panel of actual_mc x BlockSize, + // again, each is split into three parts, etc. + for (Index j=0; j(BlockSize,size - j); + const RhsScalar* actual_b = blockB+j*depth; + + if(UpLo==Upper) + gebp_kernel(res+j*resStride, resStride, blockA, actual_b, j, depth, actualBlockSize, alpha, + -1, -1, 0, 0, workspace); + + // selfadjoint micro block + { + Index i = j; + buffer.setZero(); + // 1 - apply the kernel on the temporary buffer + gebp_kernel(buffer.data(), BlockSize, blockA+depth*i, actual_b, actualBlockSize, depth, actualBlockSize, alpha, + -1, -1, 0, 0, workspace); + // 2 - triangular accumulation + for(Index j1=0; j1 +template +TriangularView& TriangularView::assignProduct(const ProductBase& prod, const Scalar& alpha) +{ + typedef typename internal::remove_all::type Lhs; + typedef internal::blas_traits LhsBlasTraits; + typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhs; + typedef typename internal::remove_all::type _ActualLhs; + const ActualLhs actualLhs = LhsBlasTraits::extract(prod.lhs()); + + typedef typename internal::remove_all::type Rhs; + typedef internal::blas_traits RhsBlasTraits; + typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhs; + typedef typename internal::remove_all::type _ActualRhs; + const ActualRhs actualRhs = 
RhsBlasTraits::extract(prod.rhs()); + + typename ProductDerived::Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs().derived()) * RhsBlasTraits::extractScalarFactor(prod.rhs().derived()); + + internal::general_matrix_matrix_triangular_product + ::run(m_matrix.cols(), actualLhs.cols(), + &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &actualRhs.coeffRef(0,0), actualRhs.outerStride(), + const_cast(m_matrix.data()), m_matrix.outerStride(), actualAlpha); + + return *this; +} + +#endif // EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H diff --git a/gtsam/3rdparty/Eigen/src/Core/products/GeneralMatrixVector.h b/gtsam/3rdparty/Eigen/src/Core/products/GeneralMatrixVector.h index 96a038b05..540638b5a 100644 --- a/gtsam/3rdparty/Eigen/src/Core/products/GeneralMatrixVector.h +++ b/gtsam/3rdparty/Eigen/src/Core/products/GeneralMatrixVector.h @@ -25,6 +25,8 @@ #ifndef EIGEN_GENERAL_MATRIX_VECTOR_H #define EIGEN_GENERAL_MATRIX_VECTOR_H +namespace internal { + /* Optimized col-major matrix * vector product: * This algorithm processes 4 columns at onces that allows to both reduce * the number of load/stores of the result by a factor 4 and to reduce @@ -39,25 +41,25 @@ * |cplx |real |real | optimal case, vectorization possible via real-cplx mul */ template -struct ei_general_matrix_vector_product +struct general_matrix_vector_product { -typedef typename ei_scalar_product_traits::ReturnType ResScalar; +typedef typename scalar_product_traits::ReturnType ResScalar; enum { - Vectorizable = ei_packet_traits::Vectorizable && ei_packet_traits::Vectorizable - && int(ei_packet_traits::size)==int(ei_packet_traits::size), - LhsPacketSize = Vectorizable ? ei_packet_traits::size : 1, - RhsPacketSize = Vectorizable ? ei_packet_traits::size : 1, - ResPacketSize = Vectorizable ? ei_packet_traits::size : 1 + Vectorizable = packet_traits::Vectorizable && packet_traits::Vectorizable + && int(packet_traits::size)==int(packet_traits::size), + LhsPacketSize = Vectorizable ? 
packet_traits::size : 1, + RhsPacketSize = Vectorizable ? packet_traits::size : 1, + ResPacketSize = Vectorizable ? packet_traits::size : 1 }; -typedef typename ei_packet_traits::type _LhsPacket; -typedef typename ei_packet_traits::type _RhsPacket; -typedef typename ei_packet_traits::type _ResPacket; +typedef typename packet_traits::type _LhsPacket; +typedef typename packet_traits::type _RhsPacket; +typedef typename packet_traits::type _ResPacket; -typedef typename ei_meta_if::ret LhsPacket; -typedef typename ei_meta_if::ret RhsPacket; -typedef typename ei_meta_if::ret ResPacket; +typedef typename conditional::type LhsPacket; +typedef typename conditional::type RhsPacket; +typedef typename conditional::type ResPacket; EIGEN_DONT_INLINE static void run( Index rows, Index cols, @@ -69,23 +71,23 @@ EIGEN_DONT_INLINE static void run( #endif , RhsScalar alpha) { - ei_internal_assert(resIncr==1); + eigen_internal_assert(resIncr==1); #ifdef _EIGEN_ACCUMULATE_PACKETS #error _EIGEN_ACCUMULATE_PACKETS has already been defined #endif #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2) \ - ei_pstore(&res[j], \ - ei_padd(ei_pload(&res[j]), \ - ei_padd( \ - ei_padd(pcj.pmul(EIGEN_CAT(ei_ploa , A0)(&lhs0[j]), ptmp0), \ - pcj.pmul(EIGEN_CAT(ei_ploa , A13)(&lhs1[j]), ptmp1)), \ - ei_padd(pcj.pmul(EIGEN_CAT(ei_ploa , A2)(&lhs2[j]), ptmp2), \ - pcj.pmul(EIGEN_CAT(ei_ploa , A13)(&lhs3[j]), ptmp3)) ))) + pstore(&res[j], \ + padd(pload(&res[j]), \ + padd( \ + padd(pcj.pmul(EIGEN_CAT(ploa , A0)(&lhs0[j]), ptmp0), \ + pcj.pmul(EIGEN_CAT(ploa , A13)(&lhs1[j]), ptmp1)), \ + padd(pcj.pmul(EIGEN_CAT(ploa , A2)(&lhs2[j]), ptmp2), \ + pcj.pmul(EIGEN_CAT(ploa , A13)(&lhs3[j]), ptmp3)) ))) - ei_conj_helper cj; - ei_conj_helper pcj; + conj_helper cj; + conj_helper pcj; if(ConjugateRhs) - alpha = ei_conj(alpha); + alpha = conj(alpha); enum { AllAligned = 0, EvenAligned, FirstAligned, NoneAligned }; const Index columnsAtOnce = 4; @@ -97,7 +99,7 @@ EIGEN_DONT_INLINE static void run( // How many coeffs of 
the result do we have to skip to be aligned. // Here we assume data are at least aligned on the base scalar type. - Index alignedStart = ei_first_aligned(res,size); + Index alignedStart = first_aligned(res,size); Index alignedSize = ResPacketSize>1 ? alignedStart + ((size-alignedStart) & ~ResPacketAlignedMask) : 0; const Index peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart; @@ -107,7 +109,7 @@ EIGEN_DONT_INLINE static void run( : FirstAligned; // we cannot assume the first element is aligned because of sub-matrices - const Index lhsAlignmentOffset = ei_first_aligned(lhs,size); + const Index lhsAlignmentOffset = first_aligned(lhs,size); // find how many columns do we have to skip to be aligned with the result (if possible) Index skipColumns = 0; @@ -119,7 +121,7 @@ EIGEN_DONT_INLINE static void run( } else if (LhsPacketSize>1) { - ei_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0 || size= cols) || LhsPacketSize > size || (size_t(lhs+alignedStart+lhsStride*skipColumns)%sizeof(LhsPacket))==0); @@ -154,10 +156,10 @@ EIGEN_DONT_INLINE static void run( Index columnBound = ((cols-skipColumns)/columnsAtOnce)*columnsAtOnce + skipColumns; for (Index i=skipColumns; i(alpha*rhs[i*rhsIncr]), - ptmp1 = ei_pset1(alpha*rhs[(i+offset1)*rhsIncr]), - ptmp2 = ei_pset1(alpha*rhs[(i+2)*rhsIncr]), - ptmp3 = ei_pset1(alpha*rhs[(i+offset3)*rhsIncr]); + RhsPacket ptmp0 = pset1(alpha*rhs[i*rhsIncr]), + ptmp1 = pset1(alpha*rhs[(i+offset1)*rhsIncr]), + ptmp2 = pset1(alpha*rhs[(i+2)*rhsIncr]), + ptmp3 = pset1(alpha*rhs[(i+offset3)*rhsIncr]); // this helps a lot generating better binary code const LhsScalar *lhs0 = lhs + i*lhsStride, *lhs1 = lhs + (i+offset1)*lhsStride, @@ -169,10 +171,10 @@ EIGEN_DONT_INLINE static void run( // process initial unaligned coeffs for (Index j=0; jalignedStart) @@ -193,32 +195,32 @@ EIGEN_DONT_INLINE static void run( LhsPacket A00, A01, A02, A03, A10, A11, A12, A13; ResPacket T0, T1; - A01 = 
ei_pload(&lhs1[alignedStart-1]); - A02 = ei_pload(&lhs2[alignedStart-2]); - A03 = ei_pload(&lhs3[alignedStart-3]); + A01 = pload(&lhs1[alignedStart-1]); + A02 = pload(&lhs2[alignedStart-2]); + A03 = pload(&lhs3[alignedStart-3]); for (Index j = alignedStart; j(&lhs1[j-1+LhsPacketSize]); ei_palign<1>(A01,A11); - A12 = ei_pload(&lhs2[j-2+LhsPacketSize]); ei_palign<2>(A02,A12); - A13 = ei_pload(&lhs3[j-3+LhsPacketSize]); ei_palign<3>(A03,A13); + A11 = pload(&lhs1[j-1+LhsPacketSize]); palign<1>(A01,A11); + A12 = pload(&lhs2[j-2+LhsPacketSize]); palign<2>(A02,A12); + A13 = pload(&lhs3[j-3+LhsPacketSize]); palign<3>(A03,A13); - A00 = ei_pload(&lhs0[j]); - A10 = ei_pload(&lhs0[j+LhsPacketSize]); - T0 = pcj.pmadd(A00, ptmp0, ei_pload(&res[j])); - T1 = pcj.pmadd(A10, ptmp0, ei_pload(&res[j+ResPacketSize])); + A00 = pload(&lhs0[j]); + A10 = pload(&lhs0[j+LhsPacketSize]); + T0 = pcj.pmadd(A00, ptmp0, pload(&res[j])); + T1 = pcj.pmadd(A10, ptmp0, pload(&res[j+ResPacketSize])); T0 = pcj.pmadd(A01, ptmp1, T0); - A01 = ei_pload(&lhs1[j-1+2*LhsPacketSize]); ei_palign<1>(A11,A01); + A01 = pload(&lhs1[j-1+2*LhsPacketSize]); palign<1>(A11,A01); T0 = pcj.pmadd(A02, ptmp2, T0); - A02 = ei_pload(&lhs2[j-2+2*LhsPacketSize]); ei_palign<2>(A12,A02); + A02 = pload(&lhs2[j-2+2*LhsPacketSize]); palign<2>(A12,A02); T0 = pcj.pmadd(A03, ptmp3, T0); - ei_pstore(&res[j],T0); - A03 = ei_pload(&lhs3[j-3+2*LhsPacketSize]); ei_palign<3>(A13,A03); + pstore(&res[j],T0); + A03 = pload(&lhs3[j-3+2*LhsPacketSize]); palign<3>(A13,A03); T1 = pcj.pmadd(A11, ptmp1, T1); T1 = pcj.pmadd(A12, ptmp2, T1); T1 = pcj.pmadd(A13, ptmp3, T1); - ei_pstore(&res[j+ResPacketSize],T1); + pstore(&res[j+ResPacketSize],T1); } } for (Index j = peeledSize; j(alpha*rhs[k*rhsIncr]); + RhsPacket ptmp0 = pset1(alpha*rhs[k*rhsIncr]); const LhsScalar* lhs0 = lhs + k*lhsStride; if (Vectorizable) @@ -257,19 +259,19 @@ EIGEN_DONT_INLINE static void run( /* explicit vectorization */ // process first unaligned result's coeffs for (Index j=0; 
j(&lhs0[i]), ptmp0, ei_pload(&res[i]))); + pstore(&res[i], pcj.pmadd(ploadu(&lhs0[i]), ptmp0, pload(&res[i]))); else for (Index i = alignedStart;i(&lhs0[i]), ptmp0, ei_pload(&res[i]))); + pstore(&res[i], pcj.pmadd(ploadu(&lhs0[i]), ptmp0, pload(&res[i]))); } // process remaining scalars (or all if no explicit vectorization) for (Index i=alignedSize; i -struct ei_general_matrix_vector_product +struct general_matrix_vector_product { -typedef typename ei_scalar_product_traits::ReturnType ResScalar; +typedef typename scalar_product_traits::ReturnType ResScalar; enum { - Vectorizable = ei_packet_traits::Vectorizable && ei_packet_traits::Vectorizable - && int(ei_packet_traits::size)==int(ei_packet_traits::size), - LhsPacketSize = Vectorizable ? ei_packet_traits::size : 1, - RhsPacketSize = Vectorizable ? ei_packet_traits::size : 1, - ResPacketSize = Vectorizable ? ei_packet_traits::size : 1 + Vectorizable = packet_traits::Vectorizable && packet_traits::Vectorizable + && int(packet_traits::size)==int(packet_traits::size), + LhsPacketSize = Vectorizable ? packet_traits::size : 1, + RhsPacketSize = Vectorizable ? packet_traits::size : 1, + ResPacketSize = Vectorizable ? 
packet_traits::size : 1 }; -typedef typename ei_packet_traits::type _LhsPacket; -typedef typename ei_packet_traits::type _RhsPacket; -typedef typename ei_packet_traits::type _ResPacket; +typedef typename packet_traits::type _LhsPacket; +typedef typename packet_traits::type _RhsPacket; +typedef typename packet_traits::type _ResPacket; -typedef typename ei_meta_if::ret LhsPacket; -typedef typename ei_meta_if::ret RhsPacket; -typedef typename ei_meta_if::ret ResPacket; +typedef typename conditional::type LhsPacket; +typedef typename conditional::type RhsPacket; +typedef typename conditional::type ResPacket; EIGEN_DONT_INLINE static void run( Index rows, Index cols, @@ -323,20 +325,20 @@ EIGEN_DONT_INLINE static void run( ResScalar alpha) { EIGEN_UNUSED_VARIABLE(rhsIncr); - ei_internal_assert(rhsIncr==1); + eigen_internal_assert(rhsIncr==1); #ifdef _EIGEN_ACCUMULATE_PACKETS #error _EIGEN_ACCUMULATE_PACKETS has already been defined #endif #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2) {\ - RhsPacket b = ei_pload(&rhs[j]); \ - ptmp0 = pcj.pmadd(EIGEN_CAT(ei_ploa,A0) (&lhs0[j]), b, ptmp0); \ - ptmp1 = pcj.pmadd(EIGEN_CAT(ei_ploa,A13)(&lhs1[j]), b, ptmp1); \ - ptmp2 = pcj.pmadd(EIGEN_CAT(ei_ploa,A2) (&lhs2[j]), b, ptmp2); \ - ptmp3 = pcj.pmadd(EIGEN_CAT(ei_ploa,A13)(&lhs3[j]), b, ptmp3); } + RhsPacket b = pload(&rhs[j]); \ + ptmp0 = pcj.pmadd(EIGEN_CAT(ploa,A0) (&lhs0[j]), b, ptmp0); \ + ptmp1 = pcj.pmadd(EIGEN_CAT(ploa,A13)(&lhs1[j]), b, ptmp1); \ + ptmp2 = pcj.pmadd(EIGEN_CAT(ploa,A2) (&lhs2[j]), b, ptmp2); \ + ptmp3 = pcj.pmadd(EIGEN_CAT(ploa,A13)(&lhs3[j]), b, ptmp3); } - ei_conj_helper cj; - ei_conj_helper pcj; + conj_helper cj; + conj_helper pcj; enum { AllAligned=0, EvenAligned=1, FirstAligned=2, NoneAligned=3 }; const Index rowsAtOnce = 4; @@ -349,7 +351,7 @@ EIGEN_DONT_INLINE static void run( // How many coeffs of the result do we have to skip to be aligned. 
// Here we assume data are at least aligned on the base scalar type // if that's not the case then vectorization is discarded, see below. - Index alignedStart = ei_first_aligned(rhs, depth); + Index alignedStart = first_aligned(rhs, depth); Index alignedSize = RhsPacketSize>1 ? alignedStart + ((depth-alignedStart) & ~RhsPacketAlignedMask) : 0; const Index peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart; @@ -359,7 +361,7 @@ EIGEN_DONT_INLINE static void run( : FirstAligned; // we cannot assume the first element is aligned because of sub-matrices - const Index lhsAlignmentOffset = ei_first_aligned(lhs,depth); + const Index lhsAlignmentOffset = first_aligned(lhs,depth); // find how many rows do we have to skip to be aligned with rhs (if possible) Index skipRows = 0; @@ -371,7 +373,7 @@ EIGEN_DONT_INLINE static void run( } else if (LhsPacketSize>1) { - ei_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0 || depth= rows) || LhsPacketSize > depth @@ -416,8 +418,8 @@ EIGEN_DONT_INLINE static void run( if (Vectorizable) { /* explicit vectorization */ - ResPacket ptmp0 = ei_pset1(ResScalar(0)), ptmp1 = ei_pset1(ResScalar(0)), - ptmp2 = ei_pset1(ResScalar(0)), ptmp3 = ei_pset1(ResScalar(0)); + ResPacket ptmp0 = pset1(ResScalar(0)), ptmp1 = pset1(ResScalar(0)), + ptmp2 = pset1(ResScalar(0)), ptmp3 = pset1(ResScalar(0)); // process initial unaligned coeffs // FIXME this loop get vectorized by the compiler ! @@ -450,27 +452,27 @@ EIGEN_DONT_INLINE static void run( * than basic unaligned loads. 
*/ LhsPacket A01, A02, A03, A11, A12, A13; - A01 = ei_pload(&lhs1[alignedStart-1]); - A02 = ei_pload(&lhs2[alignedStart-2]); - A03 = ei_pload(&lhs3[alignedStart-3]); + A01 = pload(&lhs1[alignedStart-1]); + A02 = pload(&lhs2[alignedStart-2]); + A03 = pload(&lhs3[alignedStart-3]); for (Index j = alignedStart; j(&rhs[j]); - A11 = ei_pload(&lhs1[j-1+LhsPacketSize]); ei_palign<1>(A01,A11); - A12 = ei_pload(&lhs2[j-2+LhsPacketSize]); ei_palign<2>(A02,A12); - A13 = ei_pload(&lhs3[j-3+LhsPacketSize]); ei_palign<3>(A03,A13); + RhsPacket b = pload(&rhs[j]); + A11 = pload(&lhs1[j-1+LhsPacketSize]); palign<1>(A01,A11); + A12 = pload(&lhs2[j-2+LhsPacketSize]); palign<2>(A02,A12); + A13 = pload(&lhs3[j-3+LhsPacketSize]); palign<3>(A03,A13); - ptmp0 = pcj.pmadd(ei_pload(&lhs0[j]), b, ptmp0); + ptmp0 = pcj.pmadd(pload(&lhs0[j]), b, ptmp0); ptmp1 = pcj.pmadd(A01, b, ptmp1); - A01 = ei_pload(&lhs1[j-1+2*LhsPacketSize]); ei_palign<1>(A11,A01); + A01 = pload(&lhs1[j-1+2*LhsPacketSize]); palign<1>(A11,A01); ptmp2 = pcj.pmadd(A02, b, ptmp2); - A02 = ei_pload(&lhs2[j-2+2*LhsPacketSize]); ei_palign<2>(A12,A02); + A02 = pload(&lhs2[j-2+2*LhsPacketSize]); palign<2>(A12,A02); ptmp3 = pcj.pmadd(A03, b, ptmp3); - A03 = ei_pload(&lhs3[j-3+2*LhsPacketSize]); ei_palign<3>(A13,A03); + A03 = pload(&lhs3[j-3+2*LhsPacketSize]); palign<3>(A13,A03); - b = ei_pload(&rhs[j+RhsPacketSize]); - ptmp0 = pcj.pmadd(ei_pload(&lhs0[j+LhsPacketSize]), b, ptmp0); + b = pload(&rhs[j+RhsPacketSize]); + ptmp0 = pcj.pmadd(pload(&lhs0[j+LhsPacketSize]), b, ptmp0); ptmp1 = pcj.pmadd(A11, b, ptmp1); ptmp2 = pcj.pmadd(A12, b, ptmp2); ptmp3 = pcj.pmadd(A13, b, ptmp3); @@ -484,10 +486,10 @@ EIGEN_DONT_INLINE static void run( _EIGEN_ACCUMULATE_PACKETS(du,du,du); break; } - tmp0 += ei_predux(ptmp0); - tmp1 += ei_predux(ptmp1); - tmp2 += ei_predux(ptmp2); - tmp3 += ei_predux(ptmp3); + tmp0 += predux(ptmp0); + tmp1 += predux(ptmp1); + tmp2 += predux(ptmp2); + tmp3 += predux(ptmp3); } } // end explicit vectorization @@ -513,7 
+515,7 @@ EIGEN_DONT_INLINE static void run( for (Index i=start; i(tmp0); + ResPacket ptmp0 = pset1(tmp0); const LhsScalar* lhs0 = lhs + i*lhsStride; // process first unaligned result's coeffs // FIXME this loop get vectorized by the compiler ! @@ -525,11 +527,11 @@ EIGEN_DONT_INLINE static void run( // process aligned rhs coeffs if ((size_t(lhs0+alignedStart)%sizeof(LhsPacket))==0) for (Index j = alignedStart;j(&lhs0[j]), ei_pload(&rhs[j]), ptmp0); + ptmp0 = pcj.pmadd(pload(&lhs0[j]), pload(&rhs[j]), ptmp0); else for (Index j = alignedStart;j(&lhs0[j]), ei_pload(&rhs[j]), ptmp0); - tmp0 += ei_predux(ptmp0); + ptmp0 = pcj.pmadd(ploadu(&lhs0[j]), pload(&rhs[j]), ptmp0); + tmp0 += predux(ptmp0); } // process remaining scalars @@ -552,4 +554,6 @@ EIGEN_DONT_INLINE static void run( } }; +} // end namespace internal + #endif // EIGEN_GENERAL_MATRIX_VECTOR_H diff --git a/gtsam/3rdparty/Eigen/src/Core/products/Parallelizer.h b/gtsam/3rdparty/Eigen/src/Core/products/Parallelizer.h index b13c0706e..ecdedc363 100644 --- a/gtsam/3rdparty/Eigen/src/Core/products/Parallelizer.h +++ b/gtsam/3rdparty/Eigen/src/Core/products/Parallelizer.h @@ -25,19 +25,21 @@ #ifndef EIGEN_PARALLELIZER_H #define EIGEN_PARALLELIZER_H +namespace internal { + /** \internal */ -inline void ei_manage_multi_threading(Action action, int* v) +inline void manage_multi_threading(Action action, int* v) { - static int m_maxThreads = -1; + static EIGEN_UNUSED int m_maxThreads = -1; if(action==SetAction) { - ei_internal_assert(v!=0); + eigen_internal_assert(v!=0); m_maxThreads = *v; } else if(action==GetAction) { - ei_internal_assert(v!=0); + eigen_internal_assert(v!=0); #ifdef EIGEN_HAS_OPENMP if(m_maxThreads>0) *v = m_maxThreads; @@ -49,7 +51,7 @@ inline void ei_manage_multi_threading(Action action, int* v) } else { - ei_internal_assert(false); + eigen_internal_assert(false); } } @@ -58,7 +60,7 @@ inline void ei_manage_multi_threading(Action action, int* v) inline int nbThreads() { int ret; - 
ei_manage_multi_threading(GetAction, &ret); + manage_multi_threading(GetAction, &ret); return ret; } @@ -66,7 +68,7 @@ inline int nbThreads() * \sa nbThreads */ inline void setNbThreads(int v) { - ei_manage_multi_threading(SetAction, &v); + manage_multi_threading(SetAction, &v); } template struct GemmParallelInfo @@ -81,7 +83,7 @@ template struct GemmParallelInfo }; template -void ei_parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpose) +void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpose) { #ifndef EIGEN_HAS_OPENMP // FIXME the transpose variable is only needed to properly split @@ -122,7 +124,7 @@ void ei_parallelize_gemm(const Functor& func, Index rows, Index cols, bool trans Index blockCols = (cols / threads) & ~Index(0x3); Index blockRows = (rows / threads) & ~Index(0x7); - + GemmParallelInfo* info = new GemmParallelInfo[threads]; #pragma omp parallel for schedule(static,1) num_threads(threads) @@ -147,4 +149,6 @@ void ei_parallelize_gemm(const Functor& func, Index rows, Index cols, bool trans #endif } +} // end namespace internal + #endif // EIGEN_PARALLELIZER_H diff --git a/gtsam/3rdparty/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/gtsam/3rdparty/Eigen/src/Core/products/SelfadjointMatrixMatrix.h index ede8b77bf..cfebcf426 100644 --- a/gtsam/3rdparty/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +++ b/gtsam/3rdparty/Eigen/src/Core/products/SelfadjointMatrixMatrix.h @@ -25,12 +25,14 @@ #ifndef EIGEN_SELFADJOINT_MATRIX_MATRIX_H #define EIGEN_SELFADJOINT_MATRIX_MATRIX_H +namespace internal { + // pack a selfadjoint block diagonal for use with the gebp_kernel template -struct ei_symm_pack_lhs +struct symm_pack_lhs { template inline - void pack(Scalar* blockA, const ei_const_blas_data_mapper& lhs, Index cols, Index i, Index& count) + void pack(Scalar* blockA, const const_blas_data_mapper& lhs, Index cols, Index i, Index& count) { // normal copy for(Index k=0; k lhs(_lhs,lhsStride); + 
const_blas_data_mapper lhs(_lhs,lhsStride); Index count = 0; Index peeled_mc = (rows/Pack1)*Pack1; for(Index i=0; i -struct ei_symm_pack_rhs +struct symm_pack_rhs { - enum { PacketSize = ei_packet_traits::size }; + enum { PacketSize = packet_traits::size }; void operator()(Scalar* blockB, const Scalar* _rhs, Index rhsStride, Index rows, Index cols, Index k2) { Index end_k = k2 + rows; Index count = 0; - ei_const_blas_data_mapper rhs(_rhs,rhsStride); + const_blas_data_mapper rhs(_rhs,rhsStride); Index packet_cols = (cols/nr)*nr; // first part: normal case @@ -118,12 +120,12 @@ struct ei_symm_pack_rhs // transpose for(Index k=k2; k -struct ei_product_selfadjoint_matrix; +struct product_selfadjoint_matrix; template -struct ei_product_selfadjoint_matrix +struct product_selfadjoint_matrix { static EIGEN_STRONG_INLINE void run( @@ -224,7 +226,7 @@ struct ei_product_selfadjoint_matrix::IsComplex && EIGEN_LOGICAL_XOR(RhsSelfAdjoint,ConjugateRhs), EIGEN_LOGICAL_XOR(LhsSelfAdjoint,LhsStorageOrder==RowMajor) ? 
ColMajor : RowMajor, @@ -237,7 +239,7 @@ struct ei_product_selfadjoint_matrix -struct ei_product_selfadjoint_matrix +struct product_selfadjoint_matrix { static EIGEN_DONT_INLINE void run( @@ -249,10 +251,10 @@ struct ei_product_selfadjoint_matrix lhs(_lhs,lhsStride); - ei_const_blas_data_mapper rhs(_rhs,rhsStride); + const_blas_data_mapper lhs(_lhs,lhsStride); + const_blas_data_mapper rhs(_rhs,rhsStride); - typedef ei_gebp_traits Traits; + typedef gebp_traits Traits; Index kc = size; // cache block size along the K direction Index mc = rows; // cache block size along the M direction @@ -267,10 +269,10 @@ struct ei_product_selfadjoint_matrix gebp_kernel; - ei_symm_pack_lhs pack_lhs; - ei_gemm_pack_rhs pack_rhs; - ei_gemm_pack_lhs pack_lhs_transposed; + gebp_kernel gebp_kernel; + symm_pack_lhs pack_lhs; + gemm_pack_rhs pack_rhs; + gemm_pack_lhs pack_lhs_transposed; for(Index k2=0; k2() + gemm_pack_lhs() (blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc); gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha); @@ -321,7 +323,7 @@ struct ei_product_selfadjoint_matrix -struct ei_product_selfadjoint_matrix +struct product_selfadjoint_matrix { static EIGEN_DONT_INLINE void run( @@ -333,9 +335,9 @@ struct ei_product_selfadjoint_matrix lhs(_lhs,lhsStride); + const_blas_data_mapper lhs(_lhs,lhsStride); - typedef ei_gebp_traits Traits; + typedef gebp_traits Traits; Index kc = size; // cache block size along the K direction Index mc = rows; // cache block size along the M direction @@ -348,9 +350,9 @@ struct ei_product_selfadjoint_matrix gebp_kernel; - ei_gemm_pack_lhs pack_lhs; - ei_symm_pack_rhs pack_rhs; + gebp_kernel gebp_kernel; + gemm_pack_lhs pack_lhs; + symm_pack_rhs pack_rhs; for(Index k2=0; k2 -struct ei_traits > - : ei_traits, Lhs, Rhs> > +struct traits > + : traits, Lhs, Rhs> > {}; +} template struct SelfadjointProductMatrix @@ -399,7 +405,7 @@ struct SelfadjointProductMatrix template void scaleAndAddTo(Dest& dst, Scalar alpha) const 
{ - ei_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); + eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs); const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs); @@ -407,18 +413,18 @@ struct SelfadjointProductMatrix Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) * RhsBlasTraits::extractScalarFactor(m_rhs); - ei_product_selfadjoint_matrix::Flags &RowMajorBit) ? RowMajor : ColMajor, LhsIsSelfAdjoint, + internal::traits::Flags &RowMajorBit) ? RowMajor : ColMajor, LhsIsSelfAdjoint, NumTraits::IsComplex && EIGEN_LOGICAL_XOR(LhsIsUpper,bool(LhsBlasTraits::NeedToConjugate)), EIGEN_LOGICAL_XOR(RhsIsUpper, - ei_traits::Flags &RowMajorBit) ? RowMajor : ColMajor, RhsIsSelfAdjoint, + internal::traits::Flags &RowMajorBit) ? RowMajor : ColMajor, RhsIsSelfAdjoint, NumTraits::IsComplex && EIGEN_LOGICAL_XOR(RhsIsUpper,bool(RhsBlasTraits::NeedToConjugate)), - ei_traits::Flags&RowMajorBit ? RowMajor : ColMajor> + internal::traits::Flags&RowMajorBit ? 
RowMajor : ColMajor> ::run( lhs.rows(), rhs.cols(), // sizes - &lhs.coeff(0,0), lhs.outerStride(), // lhs info - &rhs.coeff(0,0), rhs.outerStride(), // rhs info + &lhs.coeffRef(0,0), lhs.outerStride(), // lhs info + &rhs.coeffRef(0,0), rhs.outerStride(), // rhs info &dst.coeffRef(0,0), dst.outerStride(), // result info actualAlpha // alpha ); diff --git a/gtsam/3rdparty/Eigen/src/Core/products/SelfadjointMatrixVector.h b/gtsam/3rdparty/Eigen/src/Core/products/SelfadjointMatrixVector.h index df7509f9a..1d433e16d 100644 --- a/gtsam/3rdparty/Eigen/src/Core/products/SelfadjointMatrixVector.h +++ b/gtsam/3rdparty/Eigen/src/Core/products/SelfadjointMatrixVector.h @@ -25,19 +25,23 @@ #ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_H #define EIGEN_SELFADJOINT_MATRIX_VECTOR_H +namespace internal { + /* Optimized selfadjoint matrix * vector product: * This algorithm processes 2 columns at onces that allows to both reduce * the number of load/stores of the result by a factor 2 and to reduce * the instruction dependency. 
*/ template -static EIGEN_DONT_INLINE void ei_product_selfadjoint_vector( +static EIGEN_DONT_INLINE void product_selfadjoint_vector( Index size, const Scalar* lhs, Index lhsStride, const Scalar* _rhs, Index rhsIncr, - Scalar* res, Scalar alpha) + Scalar* res, + Scalar alpha) { - typedef typename ei_packet_traits::type Packet; + typedef typename packet_traits::type Packet; + typedef typename NumTraits::Real RealScalar; const Index PacketSize = sizeof(Packet)/sizeof(Scalar); enum { @@ -46,14 +50,16 @@ static EIGEN_DONT_INLINE void ei_product_selfadjoint_vector( FirstTriangular = IsRowMajor == IsLower }; - ei_conj_helper::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, IsRowMajor), ConjugateRhs> cj0; - ei_conj_helper::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> cj1; + conj_helper::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, IsRowMajor), ConjugateRhs> cj0; + conj_helper::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> cj1; + conj_helper::IsComplex, ConjugateRhs> cjd; - ei_conj_helper::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, IsRowMajor), ConjugateRhs> pcj0; - ei_conj_helper::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> pcj1; + conj_helper::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, IsRowMajor), ConjugateRhs> pcj0; + conj_helper::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> pcj1; - Scalar cjAlpha = ConjugateRhs ? ei_conj(alpha) : alpha; + Scalar cjAlpha = ConjugateRhs ? conj(alpha) : alpha; + // FIXME this copy is now handled outside product_selfadjoint_vector, so it could probably be removed. 
// if the rhs is not sequentially stored in memory we copy it to a temporary buffer, // this is because we need to extract packets const Scalar* EIGEN_RESTRICT rhs = _rhs; @@ -77,39 +83,39 @@ static EIGEN_DONT_INLINE void ei_product_selfadjoint_vector( register const Scalar* EIGEN_RESTRICT A1 = lhs + (j+1)*lhsStride; Scalar t0 = cjAlpha * rhs[j]; - Packet ptmp0 = ei_pset1(t0); + Packet ptmp0 = pset1(t0); Scalar t1 = cjAlpha * rhs[j+1]; - Packet ptmp1 = ei_pset1(t1); + Packet ptmp1 = pset1(t1); Scalar t2 = 0; - Packet ptmp2 = ei_pset1(t2); + Packet ptmp2 = pset1(t2); Scalar t3 = 0; - Packet ptmp3 = ei_pset1(t3); + Packet ptmp3 = pset1(t3); size_t starti = FirstTriangular ? 0 : j+2; size_t endi = FirstTriangular ? j : size; - size_t alignedEnd = starti; - size_t alignedStart = (starti) + ei_first_aligned(&res[starti], endi-starti); - alignedEnd = alignedStart + ((endi-alignedStart)/(PacketSize))*(PacketSize); + size_t alignedStart = (starti) + first_aligned(&res[starti], endi-starti); + size_t alignedEnd = alignedStart + ((endi-alignedStart)/(PacketSize))*(PacketSize); - res[j] += cj0.pmul(A0[j], t0); + // TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed + res[j] += cjd.pmul(internal::real(A0[j]), t0); + res[j+1] += cjd.pmul(internal::real(A1[j+1]), t1); if(FirstTriangular) { - res[j+1] += cj0.pmul(A1[j+1], t1); res[j] += cj0.pmul(A1[j], t1); t3 += cj1.pmul(A1[j], rhs[j]); } else { - res[j+1] += cj0.pmul(A0[j+1],t0) + cj0.pmul(A1[j+1],t1); + res[j+1] += cj0.pmul(A0[j+1],t0); t2 += cj1.pmul(A0[j+1], rhs[j+1]); } for (size_t i=starti; i huge speed up) // gcc 4.2 does this optimization automatically. 
@@ -119,15 +125,15 @@ static EIGEN_DONT_INLINE void ei_product_selfadjoint_vector( Scalar* EIGEN_RESTRICT resIt = res + alignedStart; for (size_t i=alignedStart; i(a0It); a0It += PacketSize; - Packet A1i = ei_ploadu(a1It); a1It += PacketSize; - Packet Bi = ei_ploadu(rhsIt); rhsIt += PacketSize; // FIXME should be aligned in most cases - Packet Xi = ei_pload (resIt); + Packet A0i = ploadu(a0It); a0It += PacketSize; + Packet A1i = ploadu(a1It); a1It += PacketSize; + Packet Bi = ploadu(rhsIt); rhsIt += PacketSize; // FIXME should be aligned in most cases + Packet Xi = pload (resIt); Xi = pcj0.pmadd(A0i,ptmp0, pcj0.pmadd(A1i,ptmp1,Xi)); ptmp2 = pcj1.pmadd(A0i, Bi, ptmp2); ptmp3 = pcj1.pmadd(A1i, Bi, ptmp3); - ei_pstore(resIt,Xi); resIt += PacketSize; + pstore(resIt,Xi); resIt += PacketSize; } for (size_t i=alignedEnd; i(rhs), size); } +} // end namespace internal + /*************************************************************************** -* Wrapper to ei_product_selfadjoint_vector +* Wrapper to product_selfadjoint_vector ***************************************************************************/ +namespace internal { template -struct ei_traits > - : ei_traits, Lhs, Rhs> > +struct traits > + : traits, Lhs, Rhs> > {}; +} template struct SelfadjointProductMatrix @@ -178,9 +190,13 @@ struct SelfadjointProductMatrix SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} - template void scaleAndAddTo(Dest& dst, Scalar alpha) const + template void scaleAndAddTo(Dest& dest, Scalar alpha) const { - ei_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); + typedef typename Dest::Scalar ResScalar; + typedef typename Base::RhsScalar RhsScalar; + typedef Map, Aligned> MappedDest; + + eigen_assert(dest.rows()==m_lhs.rows() && dest.cols()==m_rhs.cols()); const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs); const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs); @@ -188,23 +204,75 @@ struct SelfadjointProductMatrix Scalar actualAlpha = alpha * 
LhsBlasTraits::extractScalarFactor(m_lhs) * RhsBlasTraits::extractScalarFactor(m_rhs); - ei_assert(dst.innerStride()==1 && "not implemented yet"); - - ei_product_selfadjoint_vector::Flags&RowMajorBit) ? RowMajor : ColMajor, int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)> + enum { + EvalToDest = (Dest::InnerStrideAtCompileTime==1), + UseRhs = (_ActualRhsType::InnerStrideAtCompileTime==1) + }; + + internal::gemv_static_vector_if static_dest; + internal::gemv_static_vector_if static_rhs; + + bool freeDestPtr = false; + ResScalar* actualDestPtr; + if(EvalToDest) + actualDestPtr = dest.data(); + else + { + #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + int size = dest.size(); + EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #endif + if((actualDestPtr=static_dest.data())==0) + { + freeDestPtr = true; + actualDestPtr = ei_aligned_stack_new(ResScalar,dest.size()); + } + MappedDest(actualDestPtr, dest.size()) = dest; + } + + bool freeRhsPtr = false; + RhsScalar* actualRhsPtr; + if(UseRhs) + actualRhsPtr = const_cast(rhs.data()); + else + { + #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + int size = rhs.size(); + EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #endif + if((actualRhsPtr=static_rhs.data())==0) + { + freeRhsPtr = true; + actualRhsPtr = ei_aligned_stack_new(RhsScalar,rhs.size()); + } + Map(actualRhsPtr, rhs.size()) = rhs; + } + + + internal::product_selfadjoint_vector::Flags&RowMajorBit) ? 
RowMajor : ColMajor, int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)> ( - lhs.rows(), // size - &lhs.coeff(0,0), lhs.outerStride(), // lhs info - &rhs.coeff(0), rhs.innerStride(), // rhs info - &dst.coeffRef(0), // result info - actualAlpha // scale factor + lhs.rows(), // size + &lhs.coeffRef(0,0), lhs.outerStride(), // lhs info + actualRhsPtr, 1, // rhs info + actualDestPtr, // result info + actualAlpha // scale factor ); + + if(!EvalToDest) + { + dest = MappedDest(actualDestPtr, dest.size()); + if(freeDestPtr) ei_aligned_stack_delete(ResScalar, actualDestPtr, dest.size()); + } + if(freeRhsPtr) ei_aligned_stack_delete(RhsScalar, actualRhsPtr, rhs.size()); } }; +namespace internal { template -struct ei_traits > - : ei_traits, Lhs, Rhs> > +struct traits > + : traits, Lhs, Rhs> > {}; +} template struct SelfadjointProductMatrix @@ -218,28 +286,12 @@ struct SelfadjointProductMatrix SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} - template void scaleAndAddTo(Dest& dst, Scalar alpha) const + template void scaleAndAddTo(Dest& dest, Scalar alpha) const { - ei_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); - - const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs); - const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs); - - Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) - * RhsBlasTraits::extractScalarFactor(m_rhs); - - ei_assert(dst.innerStride()==1 && "not implemented yet"); - - // transpose the product - ei_product_selfadjoint_vector::Flags&RowMajorBit) ? ColMajor : RowMajor, int(RhsUpLo)==Upper ? 
Lower : Upper, - bool(RhsBlasTraits::NeedToConjugate), bool(LhsBlasTraits::NeedToConjugate)> - ( - rhs.rows(), // size - &rhs.coeff(0,0), rhs.outerStride(), // lhs info - &lhs.coeff(0), lhs.innerStride(), // rhs info - &dst.coeffRef(0), // result info - actualAlpha // scale factor - ); + // let's simply transpose the product + Transpose destT(dest); + SelfadjointProductMatrix, int(RhsUpLo)==Upper ? Lower : Upper, false, + Transpose, 0, true>(m_rhs.transpose(), m_lhs.transpose()).scaleAndAddTo(destT, alpha); } }; diff --git a/gtsam/3rdparty/Eigen/src/Core/products/SelfadjointProduct.h b/gtsam/3rdparty/Eigen/src/Core/products/SelfadjointProduct.h index 8f431c2e4..da2e6ee20 100644 --- a/gtsam/3rdparty/Eigen/src/Core/products/SelfadjointProduct.h +++ b/gtsam/3rdparty/Eigen/src/Core/products/SelfadjointProduct.h @@ -28,106 +28,106 @@ /********************************************************************** * This file implements a self adjoint product: C += A A^T updating only * half of the selfadjoint matrix C. -* It corresponds to the level 3 SYRK Blas routine. +* It corresponds to the level 3 SYRK and level 2 SYR Blas routines. 
**********************************************************************/ -// forward declarations (defined at the end of this file) -template -struct ei_sybb_kernel; +template +struct selfadjoint_rank1_update; -/* Optimized selfadjoint product (_SYRK) */ -template -struct ei_selfadjoint_product; - -// as usual if the result is row major => we transpose the product -template -struct ei_selfadjoint_product +template +struct selfadjoint_rank1_update { - static EIGEN_STRONG_INLINE void run(Index size, Index depth, const Scalar* mat, Index matStride, Scalar* res, Index resStride, Scalar alpha) + static void run(Index size, Scalar* mat, Index stride, const Scalar* vec, Scalar alpha) { - ei_selfadjoint_product - ::run(size, depth, mat, matStride, res, resStride, alpha); + internal::conj_if cj; + typedef Map > OtherMap; + typedef typename internal::conditional::type ConjRhsType; + for (Index i=0; i >(mat+stride*i+(UpLo==Lower ? i : 0), (UpLo==Lower ? size-i : (i+1))) + += (alpha * cj(vec[i])) * ConjRhsType(OtherMap(vec+(UpLo==Lower ? i : 0),UpLo==Lower ? 
size-i : (i+1))); + } } }; -template -struct ei_selfadjoint_product +template +struct selfadjoint_rank1_update { - - static EIGEN_DONT_INLINE void run( - Index size, Index depth, - const Scalar* _mat, Index matStride, - Scalar* res, Index resStride, - Scalar alpha) + static void run(Index size, Scalar* mat, Index stride, const Scalar* vec, Scalar alpha) { - ei_const_blas_data_mapper mat(_mat,matStride); + selfadjoint_rank1_update::run(size,mat,stride,vec,alpha); + } +}; -// if(AAT) -// alpha = ei_conj(alpha); +template +struct selfadjoint_product_selector; - typedef ei_gebp_traits Traits; +template +struct selfadjoint_product_selector +{ + static void run(MatrixType& mat, const OtherType& other, typename MatrixType::Scalar alpha) + { + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; + typedef internal::blas_traits OtherBlasTraits; + typedef typename OtherBlasTraits::DirectLinearAccessType ActualOtherType; + typedef typename internal::remove_all::type _ActualOtherType; + const ActualOtherType actualOther = OtherBlasTraits::extract(other.derived()); - Index kc = depth; // cache block size along the K direction - Index mc = size; // cache block size along the M direction - Index nc = size; // cache block size along the N direction - computeProductBlockingSizes(kc, mc, nc); - // !!! mc must be a multiple of nr: - if(mc>Traits::nr) - mc = (mc/Traits::nr)*Traits::nr; + Scalar actualAlpha = alpha * OtherBlasTraits::extractScalarFactor(other.derived()); - Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc); - std::size_t sizeW = kc*Traits::WorkSpaceFactor; - std::size_t sizeB = sizeW + kc*size; - Scalar* allocatedBlockB = ei_aligned_stack_new(Scalar, sizeB); - Scalar* blockB = allocatedBlockB + sizeW; - - // note that the actual rhs is the transpose/adjoint of mat enum { - ConjLhs = NumTraits::IsComplex && !AAT, - ConjRhs = NumTraits::IsComplex && AAT + StorageOrder = (internal::traits::Flags&RowMajorBit) ? 
RowMajor : ColMajor, + UseOtherDirectly = _ActualOtherType::InnerStrideAtCompileTime==1 }; - - ei_gebp_kernel gebp_kernel; - ei_gemm_pack_rhs pack_rhs; - ei_gemm_pack_lhs pack_lhs; - ei_sybb_kernel sybb; - - for(Index k2=0; k2 static_other; + + bool freeOtherPtr = false; + Scalar* actualOtherPtr; + if(UseOtherDirectly) + actualOtherPtr = const_cast(actualOther.data()); + else { - const Index actual_kc = std::min(k2+kc,depth)-k2; - - // note that the actual rhs is the transpose/adjoint of mat - pack_rhs(blockB, &mat(0,k2), matStride, actual_kc, size); - - for(Index i2=0; i2 processed with gebp or skipped - // 2 - the actual_mc x actual_mc symmetric block => processed with a special kernel - // 3 - after the diagonal => processed with gebp or skipped - if (UpLo==Lower) - gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, std::min(size,i2), alpha, - -1, -1, 0, 0, allocatedBlockB); - - sybb(res+resStride*i2 + i2, resStride, blockA, blockB + actual_kc*i2, actual_mc, actual_kc, alpha, allocatedBlockB); - - if (UpLo==Upper) - { - Index j2 = i2+actual_mc; - gebp_kernel(res+resStride*j2+i2, resStride, blockA, blockB+actual_kc*j2, actual_mc, actual_kc, std::max(Index(0), size-j2), alpha, - -1, -1, 0, 0, allocatedBlockB); - } + freeOtherPtr = true; + actualOtherPtr = ei_aligned_stack_new(Scalar,other.size()); } + Map(actualOtherPtr, actualOther.size()) = actualOther; } - ei_aligned_stack_delete(Scalar, blockA, kc*mc); - ei_aligned_stack_delete(Scalar, allocatedBlockB, sizeB); + + selfadjoint_rank1_update::IsComplex, + (!OtherBlasTraits::NeedToConjugate) && NumTraits::IsComplex> + ::run(other.size(), mat.data(), mat.outerStride(), actualOtherPtr, actualAlpha); + + if((!UseOtherDirectly) && freeOtherPtr) ei_aligned_stack_delete(Scalar, actualOtherPtr, other.size()); + } +}; + +template +struct selfadjoint_product_selector +{ + static void run(MatrixType& mat, const OtherType& other, typename MatrixType::Scalar alpha) + { + typedef typename MatrixType::Scalar 
Scalar; + typedef typename MatrixType::Index Index; + typedef internal::blas_traits OtherBlasTraits; + typedef typename OtherBlasTraits::DirectLinearAccessType ActualOtherType; + typedef typename internal::remove_all::type _ActualOtherType; + const ActualOtherType actualOther = OtherBlasTraits::extract(other.derived()); + + Scalar actualAlpha = alpha * OtherBlasTraits::extractScalarFactor(other.derived()); + + enum { IsRowMajor = (internal::traits::Flags&RowMajorBit) ? 1 : 0 }; + + internal::general_matrix_matrix_triangular_product::IsComplex, + Scalar, _ActualOtherType::Flags&RowMajorBit ? ColMajor : RowMajor, (!OtherBlasTraits::NeedToConjugate) && NumTraits::IsComplex, + MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor, UpLo> + ::run(mat.cols(), actualOther.cols(), + &actualOther.coeffRef(0,0), actualOther.outerStride(), &actualOther.coeffRef(0,0), actualOther.outerStride(), + mat.data(), mat.outerStride(), actualAlpha); } }; @@ -138,83 +138,9 @@ template SelfAdjointView& SelfAdjointView ::rankUpdate(const MatrixBase& u, Scalar alpha) { - typedef ei_blas_traits UBlasTraits; - typedef typename UBlasTraits::DirectLinearAccessType ActualUType; - typedef typename ei_cleantype::type _ActualUType; - const ActualUType actualU = UBlasTraits::extract(u.derived()); - - Scalar actualAlpha = alpha * UBlasTraits::extractScalarFactor(u.derived()); - - enum { IsRowMajor = (ei_traits::Flags&RowMajorBit) ? 1 : 0 }; - - ei_selfadjoint_product - ::run(_expression().cols(), actualU.cols(), &actualU.coeff(0,0), actualU.outerStride(), - const_cast(_expression().data()), _expression().outerStride(), actualAlpha); + selfadjoint_product_selector::run(_expression().const_cast_derived(), u.derived(), alpha); return *this; } - -// Optimized SYmmetric packed Block * packed Block product kernel. 
-// This kernel is built on top of the gebp kernel: -// - the current selfadjoint block (res) is processed per panel of actual_mc x BlockSize -// where BlockSize is set to the minimal value allowing gebp to be as fast as possible -// - then, as usual, each panel is split into three parts along the diagonal, -// the sub blocks above and below the diagonal are processed as usual, -// while the selfadjoint block overlapping the diagonal is evaluated into a -// small temporary buffer which is then accumulated into the result using a -// triangular traversal. -template -struct ei_sybb_kernel -{ - enum { - PacketSize = ei_packet_traits::size, - BlockSize = EIGEN_PLAIN_ENUM_MAX(mr,nr) - }; - void operator()(Scalar* res, Index resStride, const Scalar* blockA, const Scalar* blockB, Index size, Index depth, Scalar alpha, Scalar* workspace) - { - ei_gebp_kernel gebp_kernel; - Matrix buffer; - - // let's process the block per panel of actual_mc x BlockSize, - // again, each is split into three parts, etc. 
- for (Index j=0; j(BlockSize,size - j); - const Scalar* actual_b = blockB+j*depth; - - if(UpLo==Upper) - gebp_kernel(res+j*resStride, resStride, blockA, actual_b, j, depth, actualBlockSize, alpha, - -1, -1, 0, 0, workspace); - - // selfadjoint micro block - { - Index i = j; - buffer.setZero(); - // 1 - apply the kernel on the temporary buffer - gebp_kernel(buffer.data(), BlockSize, blockA+depth*i, actual_b, actualBlockSize, depth, actualBlockSize, alpha, - -1, -1, 0, 0, workspace); - // 2 - triangular accumulation - for(Index j1=0; j1 -struct ei_selfadjoint_rank2_update_selector; +struct selfadjoint_rank2_update_selector; template -struct ei_selfadjoint_rank2_update_selector +struct selfadjoint_rank2_update_selector { static void run(Scalar* mat, Index stride, const UType& u, const VType& v, Scalar alpha) { @@ -41,54 +43,60 @@ struct ei_selfadjoint_rank2_update_selector for (Index i=0; i >(mat+stride*i+i, size-i) += - (alpha * ei_conj(u.coeff(i))) * v.tail(size-i) - + (alpha * ei_conj(v.coeff(i))) * u.tail(size-i); + (conj(alpha) * conj(u.coeff(i))) * v.tail(size-i) + + (alpha * conj(v.coeff(i))) * u.tail(size-i); } } }; template -struct ei_selfadjoint_rank2_update_selector +struct selfadjoint_rank2_update_selector { static void run(Scalar* mat, Index stride, const UType& u, const VType& v, Scalar alpha) { const Index size = u.size(); for (Index i=0; i >(mat+stride*i, i+1) += - (alpha * ei_conj(u.coeff(i))) * v.head(i+1) - + (alpha * ei_conj(v.coeff(i))) * u.head(i+1); + (conj(alpha) * conj(u.coeff(i))) * v.head(i+1) + + (alpha * conj(v.coeff(i))) * u.head(i+1); } }; -template struct ei_conj_expr_if - : ei_meta_if::Scalar>,T> > {}; +template struct conj_expr_if + : conditional::Scalar>,T> > {}; +} // end namespace internal template template SelfAdjointView& SelfAdjointView ::rankUpdate(const MatrixBase& u, const MatrixBase& v, Scalar alpha) { - typedef ei_blas_traits UBlasTraits; + typedef internal::blas_traits UBlasTraits; typedef typename 
UBlasTraits::DirectLinearAccessType ActualUType; - typedef typename ei_cleantype::type _ActualUType; + typedef typename internal::remove_all::type _ActualUType; const ActualUType actualU = UBlasTraits::extract(u.derived()); - typedef ei_blas_traits VBlasTraits; + typedef internal::blas_traits VBlasTraits; typedef typename VBlasTraits::DirectLinearAccessType ActualVType; - typedef typename ei_cleantype::type _ActualVType; + typedef typename internal::remove_all::type _ActualVType; const ActualVType actualV = VBlasTraits::extract(v.derived()); - Scalar actualAlpha = alpha * UBlasTraits::extractScalarFactor(u.derived()) - * VBlasTraits::extractScalarFactor(v.derived()); + // If MatrixType is row major, then we use the routine for lower triangular in the upper triangular case and + // vice versa, and take the complex conjugate of all coefficients and vector entries. - enum { IsRowMajor = (ei_traits::Flags&RowMajorBit) ? 1 : 0 }; - ei_selfadjoint_rank2_update_selector::ret>::type, - typename ei_cleantype::ret>::type, + enum { IsRowMajor = (internal::traits::Flags&RowMajorBit) ? 1 : 0 }; + Scalar actualAlpha = alpha * UBlasTraits::extractScalarFactor(u.derived()) + * internal::conj(VBlasTraits::extractScalarFactor(v.derived())); + if (IsRowMajor) + actualAlpha = internal::conj(actualAlpha); + + internal::selfadjoint_rank2_update_selector::type>::type, + typename internal::remove_all::type>::type, (IsRowMajor ? int(UpLo==Upper ? 
Lower : Upper) : UpLo)> - ::run(const_cast(_expression().data()),_expression().outerStride(),actualU,actualV,actualAlpha); + ::run(_expression().const_cast_derived().data(),_expression().outerStride(),actualU,actualV,actualAlpha); return *this; } diff --git a/gtsam/3rdparty/Eigen/src/Core/products/TriangularMatrixMatrix.h b/gtsam/3rdparty/Eigen/src/Core/products/TriangularMatrixMatrix.h index cef5eeba1..66a2515d0 100644 --- a/gtsam/3rdparty/Eigen/src/Core/products/TriangularMatrixMatrix.h +++ b/gtsam/3rdparty/Eigen/src/Core/products/TriangularMatrixMatrix.h @@ -25,14 +25,16 @@ #ifndef EIGEN_TRIANGULAR_MATRIX_MATRIX_H #define EIGEN_TRIANGULAR_MATRIX_MATRIX_H +namespace internal { + // template -// struct ei_gemm_pack_lhs_triangular +// struct gemm_pack_lhs_triangular // { // Matrix::IsComplex && Conjugate> cj; -// ei_const_blas_data_mapper lhs(_lhs,lhsStride); +// conj_if::IsComplex && Conjugate> cj; +// const_blas_data_mapper lhs(_lhs,lhsStride); // int count = 0; // const int peeled_mc = (rows/mr)*mr; // for(int i=0; i -struct ei_product_triangular_matrix_matrix; +struct product_triangular_matrix_matrix; template -struct ei_product_triangular_matrix_matrix { @@ -74,7 +76,7 @@ struct ei_product_triangular_matrix_matrix -struct ei_product_triangular_matrix_matrix { @@ -102,10 +104,10 @@ struct ei_product_triangular_matrix_matrix lhs(_lhs,lhsStride); - ei_const_blas_data_mapper rhs(_rhs,rhsStride); + const_blas_data_mapper lhs(_lhs,lhsStride); + const_blas_data_mapper rhs(_rhs,rhsStride); - typedef ei_gebp_traits Traits; + typedef gebp_traits Traits; enum { SmallPanelWidth = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr), IsLower = (Mode&Lower) == Lower, @@ -130,9 +132,9 @@ struct ei_product_triangular_matrix_matrix gebp_kernel; - ei_gemm_pack_lhs pack_lhs; - ei_gemm_pack_rhs pack_rhs; + gebp_kernel gebp_kernel; + gemm_pack_lhs pack_lhs; + gemm_pack_rhs pack_rhs; for(Index k2=IsLower ? depth : 0; IsLower ? 
k2>0 : k2() + gemm_pack_lhs() (blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc); gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha); @@ -217,7 +219,7 @@ struct ei_product_triangular_matrix_matrix -struct ei_product_triangular_matrix_matrix { @@ -229,10 +231,10 @@ struct ei_product_triangular_matrix_matrix lhs(_lhs,lhsStride); - ei_const_blas_data_mapper rhs(_rhs,rhsStride); + const_blas_data_mapper lhs(_lhs,lhsStride); + const_blas_data_mapper rhs(_rhs,rhsStride); - typedef ei_gebp_traits Traits; + typedef gebp_traits Traits; enum { SmallPanelWidth = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr), IsLower = (Mode&Lower) == Lower, @@ -257,10 +259,10 @@ struct ei_product_triangular_matrix_matrix gebp_kernel; - ei_gemm_pack_lhs pack_lhs; - ei_gemm_pack_rhs pack_rhs; - ei_gemm_pack_rhs pack_rhs_panel; + gebp_kernel gebp_kernel; + gemm_pack_lhs pack_lhs; + gemm_pack_rhs pack_rhs; + gemm_pack_rhs pack_rhs_panel; for(Index k2=IsLower ? 0 : depth; IsLower ? k20; @@ -352,14 +354,16 @@ struct ei_product_triangular_matrix_matrix -struct ei_traits > - : ei_traits, Lhs, Rhs> > +struct traits > + : traits, Lhs, Rhs> > {}; +} // end namespace internal + template struct TriangularProduct : public ProductBase, Lhs, Rhs > @@ -376,19 +380,20 @@ struct TriangularProduct Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) * RhsBlasTraits::extractScalarFactor(m_rhs); - ei_product_triangular_matrix_matrix::Flags&RowMajorBit) ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate, - (ei_traits<_ActualRhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate, - (ei_traits::Flags&RowMajorBit) ? RowMajor : ColMajor> + (internal::traits<_ActualLhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate, + (internal::traits<_ActualRhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate, + (internal::traits::Flags&RowMajorBit) ? 
RowMajor : ColMajor> ::run( lhs.rows(), rhs.cols(), lhs.cols(),// LhsIsTriangular ? rhs.cols() : lhs.rows(), // sizes - &lhs.coeff(0,0), lhs.outerStride(), // lhs info - &rhs.coeff(0,0), rhs.outerStride(), // rhs info + &lhs.coeffRef(0,0), lhs.outerStride(), // lhs info + &rhs.coeffRef(0,0), rhs.outerStride(), // rhs info &dst.coeffRef(0,0), dst.outerStride(), // result info actualAlpha // alpha ); } }; + #endif // EIGEN_TRIANGULAR_MATRIX_MATRIX_H diff --git a/gtsam/3rdparty/Eigen/src/Core/products/TriangularMatrixVector.h b/gtsam/3rdparty/Eigen/src/Core/products/TriangularMatrixVector.h index 67c131ab2..23aa52ade 100644 --- a/gtsam/3rdparty/Eigen/src/Core/products/TriangularMatrixVector.h +++ b/gtsam/3rdparty/Eigen/src/Core/products/TriangularMatrixVector.h @@ -25,43 +25,41 @@ #ifndef EIGEN_TRIANGULARMATRIXVECTOR_H #define EIGEN_TRIANGULARMATRIXVECTOR_H -template -struct ei_product_triangular_vector_selector; +namespace internal { -template -struct ei_product_triangular_vector_selector -{ - static EIGEN_DONT_INLINE void run(const Lhs& lhs, const Rhs& rhs, Result& res, typename ei_traits::Scalar alpha) - { - typedef Transpose TrRhs; TrRhs trRhs(rhs); - typedef Transpose TrLhs; TrLhs trLhs(lhs); - typedef Transpose TrRes; TrRes trRes(res); - ei_product_triangular_vector_selector - ::run(trRhs,trLhs,trRes,alpha); - } -}; +template +struct product_triangular_matrix_vector; -template -struct ei_product_triangular_vector_selector +template +struct product_triangular_matrix_vector { - typedef typename Rhs::Scalar Scalar; - typedef typename Rhs::Index Index; + typedef typename scalar_product_traits::ReturnType ResScalar; enum { IsLower = ((Mode&Lower)==Lower), HasUnitDiag = (Mode & UnitDiag)==UnitDiag }; - static EIGEN_DONT_INLINE void run(const Lhs& lhs, const Rhs& rhs, Result& res, typename ei_traits::Scalar alpha) + static EIGEN_DONT_INLINE void run(Index rows, Index cols, const LhsScalar* _lhs, Index lhsStride, + const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, 
Index resIncr, ResScalar alpha) { + EIGEN_UNUSED_VARIABLE(resIncr); + eigen_assert(resIncr==1); + static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; - typename ei_conj_expr_if::ret cjLhs(lhs); - typename ei_conj_expr_if::ret cjRhs(rhs); - Index size = lhs.cols(); - for (Index pi=0; pi, 0, OuterStride<> > LhsMap; + const LhsMap lhs(_lhs,rows,cols,OuterStride<>(lhsStride)); + typename conj_expr_if::type cjLhs(lhs); + + typedef Map, 0, InnerStride<> > RhsMap; + const RhsMap rhs(_rhs,cols,InnerStride<>(rhsIncr)); + typename conj_expr_if::type cjRhs(rhs); + + typedef Map > ResMap; + ResMap res(_res,rows); + + for (Index pi=0; pi0) { Index s = IsLower ? pi+actualPanelWidth : 0; - ei_general_matrix_vector_product::run( + general_matrix_vector_product::run( r, actualPanelWidth, - &(lhs.const_cast_derived().coeffRef(s,pi)), lhs.outerStride(), - &rhs.coeff(pi), rhs.innerStride(), - &res.coeffRef(s), res.innerStride(), alpha); + &lhs.coeffRef(s,pi), lhsStride, + &rhs.coeffRef(pi), rhsIncr, + &res.coeffRef(s), resIncr, alpha); } } } }; -template -struct ei_product_triangular_vector_selector +template +struct product_triangular_matrix_vector { - typedef typename Rhs::Scalar Scalar; - typedef typename Rhs::Index Index; + typedef typename scalar_product_traits::ReturnType ResScalar; enum { IsLower = ((Mode&Lower)==Lower), HasUnitDiag = (Mode & UnitDiag)==UnitDiag }; - static void run(const Lhs& lhs, const Rhs& rhs, Result& res, typename ei_traits::Scalar alpha) + static void run(Index rows, Index cols, const LhsScalar* _lhs, Index lhsStride, + const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, ResScalar alpha) { + eigen_assert(rhsIncr==1); + EIGEN_UNUSED_VARIABLE(rhsIncr); + static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; - typename ei_conj_expr_if::ret cjLhs(lhs); - typename ei_conj_expr_if::ret cjRhs(rhs); - Index size = lhs.cols(); - for (Index pi=0; pi, 0, OuterStride<> > LhsMap; + const LhsMap 
lhs(_lhs,rows,cols,OuterStride<>(lhsStride)); + typename conj_expr_if::type cjLhs(lhs); + + typedef Map > RhsMap; + const RhsMap rhs(_rhs,cols); + typename conj_expr_if::type cjRhs(rhs); + + typedef Map, 0, InnerStride<> > ResMap; + ResMap res(_res,rows,InnerStride<>(resIncr)); + + for (Index pi=0; pi0) { Index s = IsLower ? 0 : pi + actualPanelWidth; - ei_general_matrix_vector_product::run( + general_matrix_vector_product::run( actualPanelWidth, r, - &(lhs.const_cast_derived().coeffRef(pi,s)), lhs.outerStride(), - &(rhs.const_cast_derived().coeffRef(s)), 1, - &res.coeffRef(pi,0), res.innerStride(), alpha); + &lhs.coeffRef(pi,s), lhsStride, + &rhs.coeffRef(s), rhsIncr, + &res.coeffRef(pi), resIncr, alpha); } } } }; /*************************************************************************** -* Wrapper to ei_product_triangular_vector +* Wrapper to product_triangular_vector ***************************************************************************/ template -struct ei_traits > - : ei_traits, Lhs, Rhs> > +struct traits > + : traits, Lhs, Rhs> > {}; template -struct ei_traits > - : ei_traits, Lhs, Rhs> > +struct traits > + : traits, Lhs, Rhs> > {}; + +template +struct trmv_selector; + +} // end namespace internal + template struct TriangularProduct : public ProductBase, Lhs, Rhs > @@ -152,21 +168,9 @@ struct TriangularProduct template void scaleAndAddTo(Dest& dst, Scalar alpha) const { - ei_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); - - const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs); - const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs); - - Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) - * RhsBlasTraits::extractScalarFactor(m_rhs); - - ei_product_triangular_vector_selector - ::Flags)&RowMajorBit) ? RowMajor : ColMajor> - ::run(lhs,rhs,dst,actualAlpha); + eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); + + internal::trmv_selector<(int(internal::traits::Flags)&RowMajorBit) ? 
RowMajor : ColMajor>::run(*this, dst, alpha); } }; @@ -180,23 +184,167 @@ struct TriangularProduct template void scaleAndAddTo(Dest& dst, Scalar alpha) const { - - ei_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); - - const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs); - const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs); - - Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) - * RhsBlasTraits::extractScalarFactor(m_rhs); - - ei_product_triangular_vector_selector - ::Flags)&RowMajorBit) ? RowMajor : ColMajor> - ::run(lhs,rhs,dst,actualAlpha); + eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); + + typedef TriangularProduct<(Mode & UnitDiag) | ((Mode & Lower) ? Upper : Lower),true,Transpose,false,Transpose,true> TriangularProductTranspose; + Transpose dstT(dst); + internal::trmv_selector<(int(internal::traits::Flags)&RowMajorBit) ? ColMajor : RowMajor>::run( + TriangularProductTranspose(m_rhs.transpose(),m_lhs.transpose()), dstT, alpha); } }; +namespace internal { + +// TODO: find a way to factorize this piece of code with gemv_selector since the logic is exactly the same. 
+ +template<> struct trmv_selector +{ + template + static void run(const TriangularProduct& prod, Dest& dest, typename TriangularProduct::Scalar alpha) + { + typedef TriangularProduct ProductType; + typedef typename ProductType::Index Index; + typedef typename ProductType::LhsScalar LhsScalar; + typedef typename ProductType::RhsScalar RhsScalar; + typedef typename ProductType::Scalar ResScalar; + typedef typename ProductType::RealScalar RealScalar; + typedef typename ProductType::ActualLhsType ActualLhsType; + typedef typename ProductType::ActualRhsType ActualRhsType; + typedef typename ProductType::LhsBlasTraits LhsBlasTraits; + typedef typename ProductType::RhsBlasTraits RhsBlasTraits; + typedef Map, Aligned> MappedDest; + + const ActualLhsType actualLhs = LhsBlasTraits::extract(prod.lhs()); + const ActualRhsType actualRhs = RhsBlasTraits::extract(prod.rhs()); + + ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs()) + * RhsBlasTraits::extractScalarFactor(prod.rhs()); + + enum { + // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 + // on, the other hand it is good for the cache to pack the vector anyways... 
+ EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1, + ComplexByReal = (NumTraits::IsComplex) && (!NumTraits::IsComplex), + MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal + }; + + gemv_static_vector_if static_dest; + + bool alphaIsCompatible = (!ComplexByReal) || (imag(actualAlpha)==RealScalar(0)); + bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible; + + RhsScalar compatibleAlpha = get_factor::run(actualAlpha); + + ResScalar* actualDestPtr; + bool freeDestPtr = false; + if (evalToDest) + { + actualDestPtr = dest.data(); + } + else + { + #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + int size = dest.size(); + EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #endif + if((actualDestPtr = static_dest.data())==0) + { + freeDestPtr = true; + actualDestPtr = ei_aligned_stack_new(ResScalar,dest.size()); + } + if(!alphaIsCompatible) + { + MappedDest(actualDestPtr, dest.size()).setZero(); + compatibleAlpha = RhsScalar(1); + } + else + MappedDest(actualDestPtr, dest.size()) = dest; + } + + internal::product_triangular_matrix_vector + + ::run(actualLhs.rows(),actualLhs.cols(), + actualLhs.data(),actualLhs.outerStride(), + actualRhs.data(),actualRhs.innerStride(), + actualDestPtr,1,compatibleAlpha); + + if (!evalToDest) + { + if(!alphaIsCompatible) + dest += actualAlpha * MappedDest(actualDestPtr, dest.size()); + else + dest = MappedDest(actualDestPtr, dest.size()); + if(freeDestPtr) ei_aligned_stack_delete(ResScalar, actualDestPtr, dest.size()); + } + } +}; + +template<> struct trmv_selector +{ + template + static void run(const TriangularProduct& prod, Dest& dest, typename TriangularProduct::Scalar alpha) + { + typedef TriangularProduct ProductType; + typedef typename ProductType::LhsScalar LhsScalar; + typedef typename ProductType::RhsScalar RhsScalar; + typedef typename ProductType::Scalar ResScalar; + typedef typename ProductType::Index Index; + typedef typename ProductType::ActualLhsType ActualLhsType; + typedef typename 
ProductType::ActualRhsType ActualRhsType; + typedef typename ProductType::_ActualRhsType _ActualRhsType; + typedef typename ProductType::LhsBlasTraits LhsBlasTraits; + typedef typename ProductType::RhsBlasTraits RhsBlasTraits; + + typename add_const::type actualLhs = LhsBlasTraits::extract(prod.lhs()); + typename add_const::type actualRhs = RhsBlasTraits::extract(prod.rhs()); + + ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs()) + * RhsBlasTraits::extractScalarFactor(prod.rhs()); + + enum { + DirectlyUseRhs = _ActualRhsType::InnerStrideAtCompileTime==1 + }; + + gemv_static_vector_if static_rhs; + + RhsScalar* actualRhsPtr; + bool freeRhsPtr = false; + if (DirectlyUseRhs) + { + actualRhsPtr = const_cast(actualRhs.data()); + } + else + { + #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + int size = actualRhs.size(); + EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #endif + if((actualRhsPtr = static_rhs.data())==0) + { + freeRhsPtr = true; + actualRhsPtr = ei_aligned_stack_new(RhsScalar, actualRhs.size()); + } + Map(actualRhsPtr, actualRhs.size()) = actualRhs; + } + + internal::product_triangular_matrix_vector + + ::run(actualLhs.rows(),actualLhs.cols(), + actualLhs.data(),actualLhs.outerStride(), + actualRhsPtr,1, + dest.data(),dest.innerStride(), + actualAlpha); + + if((!DirectlyUseRhs) && freeRhsPtr) ei_aligned_stack_delete(RhsScalar, actualRhsPtr, prod.rhs().size()); + } +}; + +} // end namespace internal + #endif // EIGEN_TRIANGULARMATRIXVECTOR_H diff --git a/gtsam/3rdparty/Eigen/src/Core/products/TriangularSolverMatrix.h b/gtsam/3rdparty/Eigen/src/Core/products/TriangularSolverMatrix.h index 7163a800a..8b9143c2b 100644 --- a/gtsam/3rdparty/Eigen/src/Core/products/TriangularSolverMatrix.h +++ b/gtsam/3rdparty/Eigen/src/Core/products/TriangularSolverMatrix.h @@ -25,16 +25,18 @@ #ifndef EIGEN_TRIANGULAR_SOLVER_MATRIX_H #define EIGEN_TRIANGULAR_SOLVER_MATRIX_H +namespace internal { + // if the rhs is row major, let's transpose the product template -struct 
ei_triangular_solve_matrix +struct triangular_solve_matrix { static EIGEN_DONT_INLINE void run( Index size, Index cols, const Scalar* tri, Index triStride, Scalar* _other, Index otherStride) { - ei_triangular_solve_matrix< + triangular_solve_matrix< Scalar, Index, Side==OnTheLeft?OnTheRight:OnTheLeft, (Mode&UnitDiag) | ((Mode&Upper) ? Lower : Upper), NumTraits::IsComplex && Conjugate, @@ -46,7 +48,7 @@ struct ei_triangular_solve_matrix -struct ei_triangular_solve_matrix +struct triangular_solve_matrix { static EIGEN_DONT_INLINE void run( Index size, Index otherSize, @@ -54,10 +56,10 @@ struct ei_triangular_solve_matrix tri(_tri,triStride); - ei_blas_data_mapper other(_other,otherStride); + const_blas_data_mapper tri(_tri,triStride); + blas_data_mapper other(_other,otherStride); - typedef ei_gebp_traits Traits; + typedef gebp_traits Traits; enum { SmallPanelWidth = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr), IsLower = (Mode&Lower) == Lower @@ -74,10 +76,10 @@ struct ei_triangular_solve_matrix conj; - ei_gebp_kernel gebp_kernel; - ei_gemm_pack_lhs pack_lhs; - ei_gemm_pack_rhs pack_rhs; + conj_if conj; + gebp_kernel gebp_kernel; + gemm_pack_lhs pack_lhs; + gemm_pack_rhs pack_rhs; for(Index k2=IsLower ? 0 : size; IsLower ? 
k20; @@ -181,7 +183,7 @@ struct ei_triangular_solve_matrix -struct ei_triangular_solve_matrix +struct triangular_solve_matrix { static EIGEN_DONT_INLINE void run( Index size, Index otherSize, @@ -189,10 +191,10 @@ struct ei_triangular_solve_matrix rhs(_tri,triStride); - ei_blas_data_mapper lhs(_other,otherStride); + const_blas_data_mapper rhs(_tri,triStride); + blas_data_mapper lhs(_other,otherStride); - typedef ei_gebp_traits Traits; + typedef gebp_traits Traits; enum { RhsStorageOrder = TriStorageOrder, SmallPanelWidth = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr), @@ -213,11 +215,11 @@ struct ei_triangular_solve_matrix conj; - ei_gebp_kernel gebp_kernel; - ei_gemm_pack_rhs pack_rhs; - ei_gemm_pack_rhs pack_rhs_panel; - ei_gemm_pack_lhs pack_lhs_panel; + conj_if conj; + gebp_kernel gebp_kernel; + gemm_pack_rhs pack_rhs; + gemm_pack_rhs pack_rhs_panel; + gemm_pack_lhs pack_lhs_panel; for(Index k2=IsLower ? size : 0; IsLower ? k2>0 : k2 +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
+ +#ifndef EIGEN_TRIANGULAR_SOLVER_VECTOR_H +#define EIGEN_TRIANGULAR_SOLVER_VECTOR_H + +namespace internal { + +template +struct triangular_solve_vector +{ + static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs) + { + triangular_solve_vector::run(size, _lhs, lhsStride, rhs); + } +}; + +// forward and backward substitution, row-major, rhs is a vector +template +struct triangular_solve_vector +{ + enum { + IsLower = ((Mode&Lower)==Lower) + }; + static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs) + { + typedef Map, 0, OuterStride<> > LhsMap; + const LhsMap lhs(_lhs,size,size,OuterStride<>(lhsStride)); + typename internal::conditional< + Conjugate, + const CwiseUnaryOp,LhsMap>, + const LhsMap&> + ::type cjLhs(lhs); + static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; + for(Index pi=IsLower ? 0 : size; + IsLower ? pi0; + IsLower ? pi+=PanelWidth : pi-=PanelWidth) + { + Index actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth); + + Index r = IsLower ? pi : size - pi; // remaining size + if (r > 0) + { + // let's directly call the low level product function because: + // 1 - it is faster to compile + // 2 - it is slighlty faster at runtime + Index startRow = IsLower ? pi : pi-actualPanelWidth; + Index startCol = IsLower ? 
0 : pi; + + general_matrix_vector_product::run( + actualPanelWidth, r, + &lhs.coeffRef(startRow,startCol), lhsStride, + rhs + startCol, 1, + rhs + startRow, 1, + RhsScalar(-1)); + } + + for(Index k=0; k0) + rhs[i] -= (cjLhs.row(i).segment(s,k).transpose().cwiseProduct(Map >(rhs+s,k))).sum(); + + if(!(Mode & UnitDiag)) + rhs[i] /= cjLhs(i,i); + } + } + } +}; + +// forward and backward substitution, column-major, rhs is a vector +template +struct triangular_solve_vector +{ + enum { + IsLower = ((Mode&Lower)==Lower) + }; + static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs) + { + typedef Map, 0, OuterStride<> > LhsMap; + const LhsMap lhs(_lhs,size,size,OuterStride<>(lhsStride)); + typename internal::conditional,LhsMap>, + const LhsMap& + >::type cjLhs(lhs); + static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; + + for(Index pi=IsLower ? 0 : size; + IsLower ? pi0; + IsLower ? pi+=PanelWidth : pi-=PanelWidth) + { + Index actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth); + Index startBlock = IsLower ? pi : pi-actualPanelWidth; + Index endBlock = IsLower ? pi + actualPanelWidth : 0; + + for(Index k=0; k0) + Map >(rhs+s,r) -= rhs[i] * cjLhs.col(i).segment(s,r); + } + Index r = IsLower ? 
size - endBlock : startBlock; // remaining size + if (r > 0) + { + // let's directly call the low level product function because: + // 1 - it is faster to compile + // 2 - it is slighlty faster at runtime + general_matrix_vector_product::run( + r, actualPanelWidth, + &lhs.coeffRef(endBlock,startBlock), lhsStride, + rhs+startBlock, 1, + rhs+endBlock, 1, RhsScalar(-1)); + } + } + } +}; + +} // end namespace internal + +#endif // EIGEN_TRIANGULAR_SOLVER_VECTOR_H diff --git a/gtsam/3rdparty/Eigen/src/Core/util/BlasUtil.h b/gtsam/3rdparty/Eigen/src/Core/util/BlasUtil.h index 972814dc9..f1d93d2f8 100644 --- a/gtsam/3rdparty/Eigen/src/Core/util/BlasUtil.h +++ b/gtsam/3rdparty/Eigen/src/Core/util/BlasUtil.h @@ -28,109 +28,111 @@ // This file contains many lightweight helper classes used to // implement and control fast level 2 and level 3 BLAS-like routines. +namespace internal { + // forward declarations template -struct ei_gebp_kernel; +struct gebp_kernel; template -struct ei_gemm_pack_rhs; +struct gemm_pack_rhs; template -struct ei_gemm_pack_lhs; +struct gemm_pack_lhs; template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int ResStorageOrder> -struct ei_general_matrix_matrix_product; +struct general_matrix_matrix_product; template -struct ei_general_matrix_vector_product; +struct general_matrix_vector_product; -template struct ei_conj_if; +template struct conj_if; -template<> struct ei_conj_if { +template<> struct conj_if { template - inline T operator()(const T& x) { return ei_conj(x); } + inline T operator()(const T& x) { return conj(x); } }; -template<> struct ei_conj_if { +template<> struct conj_if { template inline const T& operator()(const T& x) { return x; } }; -template struct ei_conj_helper +template struct conj_helper { - EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const { return ei_pmadd(x,y,c); } - EIGEN_STRONG_INLINE Scalar 
pmul(const Scalar& x, const Scalar& y) const { return ei_pmul(x,y); } + EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const { return internal::pmadd(x,y,c); } + EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const { return internal::pmul(x,y); } }; -template struct ei_conj_helper, std::complex, false,true> +template struct conj_helper, std::complex, false,true> { typedef std::complex Scalar; EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const { return c + pmul(x,y); } EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const - { return Scalar(ei_real(x)*ei_real(y) + ei_imag(x)*ei_imag(y), ei_imag(x)*ei_real(y) - ei_real(x)*ei_imag(y)); } + { return Scalar(real(x)*real(y) + imag(x)*imag(y), imag(x)*real(y) - real(x)*imag(y)); } }; -template struct ei_conj_helper, std::complex, true,false> +template struct conj_helper, std::complex, true,false> { typedef std::complex Scalar; EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const { return c + pmul(x,y); } EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const - { return Scalar(ei_real(x)*ei_real(y) + ei_imag(x)*ei_imag(y), ei_real(x)*ei_imag(y) - ei_imag(x)*ei_real(y)); } + { return Scalar(real(x)*real(y) + imag(x)*imag(y), real(x)*imag(y) - imag(x)*real(y)); } }; -template struct ei_conj_helper, std::complex, true,true> +template struct conj_helper, std::complex, true,true> { typedef std::complex Scalar; EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const { return c + pmul(x,y); } EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const - { return Scalar(ei_real(x)*ei_real(y) - ei_imag(x)*ei_imag(y), - ei_real(x)*ei_imag(y) - ei_imag(x)*ei_real(y)); } + { return Scalar(real(x)*real(y) - imag(x)*imag(y), - real(x)*imag(y) - imag(x)*real(y)); } }; -template struct ei_conj_helper, RealScalar, Conj,false> 
+template struct conj_helper, RealScalar, Conj,false> { typedef std::complex Scalar; EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const RealScalar& y, const Scalar& c) const - { return ei_padd(c, pmul(x,y)); } + { return padd(c, pmul(x,y)); } EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const RealScalar& y) const - { return ei_conj_if()(x)*y; } + { return conj_if()(x)*y; } }; -template struct ei_conj_helper, false,Conj> +template struct conj_helper, false,Conj> { typedef std::complex Scalar; EIGEN_STRONG_INLINE Scalar pmadd(const RealScalar& x, const Scalar& y, const Scalar& c) const - { return ei_padd(c, pmul(x,y)); } + { return padd(c, pmul(x,y)); } EIGEN_STRONG_INLINE Scalar pmul(const RealScalar& x, const Scalar& y) const - { return x*ei_conj_if()(y); } + { return x*conj_if()(y); } }; -template struct ei_get_factor { +template struct get_factor { EIGEN_STRONG_INLINE static To run(const From& x) { return x; } }; -template struct ei_get_factor::Real> { - EIGEN_STRONG_INLINE static typename NumTraits::Real run(const Scalar& x) { return ei_real(x); } +template struct get_factor::Real> { + EIGEN_STRONG_INLINE static typename NumTraits::Real run(const Scalar& x) { return real(x); } }; // Lightweight helper class to access matrix coefficients. // Yes, this is somehow redundant with Map<>, but this version is much much lighter, // and so I hope better compilation performance (time and code quality). template -class ei_blas_data_mapper +class blas_data_mapper { public: - ei_blas_data_mapper(Scalar* data, Index stride) : m_data(data), m_stride(stride) {} + blas_data_mapper(Scalar* data, Index stride) : m_data(data), m_stride(stride) {} EIGEN_STRONG_INLINE Scalar& operator()(Index i, Index j) { return m_data[StorageOrder==RowMajor ? 
j + i*m_stride : i + j*m_stride]; } protected: @@ -140,10 +142,10 @@ class ei_blas_data_mapper // lightweight helper class to access matrix coefficients (const version) template -class ei_const_blas_data_mapper +class const_blas_data_mapper { public: - ei_const_blas_data_mapper(const Scalar* data, Index stride) : m_data(data), m_stride(stride) {} + const_blas_data_mapper(const Scalar* data, Index stride) : m_data(data), m_stride(stride) {} EIGEN_STRONG_INLINE const Scalar& operator()(Index i, Index j) const { return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride]; } protected: @@ -155,9 +157,9 @@ class ei_const_blas_data_mapper /* Helper class to analyze the factors of a Product expression. * In particular it allows to pop out operator-, scalar multiples, * and conjugate */ -template struct ei_blas_traits +template struct blas_traits { - typedef typename ei_traits::Scalar Scalar; + typedef typename traits::Scalar Scalar; typedef const XprType& ExtractType; typedef XprType _ExtractType; enum { @@ -165,77 +167,75 @@ template struct ei_blas_traits IsTransposed = false, NeedToConjugate = false, HasUsableDirectAccess = ( (int(XprType::Flags)&DirectAccessBit) - && ( /* Uncomment this when the low-level matrix-vector product functions support strided vectors - bool(XprType::IsVectorAtCompileTime) - || */ - int(ei_inner_stride_at_compile_time::ret) == 1) - ) ? 1 : 0 + && ( bool(XprType::IsVectorAtCompileTime) + || int(inner_stride_at_compile_time::ret) == 1) + ) ? 
1 : 0 }; - typedef typename ei_meta_if::ret DirectLinearAccessType; - static inline ExtractType extract(const XprType& x) { return x; } - static inline Scalar extractScalarFactor(const XprType&) { return Scalar(1); } + >::type DirectLinearAccessType; + static inline const ExtractType extract(const XprType& x) { return x; } + static inline const Scalar extractScalarFactor(const XprType&) { return Scalar(1); } }; // pop conjugate template -struct ei_blas_traits, NestedXpr> > - : ei_blas_traits +struct blas_traits, NestedXpr> > + : blas_traits { - typedef ei_blas_traits Base; - typedef CwiseUnaryOp, NestedXpr> XprType; + typedef blas_traits Base; + typedef CwiseUnaryOp, NestedXpr> XprType; typedef typename Base::ExtractType ExtractType; enum { IsComplex = NumTraits::IsComplex, NeedToConjugate = Base::NeedToConjugate ? 0 : IsComplex }; - static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); } - static inline Scalar extractScalarFactor(const XprType& x) { return ei_conj(Base::extractScalarFactor(x.nestedExpression())); } + static inline const ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); } + static inline Scalar extractScalarFactor(const XprType& x) { return conj(Base::extractScalarFactor(x.nestedExpression())); } }; // pop scalar multiple template -struct ei_blas_traits, NestedXpr> > - : ei_blas_traits +struct blas_traits, NestedXpr> > + : blas_traits { - typedef ei_blas_traits Base; - typedef CwiseUnaryOp, NestedXpr> XprType; + typedef blas_traits Base; + typedef CwiseUnaryOp, NestedXpr> XprType; typedef typename Base::ExtractType ExtractType; - static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); } + static inline const ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); } static inline Scalar extractScalarFactor(const XprType& x) { return x.functor().m_other * Base::extractScalarFactor(x.nestedExpression()); } 
}; // pop opposite template -struct ei_blas_traits, NestedXpr> > - : ei_blas_traits +struct blas_traits, NestedXpr> > + : blas_traits { - typedef ei_blas_traits Base; - typedef CwiseUnaryOp, NestedXpr> XprType; + typedef blas_traits Base; + typedef CwiseUnaryOp, NestedXpr> XprType; typedef typename Base::ExtractType ExtractType; - static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); } + static inline const ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); } static inline Scalar extractScalarFactor(const XprType& x) { return - Base::extractScalarFactor(x.nestedExpression()); } }; // pop/push transpose template -struct ei_blas_traits > - : ei_blas_traits +struct blas_traits > + : blas_traits { typedef typename NestedXpr::Scalar Scalar; - typedef ei_blas_traits Base; + typedef blas_traits Base; typedef Transpose XprType; - typedef Transpose ExtractType; - typedef Transpose _ExtractType; - typedef typename ei_meta_if ExtractType; // const to get rid of a compile error; anyway blas traits are only used on the RHS + typedef Transpose _ExtractType; + typedef typename conditional::ret DirectLinearAccessType; + >::type DirectLinearAccessType; enum { IsTransposed = Base::IsTransposed ? 
0 : 1 }; @@ -243,22 +243,29 @@ struct ei_blas_traits > static inline Scalar extractScalarFactor(const XprType& x) { return Base::extractScalarFactor(x.nestedExpression()); } }; -template::HasUsableDirectAccess> -struct ei_extract_data_selector { +template +struct blas_traits + : blas_traits +{}; + +template::HasUsableDirectAccess> +struct extract_data_selector { static const typename T::Scalar* run(const T& m) { - return &ei_blas_traits::extract(m).const_cast_derived().coeffRef(0,0); // FIXME this should be .data() + return const_cast(&blas_traits::extract(m).coeffRef(0,0)); // FIXME this should be .data() } }; template -struct ei_extract_data_selector { +struct extract_data_selector { static typename T::Scalar* run(const T&) { return 0; } }; -template const typename T::Scalar* ei_extract_data(const T& m) +template const typename T::Scalar* extract_data(const T& m) { - return ei_extract_data_selector::run(m); + return extract_data_selector::run(m); } +} // end namespace internal + #endif // EIGEN_BLASUTIL_H diff --git a/gtsam/3rdparty/Eigen/src/Core/util/Constants.h b/gtsam/3rdparty/Eigen/src/Core/util/Constants.h index 60da5c76a..2ffeb7948 100644 --- a/gtsam/3rdparty/Eigen/src/Core/util/Constants.h +++ b/gtsam/3rdparty/Eigen/src/Core/util/Constants.h @@ -56,7 +56,8 @@ const int Infinity = -1; * for a matrix, this means that the storage order is row-major. * If this bit is not set, the storage order is column-major. * For an expression, this determines the storage order of - * the matrix created by evaluation of that expression. */ + * the matrix created by evaluation of that expression. + * \sa \ref TopicStorageOrders */ const unsigned int RowMajorBit = 0x1; /** \ingroup flags @@ -125,27 +126,33 @@ const unsigned int LinearAccessBit = 0x10; /** \ingroup flags * - * Means that the underlying array of coefficients can be directly accessed. This means two things. - * First, references to the coefficients must be available through coeffRef(int, int). 
This rules out read-only - * expressions whose coefficients are computed on demand by coeff(int, int). Second, the memory layout of the - * array of coefficients must be exactly the natural one suggested by rows(), cols(), outerStride(), innerStride(), and the RowMajorBit. - * This rules out expressions such as Diagonal, whose coefficients, though referencable, do not have - * such a regular memory layout. + * Means the expression has a coeffRef() method, i.e. is writable as its individual coefficients are directly addressable. + * This rules out read-only expressions. + * + * Note that DirectAccessBit and LvalueBit are mutually orthogonal, as there are examples of expression having one but note + * the other: + * \li writable expressions that don't have a very simple memory layout as a strided array, have LvalueBit but not DirectAccessBit + * \li Map-to-const expressions, for example Map, have DirectAccessBit but not LvalueBit + * + * Expressions having LvalueBit also have their coeff() method returning a const reference instead of returning a new value. */ -const unsigned int DirectAccessBit = 0x20; +const unsigned int LvalueBit = 0x20; + +/** \ingroup flags + * + * Means that the underlying array of coefficients can be directly accessed as a plain strided array. The memory layout + * of the array of coefficients must be exactly the natural one suggested by rows(), cols(), + * outerStride(), innerStride(), and the RowMajorBit. This rules out expressions such as Diagonal, whose coefficients, + * though referencable, do not have such a regular memory layout. + * + * See the comment on LvalueBit for an explanation of how LvalueBit and DirectAccessBit are mutually orthogonal. + */ +const unsigned int DirectAccessBit = 0x40; /** \ingroup flags * * means the first coefficient packet is guaranteed to be aligned */ -const unsigned int AlignedBit = 0x40; - -/** \ingroup flags - * - * Means the expression is writable. Note that DirectAccessBit implies LvalueBit. 
- * Internaly, it is mainly used to enable the writable coeff accessors, and makes - * the read-only coeff accessors to return by const reference. - */ -const unsigned int LvalueBit = 0x80; +const unsigned int AlignedBit = 0x80; const unsigned int NestByRefBit = 0x100; @@ -204,9 +211,11 @@ enum { DontAlign = 0x2 }; +/** \brief Enum for specifying whether to apply or solve on the left or right. + */ enum { - OnTheLeft = 1, - OnTheRight = 2 + OnTheLeft = 1, /**< \brief Apply transformation on the left. */ + OnTheRight = 2 /**< \brief Apply transformation on the right. */ }; /* the following could as well be written: @@ -236,7 +245,7 @@ enum { }; enum AccessorLevels { - ReadOnlyAccessors, WriteAccessors, DirectAccessors + ReadOnlyAccessors, WriteAccessors, DirectAccessors, DirectWriteAccessors }; enum DecompositionOptions { diff --git a/gtsam/3rdparty/Eigen/src/Core/util/DisableMSVCWarnings.h b/gtsam/3rdparty/Eigen/src/Core/util/DisableMSVCWarnings.h deleted file mode 100644 index f6b94bc55..000000000 --- a/gtsam/3rdparty/Eigen/src/Core/util/DisableMSVCWarnings.h +++ /dev/null @@ -1,16 +0,0 @@ - -#ifdef _MSC_VER - // 4100 - unreferenced formal parameter (occurred e.g. 
in aligned_allocator::destroy(pointer p)) - // 4101 - unreferenced local variable - // 4127 - conditional expression is constant - // 4181 - qualifier applied to reference type ignored - // 4211 - nonstandard extension used : redefined extern to static - // 4244 - 'argument' : conversion from 'type1' to 'type2', possible loss of data - // 4273 - QtAlignedMalloc, inconsistent DLL linkage - // 4324 - structure was padded due to declspec(align()) - // 4512 - assignment operator could not be generated - // 4522 - 'class' : multiple assignment operators specified - // 4717 - 'function' : recursive on all control paths, function will cause runtime stack overflow - #pragma warning( push ) - #pragma warning( disable : 4100 4101 4127 4181 4211 4244 4273 4324 4512 4522 4717 ) -#endif diff --git a/gtsam/3rdparty/Eigen/src/Core/util/DisableStupidWarnings.h b/gtsam/3rdparty/Eigen/src/Core/util/DisableStupidWarnings.h new file mode 100644 index 000000000..00730524b --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Core/util/DisableStupidWarnings.h @@ -0,0 +1,42 @@ +#ifndef EIGEN_WARNINGS_DISABLED +#define EIGEN_WARNINGS_DISABLED + +#ifdef _MSC_VER + // 4100 - unreferenced formal parameter (occurred e.g. 
in aligned_allocator::destroy(pointer p)) + // 4101 - unreferenced local variable + // 4127 - conditional expression is constant + // 4181 - qualifier applied to reference type ignored + // 4211 - nonstandard extension used : redefined extern to static + // 4244 - 'argument' : conversion from 'type1' to 'type2', possible loss of data + // 4273 - QtAlignedMalloc, inconsistent DLL linkage + // 4324 - structure was padded due to declspec(align()) + // 4512 - assignment operator could not be generated + // 4522 - 'class' : multiple assignment operators specified + // 4700 - uninitialized local variable 'xyz' used + // 4717 - 'function' : recursive on all control paths, function will cause runtime stack overflow + #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS + #pragma warning( push ) + #endif + #pragma warning( disable : 4100 4101 4127 4181 4211 4244 4273 4324 4512 4522 4700 4717 ) +#elif defined __INTEL_COMPILER + // 2196 - routine is both "inline" and "noinline" ("noinline" assumed) + // ICC 12 generates this warning even without any inline keyword, when defining class methods 'inline' i.e. inside of class body + // 2536 - type qualifiers are meaningless here + // ICC 12 generates this warning when a function return type is const qualified, even if that type is a template-parameter-dependent + // typedef that may be a reference type. + // 279 - controlling expression is constant + // ICC 12 generates this warning on assert(constant_expression_depending_on_template_params) and frankly this is a legitimate use case. 
+ #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS + #pragma warning push + #endif + #pragma warning disable 2196 2536 279 +#elif defined __clang__ + // -Wconstant-logical-operand - warning: use of logical && with constant operand; switch to bitwise & or remove constant + // this is really a stupid warning as it warns on compile-time expressions involving enums + #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS + #pragma clang diagnostic push + #endif + #pragma clang diagnostic ignored "-Wconstant-logical-operand" +#endif + +#endif // not EIGEN_WARNINGS_DISABLED diff --git a/gtsam/3rdparty/Eigen/src/Core/util/EnableMSVCWarnings.h b/gtsam/3rdparty/Eigen/src/Core/util/EnableMSVCWarnings.h deleted file mode 100644 index 8bd61601e..000000000 --- a/gtsam/3rdparty/Eigen/src/Core/util/EnableMSVCWarnings.h +++ /dev/null @@ -1,4 +0,0 @@ - -#ifdef _MSC_VER - #pragma warning( pop ) -#endif diff --git a/gtsam/3rdparty/Eigen/src/Core/util/ForwardDeclarations.h b/gtsam/3rdparty/Eigen/src/Core/util/ForwardDeclarations.h index f6cf3fe41..7fbccf98c 100644 --- a/gtsam/3rdparty/Eigen/src/Core/util/ForwardDeclarations.h +++ b/gtsam/3rdparty/Eigen/src/Core/util/ForwardDeclarations.h @@ -26,27 +26,59 @@ #ifndef EIGEN_FORWARDDECLARATIONS_H #define EIGEN_FORWARDDECLARATIONS_H -template struct ei_traits; -template struct NumTraits; +namespace internal { -template struct ei_has_direct_access +template struct traits; + +// here we say once and for all that traits == traits +// When constness must affect traits, it has to be constness on template parameters on which T itself depends. +// For example, traits > != traits >, but +// traits > == traits > +template struct traits : traits {}; + +template struct has_direct_access { - enum { ret = (ei_traits::Flags & DirectAccessBit) ? 1 : 0 }; + enum { ret = (traits::Flags & DirectAccessBit) ? 1 : 0 }; }; +template struct accessors_level +{ + enum { has_direct_access = (traits::Flags & DirectAccessBit) ? 
1 : 0, + has_write_access = (traits::Flags & LvalueBit) ? 1 : 0, + value = has_direct_access ? (has_write_access ? DirectWriteAccessors : DirectAccessors) + : (has_write_access ? WriteAccessors : ReadOnlyAccessors) + }; +}; + +} // end namespace internal + +template struct NumTraits; + template struct EigenBase; template class DenseBase; +template class PlainObjectBase; + + template::Flags & DirectAccessBit) ? DirectAccessors - : (ei_traits::Flags & LvalueBit) ? WriteAccessors - : ReadOnlyAccessors> + int Level = internal::accessors_level::value > class DenseCoeffsBase; template class Matrix; @@ -61,7 +93,7 @@ template class ForceAlignedAccess; template class SwapWrapper; template::ret> class Block; + bool HasDirectAccess = internal::has_direct_access::ret> class Block; template class VectorBlock; template class Transpose; @@ -79,10 +111,17 @@ template class DiagonalBase; template class DiagonalWrapper; template class DiagonalMatrix; template class DiagonalProduct; -template class Diagonal; -template class PermutationMatrix; -template class Transpositions; +template class Diagonal; +template class PermutationMatrix; +template class Transpositions; +template class PermutationBase; +template class TranspositionsBase; +template class PermutationWrapper; +template class TranspositionsWrapper; +template::has_write_access ? 
WriteAccessors : ReadOnlyAccessors +> class MapBase; template class Stride; template > class Map; @@ -95,67 +134,92 @@ template struct CommaInitializer; template class ReturnByValue; template class ArrayWrapper; -template struct ei_solve_retval_base; -template struct ei_solve_retval; -template struct ei_kernel_retval_base; -template struct ei_kernel_retval; -template struct ei_image_retval_base; -template struct ei_image_retval; +namespace internal { +template struct solve_retval_base; +template struct solve_retval; +template struct kernel_retval_base; +template struct kernel_retval; +template struct image_retval_base; +template struct image_retval; +} // end namespace internal +namespace internal { template class BandMatrix; +} + +namespace internal { +template struct product_type; +} -template struct ei_product_type; template::value> + int ProductType = internal::product_type::value> struct ProductReturnType; // this is a workaround for sun CC template struct LazyProductReturnType; +namespace internal { + // Provides scalar/packet-wise product and product with accumulation // with optional conjugation of the arguments. 
-template struct ei_conj_helper; +template struct conj_helper; -template struct ei_scalar_sum_op; -template struct ei_scalar_difference_op; -template struct ei_scalar_conj_product_op; -template struct ei_scalar_quotient_op; -template struct ei_scalar_opposite_op; -template struct ei_scalar_conjugate_op; -template struct ei_scalar_real_op; -template struct ei_scalar_imag_op; -template struct ei_scalar_abs_op; -template struct ei_scalar_abs2_op; -template struct ei_scalar_sqrt_op; -template struct ei_scalar_exp_op; -template struct ei_scalar_log_op; -template struct ei_scalar_cos_op; -template struct ei_scalar_sin_op; -template struct ei_scalar_pow_op; -template struct ei_scalar_inverse_op; -template struct ei_scalar_square_op; -template struct ei_scalar_cube_op; -template struct ei_scalar_cast_op; -template struct ei_scalar_multiple_op; -template struct ei_scalar_quotient1_op; -template struct ei_scalar_min_op; -template struct ei_scalar_max_op; -template struct ei_scalar_random_op; -template struct ei_scalar_add_op; -template struct ei_scalar_constant_op; -template struct ei_scalar_identity_op; +template struct scalar_sum_op; +template struct scalar_difference_op; +template struct scalar_conj_product_op; +template struct scalar_quotient_op; +template struct scalar_opposite_op; +template struct scalar_conjugate_op; +template struct scalar_real_op; +template struct scalar_imag_op; +template struct scalar_abs_op; +template struct scalar_abs2_op; +template struct scalar_sqrt_op; +template struct scalar_exp_op; +template struct scalar_log_op; +template struct scalar_cos_op; +template struct scalar_sin_op; +template struct scalar_acos_op; +template struct scalar_asin_op; +template struct scalar_tan_op; +template struct scalar_pow_op; +template struct scalar_inverse_op; +template struct scalar_square_op; +template struct scalar_cube_op; +template struct scalar_cast_op; +template struct scalar_multiple_op; +template struct scalar_quotient1_op; +template struct 
scalar_min_op; +template struct scalar_max_op; +template struct scalar_random_op; +template struct scalar_add_op; +template struct scalar_constant_op; +template struct scalar_identity_op; -template struct ei_scalar_product_op; -template struct ei_scalar_multiple2_op; +template struct scalar_product_op; +template struct scalar_multiple2_op; + +} // end namespace internal struct IOFormat; // Array module template class Array; template class Select; template class PartialReduxExpr; @@ -165,7 +229,9 @@ template class Reverse; template class FullPivLU; template class PartialPivLU; -template struct ei_inverse_impl; +namespace internal { +template struct inverse_impl; +} template class HouseholderQR; template class ColPivHouseholderQR; template class FullPivHouseholderQR; @@ -173,36 +239,69 @@ template class LLT; template class LDLT; template class HouseholderSequence; -template class PlanarRotation; +template class JacobiRotation; // Geometry module: template class RotationBase; template class Cross; template class QuaternionBase; -template class Quaternion; template class Rotation2D; template class AngleAxis; -template class Transform; +template class Translation; + +#ifdef EIGEN2_SUPPORT +template class eigen2_RotationBase; +template class eigen2_Cross; +template class eigen2_Quaternion; +template class eigen2_Rotation2D; +template class eigen2_AngleAxis; +template class eigen2_Transform; +template class eigen2_ParametrizedLine; +template class eigen2_Hyperplane; +template class eigen2_Translation; +template class eigen2_Scaling; +#endif + +#if EIGEN2_SUPPORT_STAGE < STAGE20_RESOLVE_API_CONFLICTS +template class Quaternion; +template class Transform; template class ParametrizedLine; template class Hyperplane; -template class Translation; +template class Scaling; +#endif + +#if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS +template class Quaternion; +template class Transform; +template class ParametrizedLine; +template class Hyperplane; template class 
UniformScaling; template class Homogeneous; +#endif // MatrixFunctions module template struct MatrixExponentialReturnValue; template class MatrixFunctionReturnValue; + +namespace internal { template -struct ei_stem_function +struct stem_function { typedef std::complex::Real> ComplexScalar; typedef ComplexScalar type(ComplexScalar, int); }; +} #ifdef EIGEN2_SUPPORT template class Cwise; template class Minor; +template class LU; +template class QR; +template class SVD; +namespace internal { +template struct eigen2_part_return_type; +} #endif #endif // EIGEN_FORWARDDECLARATIONS_H diff --git a/gtsam/3rdparty/Eigen/src/Core/util/Macros.h b/gtsam/3rdparty/Eigen/src/Core/util/Macros.h index 2bf509f8f..f0e5ea3ce 100644 --- a/gtsam/3rdparty/Eigen/src/Core/util/Macros.h +++ b/gtsam/3rdparty/Eigen/src/Core/util/Macros.h @@ -26,18 +26,31 @@ #ifndef EIGEN_MACROS_H #define EIGEN_MACROS_H -#define EIGEN_WORLD_VERSION 2 -#define EIGEN_MAJOR_VERSION 92 +#define EIGEN_WORLD_VERSION 3 +#define EIGEN_MAJOR_VERSION 0 #define EIGEN_MINOR_VERSION 0 #define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \ (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \ EIGEN_MINOR_VERSION>=z)))) #ifdef __GNUC__ - #define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__>=x && __GNUC_MINOR__>=y) || __GNUC__>x) + #define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__==x && __GNUC_MINOR__>=y) || __GNUC__>x) #else #define EIGEN_GNUC_AT_LEAST(x,y) 0 #endif + +#ifdef __GNUC__ + #define EIGEN_GNUC_AT_MOST(x,y) ((__GNUC__==x && __GNUC_MINOR__<=y) || __GNUC__ // for abort + #include // for std::cerr + + namespace Eigen { + namespace internal { + // trivial function copying a bool. Must be EIGEN_DONT_INLINE, so we implement it after including Eigen headers. + // see bug 89. 
+ namespace { + EIGEN_DONT_INLINE bool copy_bool(bool b) { return b; } + } + inline void assert_fail(const char *condition, const char *function, const char *file, int line) + { + std::cerr << "assertion failed: " << condition << " in function " << function << " at " << file << ":" << line << std::endl; + abort(); + } + } + } + #define eigen_plain_assert(x) \ + do { \ + if(!Eigen::internal::copy_bool(x)) \ + Eigen::internal::assert_fail(EIGEN_MAKESTRING(x), __PRETTY_FUNCTION__, __FILE__, __LINE__); \ + } while(false) + #endif +#endif + +// eigen_assert can be overridden +#ifndef eigen_assert +#define eigen_assert(x) eigen_plain_assert(x) +#endif + +#ifdef EIGEN_INTERNAL_DEBUGGING +#define eigen_internal_assert(x) eigen_assert(x) +#else +#define eigen_internal_assert(x) +#endif + +#ifdef EIGEN_NO_DEBUG +#define EIGEN_ONLY_USED_FOR_DEBUG(x) (void)x +#else +#define EIGEN_ONLY_USED_FOR_DEBUG(x) +#endif + #if (defined __GNUC__) #define EIGEN_DEPRECATED __attribute__((deprecated)) #elif (defined _MSC_VER) @@ -218,9 +261,7 @@ * If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link * vectorized and non-vectorized code. 
*/ -#if !EIGEN_ALIGN_STATICALLY - #define EIGEN_ALIGN_TO_BOUNDARY(n) -#elif (defined __GNUC__) || (defined __PGI) +#if (defined __GNUC__) || (defined __PGI) || (defined __IBMCPP__) #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n))) #elif (defined _MSC_VER) #define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n)) @@ -233,6 +274,14 @@ #define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16) +#if EIGEN_ALIGN_STATICALLY +#define EIGEN_USER_ALIGN_TO_BOUNDARY(n) EIGEN_ALIGN_TO_BOUNDARY(n) +#define EIGEN_USER_ALIGN16 EIGEN_ALIGN16 +#else +#define EIGEN_USER_ALIGN_TO_BOUNDARY(n) +#define EIGEN_USER_ALIGN16 +#endif + #ifdef EIGEN_DONT_USE_RESTRICT_KEYWORD #define EIGEN_RESTRICT #endif @@ -245,31 +294,18 @@ #endif #ifndef EIGEN_DEFAULT_IO_FORMAT +#ifdef EIGEN_MAKING_DOCS +// format used in Eigen's documentation +// needed to define it here as escaping characters in CMake add_definition's argument seems very problematic. +#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat(3, 0, " ", "\n", "", "") +#else #define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat() #endif +#endif // just an empty macro ! #define EIGEN_EMPTY -// concatenate two tokens -#define EIGEN_CAT2(a,b) a ## b -#define EIGEN_CAT(a,b) EIGEN_CAT2(a,b) - -// convert a token to a string -#define EIGEN_MAKESTRING2(a) #a -#define EIGEN_MAKESTRING(a) EIGEN_MAKESTRING2(a) - -// format used in Eigen's documentation -// needed to define it here as escaping characters in CMake add_definition's argument seems very problematic. 
-#define EIGEN_DOCS_IO_FORMAT IOFormat(3, 0, " ", "\n", "", "") - -// C++0x features -#if defined(__GXX_EXPERIMENTAL_CXX0X__) || (defined(_MSC_VER) && (_MSC_VER >= 1600)) - #define EIGEN_REF_TO_TEMPORARY const & -#else - #define EIGEN_REF_TO_TEMPORARY const & -#endif - #if defined(_MSC_VER) && (!defined(__INTEL_COMPILER)) #define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \ using Base::operator =; @@ -295,35 +331,35 @@ **/ #define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \ - typedef typename Eigen::ei_traits::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex. */ \ + typedef typename Eigen::internal::traits::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex. */ \ typedef typename Eigen::NumTraits::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex, T were corresponding to RealScalar. */ \ typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. 
*/ \ - typedef typename Eigen::ei_nested::type Nested; \ - typedef typename Eigen::ei_traits::StorageKind StorageKind; \ - typedef typename Eigen::ei_traits::Index Index; \ - enum { RowsAtCompileTime = Eigen::ei_traits::RowsAtCompileTime, \ - ColsAtCompileTime = Eigen::ei_traits::ColsAtCompileTime, \ - Flags = Eigen::ei_traits::Flags, \ - CoeffReadCost = Eigen::ei_traits::CoeffReadCost, \ + typedef typename Eigen::internal::nested::type Nested; \ + typedef typename Eigen::internal::traits::StorageKind StorageKind; \ + typedef typename Eigen::internal::traits::Index Index; \ + enum { RowsAtCompileTime = Eigen::internal::traits::RowsAtCompileTime, \ + ColsAtCompileTime = Eigen::internal::traits::ColsAtCompileTime, \ + Flags = Eigen::internal::traits::Flags, \ + CoeffReadCost = Eigen::internal::traits::CoeffReadCost, \ SizeAtCompileTime = Base::SizeAtCompileTime, \ MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \ IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; #define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \ - typedef typename Eigen::ei_traits::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex. */ \ + typedef typename Eigen::internal::traits::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex. */ \ typedef typename Eigen::NumTraits::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex, T were corresponding to RealScalar. */ \ typedef typename Base::PacketScalar PacketScalar; \ typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. 
*/ \ - typedef typename Eigen::ei_nested::type Nested; \ - typedef typename Eigen::ei_traits::StorageKind StorageKind; \ - typedef typename Eigen::ei_traits::Index Index; \ - enum { RowsAtCompileTime = Eigen::ei_traits::RowsAtCompileTime, \ - ColsAtCompileTime = Eigen::ei_traits::ColsAtCompileTime, \ - MaxRowsAtCompileTime = Eigen::ei_traits::MaxRowsAtCompileTime, \ - MaxColsAtCompileTime = Eigen::ei_traits::MaxColsAtCompileTime, \ - Flags = Eigen::ei_traits::Flags, \ - CoeffReadCost = Eigen::ei_traits::CoeffReadCost, \ + typedef typename Eigen::internal::nested::type Nested; \ + typedef typename Eigen::internal::traits::StorageKind StorageKind; \ + typedef typename Eigen::internal::traits::Index Index; \ + enum { RowsAtCompileTime = Eigen::internal::traits::RowsAtCompileTime, \ + ColsAtCompileTime = Eigen::internal::traits::ColsAtCompileTime, \ + MaxRowsAtCompileTime = Eigen::internal::traits::MaxRowsAtCompileTime, \ + MaxColsAtCompileTime = Eigen::internal::traits::MaxColsAtCompileTime, \ + Flags = Eigen::internal::traits::Flags, \ + CoeffReadCost = Eigen::internal::traits::CoeffReadCost, \ SizeAtCompileTime = Base::SizeAtCompileTime, \ MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \ IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \ @@ -362,21 +398,21 @@ #define EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR) \ template \ - EIGEN_STRONG_INLINE const CwiseBinaryOp, Derived, OtherDerived> \ + EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const OtherDerived> \ METHOD(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const \ { \ - return CwiseBinaryOp, Derived, OtherDerived>(derived(), other.derived()); \ + return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); \ } // the expression type of a cwise product #define EIGEN_CWISE_PRODUCT_RETURN_TYPE(LHS,RHS) \ CwiseBinaryOp< \ - ei_scalar_product_op< \ - typename ei_traits::Scalar, \ - typename ei_traits::Scalar \ + internal::scalar_product_op< \ + typename 
internal::traits::Scalar, \ + typename internal::traits::Scalar \ >, \ - LHS, \ - RHS \ + const LHS, \ + const RHS \ > #endif // EIGEN_MACROS_H diff --git a/gtsam/3rdparty/Eigen/src/Core/util/Memory.h b/gtsam/3rdparty/Eigen/src/Core/util/Memory.h index 362ccf18e..8ccf0e603 100644 --- a/gtsam/3rdparty/Eigen/src/Core/util/Memory.h +++ b/gtsam/3rdparty/Eigen/src/Core/util/Memory.h @@ -80,6 +80,8 @@ #define EIGEN_HAS_MM_MALLOC 0 #endif +namespace internal { + /***************************************************************************** *** Implementation of handmade aligned functions *** *****************************************************************************/ @@ -89,7 +91,7 @@ /** \internal Like malloc, but the returned pointer is guaranteed to be 16-byte aligned. * Fast, but wastes 16 additional bytes of memory. Does not throw any exception. */ -inline void* ei_handmade_aligned_malloc(size_t size) +inline void* handmade_aligned_malloc(size_t size) { void *original = std::malloc(size+16); if (original == 0) return 0; @@ -98,8 +100,8 @@ inline void* ei_handmade_aligned_malloc(size_t size) return aligned; } -/** \internal Frees memory allocated with ei_handmade_aligned_malloc */ -inline void ei_handmade_aligned_free(void *ptr) +/** \internal Frees memory allocated with handmade_aligned_malloc */ +inline void handmade_aligned_free(void *ptr) { if (ptr) std::free(*(reinterpret_cast(ptr) - 1)); } @@ -109,9 +111,9 @@ inline void ei_handmade_aligned_free(void *ptr) * Since we know that our handmade version is based on std::realloc * we can use std::realloc to implement efficient reallocation. 
*/ -inline void* ei_handmade_aligned_realloc(void* ptr, size_t size, size_t = 0) +inline void* handmade_aligned_realloc(void* ptr, size_t size, size_t = 0) { - if (ptr == 0) return ei_handmade_aligned_malloc(size); + if (ptr == 0) return handmade_aligned_malloc(size); void *original = *(reinterpret_cast(ptr) - 1); original = std::realloc(original,size+16); if (original == 0) return 0; @@ -124,26 +126,26 @@ inline void* ei_handmade_aligned_realloc(void* ptr, size_t size, size_t = 0) *** Implementation of generic aligned realloc (when no realloc can be used)*** *****************************************************************************/ -void* ei_aligned_malloc(size_t size); -void ei_aligned_free(void *ptr); +void* aligned_malloc(size_t size); +void aligned_free(void *ptr); /** \internal * \brief Reallocates aligned memory. * Allows reallocation with aligned ptr types. This implementation will * always create a new memory chunk and copy the old data. */ -inline void* ei_generic_aligned_realloc(void* ptr, size_t size, size_t old_size) +inline void* generic_aligned_realloc(void* ptr, size_t size, size_t old_size) { if (ptr==0) - return ei_aligned_malloc(size); + return aligned_malloc(size); if (size==0) { - ei_aligned_free(ptr); + aligned_free(ptr); return 0; } - void* newptr = ei_aligned_malloc(size); + void* newptr = aligned_malloc(size); if (newptr == 0) { #ifdef EIGEN_HAS_ERRNO @@ -155,7 +157,7 @@ inline void* ei_generic_aligned_realloc(void* ptr, size_t size, size_t old_size) if (ptr != 0) { std::memcpy(newptr, ptr, std::min(size,old_size)); - ei_aligned_free(ptr); + aligned_free(ptr); } return newptr; @@ -165,14 +167,36 @@ inline void* ei_generic_aligned_realloc(void* ptr, size_t size, size_t old_size) *** Implementation of portable aligned versions of malloc/free/realloc *** *****************************************************************************/ +#ifdef EIGEN_NO_MALLOC +inline void check_that_malloc_is_allowed() +{ + eigen_assert(false && "heap 
allocation is forbidden (EIGEN_NO_MALLOC is defined)"); +} +#elif defined EIGEN_RUNTIME_NO_MALLOC +inline bool is_malloc_allowed_impl(bool update, bool new_value = false) +{ + static bool value = true; + if (update == 1) + value = new_value; + return value; +} +inline bool is_malloc_allowed() { return is_malloc_allowed_impl(false); } +inline bool set_is_malloc_allowed(bool new_value) { return is_malloc_allowed_impl(true, new_value); } +inline void check_that_malloc_is_allowed() +{ + eigen_assert(is_malloc_allowed() && "heap allocation is forbidden (EIGEN_RUNTIME_NO_MALLOC is defined and g_is_malloc_allowed is false)"); +} +#else +inline void check_that_malloc_is_allowed() +{} +#endif + /** \internal Allocates \a size bytes. The returned pointer is guaranteed to have 16 bytes alignment. * On allocation error, the returned pointer is null, and if exceptions are enabled then a std::bad_alloc is thrown. */ -inline void* ei_aligned_malloc(size_t size) +inline void* aligned_malloc(size_t size) { - #ifdef EIGEN_NO_MALLOC - ei_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)"); - #endif + check_that_malloc_is_allowed(); void *result; #if !EIGEN_ALIGN @@ -186,7 +210,7 @@ inline void* ei_aligned_malloc(size_t size) #elif (defined _MSC_VER) result = _aligned_malloc(size, 16); #else - result = ei_handmade_aligned_malloc(size); + result = handmade_aligned_malloc(size); #endif #ifdef EIGEN_EXCEPTIONS @@ -196,8 +220,8 @@ inline void* ei_aligned_malloc(size_t size) return result; } -/** \internal Frees memory allocated with ei_aligned_malloc. */ -inline void ei_aligned_free(void *ptr) +/** \internal Frees memory allocated with aligned_malloc. 
*/ +inline void aligned_free(void *ptr) { #if !EIGEN_ALIGN std::free(ptr); @@ -210,7 +234,7 @@ inline void ei_aligned_free(void *ptr) #elif defined(_MSC_VER) _aligned_free(ptr); #else - ei_handmade_aligned_free(ptr); + handmade_aligned_free(ptr); #endif } @@ -219,7 +243,7 @@ inline void ei_aligned_free(void *ptr) * \brief Reallocates an aligned block of memory. * \throws std::bad_alloc if EIGEN_EXCEPTIONS are defined. **/ -inline void* ei_aligned_realloc(void *ptr, size_t new_size, size_t old_size) +inline void* aligned_realloc(void *ptr, size_t new_size, size_t old_size) { EIGEN_UNUSED_VARIABLE(old_size); @@ -229,7 +253,7 @@ inline void* ei_aligned_realloc(void *ptr, size_t new_size, size_t old_size) #elif EIGEN_MALLOC_ALREADY_ALIGNED result = std::realloc(ptr,new_size); #elif EIGEN_HAS_POSIX_MEMALIGN - result = ei_generic_aligned_realloc(ptr,new_size,old_size); + result = generic_aligned_realloc(ptr,new_size,old_size); #elif EIGEN_HAS_MM_MALLOC // The defined(_mm_free) is just here to verify that this MSVC version // implements _mm_malloc/_mm_free based on the corresponding _aligned_ @@ -237,12 +261,12 @@ inline void* ei_aligned_realloc(void *ptr, size_t new_size, size_t old_size) #if defined(_MSC_VER) && defined(_mm_free) result = _aligned_realloc(ptr,new_size,16); #else - result = ei_generic_aligned_realloc(ptr,new_size,old_size); + result = generic_aligned_realloc(ptr,new_size,old_size); #endif #elif defined(_MSC_VER) result = _aligned_realloc(ptr,new_size,16); #else - result = ei_handmade_aligned_realloc(ptr,new_size,old_size); + result = handmade_aligned_realloc(ptr,new_size,old_size); #endif #ifdef EIGEN_EXCEPTIONS @@ -259,16 +283,14 @@ inline void* ei_aligned_realloc(void *ptr, size_t new_size, size_t old_size) /** \internal Allocates \a size bytes. If Align is true, then the returned ptr is 16-byte-aligned. * On allocation error, the returned pointer is null, and if exceptions are enabled then a std::bad_alloc is thrown. 
*/ -template inline void* ei_conditional_aligned_malloc(size_t size) +template inline void* conditional_aligned_malloc(size_t size) { - return ei_aligned_malloc(size); + return aligned_malloc(size); } -template<> inline void* ei_conditional_aligned_malloc(size_t size) +template<> inline void* conditional_aligned_malloc(size_t size) { - #ifdef EIGEN_NO_MALLOC - ei_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)"); - #endif + check_that_malloc_is_allowed(); void *result = std::malloc(size); #ifdef EIGEN_EXCEPTIONS @@ -277,23 +299,23 @@ template<> inline void* ei_conditional_aligned_malloc(size_t size) return result; } -/** \internal Frees memory allocated with ei_conditional_aligned_malloc */ -template inline void ei_conditional_aligned_free(void *ptr) +/** \internal Frees memory allocated with conditional_aligned_malloc */ +template inline void conditional_aligned_free(void *ptr) { - ei_aligned_free(ptr); + aligned_free(ptr); } -template<> inline void ei_conditional_aligned_free(void *ptr) +template<> inline void conditional_aligned_free(void *ptr) { std::free(ptr); } -template inline void* ei_conditional_aligned_realloc(void* ptr, size_t new_size, size_t old_size) +template inline void* conditional_aligned_realloc(void* ptr, size_t new_size, size_t old_size) { - return ei_aligned_realloc(ptr, new_size, old_size); + return aligned_realloc(ptr, new_size, old_size); } -template<> inline void* ei_conditional_aligned_realloc(void* ptr, size_t new_size, size_t) +template<> inline void* conditional_aligned_realloc(void* ptr, size_t new_size, size_t) { return std::realloc(ptr, new_size); } @@ -305,7 +327,7 @@ template<> inline void* ei_conditional_aligned_realloc(void* ptr, size_t /** \internal Constructs the elements of an array. * The \a size parameter tells on how many objects to call the constructor of T. 
*/ -template inline T* ei_construct_elements_of_array(T *ptr, size_t size) +template inline T* construct_elements_of_array(T *ptr, size_t size) { for (size_t i=0; i < size; ++i) ::new (ptr + i) T; return ptr; @@ -314,7 +336,7 @@ template inline T* ei_construct_elements_of_array(T *ptr, size_t siz /** \internal Destructs the elements of an array. * The \a size parameters tells on how many objects to call the destructor of T. */ -template inline void ei_destruct_elements_of_array(T *ptr, size_t size) +template inline void destruct_elements_of_array(T *ptr, size_t size) { // always destruct an array starting from the end. if(ptr) @@ -329,44 +351,72 @@ template inline void ei_destruct_elements_of_array(T *ptr, size_t si * On allocation error, the returned pointer is undefined, but if exceptions are enabled then a std::bad_alloc is thrown. * The default constructor of T is called. */ -template inline T* ei_aligned_new(size_t size) +template inline T* aligned_new(size_t size) { - T *result = reinterpret_cast(ei_aligned_malloc(sizeof(T)*size)); - return ei_construct_elements_of_array(result, size); + T *result = reinterpret_cast(aligned_malloc(sizeof(T)*size)); + return construct_elements_of_array(result, size); } -template inline T* ei_conditional_aligned_new(size_t size) +template inline T* conditional_aligned_new(size_t size) { - T *result = reinterpret_cast(ei_conditional_aligned_malloc(sizeof(T)*size)); - return ei_construct_elements_of_array(result, size); + T *result = reinterpret_cast(conditional_aligned_malloc(sizeof(T)*size)); + return construct_elements_of_array(result, size); } -/** \internal Deletes objects constructed with ei_aligned_new +/** \internal Deletes objects constructed with aligned_new * The \a size parameters tells on how many objects to call the destructor of T. 
*/ -template inline void ei_aligned_delete(T *ptr, size_t size) +template inline void aligned_delete(T *ptr, size_t size) { - ei_destruct_elements_of_array(ptr, size); - ei_aligned_free(ptr); + destruct_elements_of_array(ptr, size); + aligned_free(ptr); } -/** \internal Deletes objects constructed with ei_conditional_aligned_new +/** \internal Deletes objects constructed with conditional_aligned_new * The \a size parameters tells on how many objects to call the destructor of T. */ -template inline void ei_conditional_aligned_delete(T *ptr, size_t size) +template inline void conditional_aligned_delete(T *ptr, size_t size) { - ei_destruct_elements_of_array(ptr, size); - ei_conditional_aligned_free(ptr); + destruct_elements_of_array(ptr, size); + conditional_aligned_free(ptr); } -template inline T* ei_conditional_aligned_realloc_new(T* pts, size_t new_size, size_t old_size) +template inline T* conditional_aligned_realloc_new(T* pts, size_t new_size, size_t old_size) { - T *result = reinterpret_cast(ei_conditional_aligned_realloc(reinterpret_cast(pts), sizeof(T)*new_size, sizeof(T)*old_size)); - if (new_size > old_size) - ei_construct_elements_of_array(result+old_size, new_size-old_size); + if(new_size < old_size) + destruct_elements_of_array(pts+new_size, old_size-new_size); + T *result = reinterpret_cast(conditional_aligned_realloc(reinterpret_cast(pts), sizeof(T)*new_size, sizeof(T)*old_size)); + if(new_size > old_size) + construct_elements_of_array(result+old_size, new_size-old_size); return result; } + +template inline T* conditional_aligned_new_auto(size_t size) +{ + T *result = reinterpret_cast(conditional_aligned_malloc(sizeof(T)*size)); + if(NumTraits::RequireInitialization) + construct_elements_of_array(result, size); + return result; +} + +template inline T* conditional_aligned_realloc_new_auto(T* pts, size_t new_size, size_t old_size) +{ + if(NumTraits::RequireInitialization && (new_size < old_size)) + destruct_elements_of_array(pts+new_size, 
old_size-new_size); + T *result = reinterpret_cast(conditional_aligned_realloc(reinterpret_cast(pts), sizeof(T)*new_size, sizeof(T)*old_size)); + if(NumTraits::RequireInitialization && (new_size > old_size)) + construct_elements_of_array(result+old_size, new_size-old_size); + return result; +} + +template inline void conditional_aligned_delete_auto(T *ptr, size_t size) +{ + if(NumTraits::RequireInitialization) + destruct_elements_of_array(ptr, size); + conditional_aligned_free(ptr); +} + /****************************************************************************/ /** \internal Returns the index of the first element of the array that is well aligned for vectorization. @@ -383,13 +433,13 @@ template inline T* ei_conditional_aligned_realloc_new(T* * other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for * example with Scalar=double on certain 32-bit platforms, see bug #79. * - * There is also the variant ei_first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h. + * There is also the variant first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h. 
*/ template -inline static Index ei_first_aligned(const Scalar* array, Index size) +inline static Index first_aligned(const Scalar* array, Index size) { - typedef typename ei_packet_traits::type Packet; - enum { PacketSize = ei_packet_traits::size, + typedef typename packet_traits::type Packet; + enum { PacketSize = packet_traits::size, PacketAlignedMask = PacketSize-1 }; @@ -412,6 +462,8 @@ inline static Index ei_first_aligned(const Scalar* array, Index size) } } +} // end namespace internal + /***************************************************************************** *** Implementation of runtime stack allocation (falling back to malloc) *** *****************************************************************************/ @@ -431,20 +483,20 @@ inline static Index ei_first_aligned(const Scalar* array, Index size) #if (defined __linux__) #define ei_aligned_stack_alloc(SIZE) (SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) \ ? alloca(SIZE) \ - : ei_aligned_malloc(SIZE) - #define ei_aligned_stack_free(PTR,SIZE) if(SIZE>EIGEN_STACK_ALLOCATION_LIMIT) ei_aligned_free(PTR) + : Eigen::internal::aligned_malloc(SIZE) + #define ei_aligned_stack_free(PTR,SIZE) if(SIZE>EIGEN_STACK_ALLOCATION_LIMIT) Eigen::internal::aligned_free(PTR) #elif defined(_MSC_VER) #define ei_aligned_stack_alloc(SIZE) (SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) \ ? 
_alloca(SIZE) \ - : ei_aligned_malloc(SIZE) - #define ei_aligned_stack_free(PTR,SIZE) if(SIZE>EIGEN_STACK_ALLOCATION_LIMIT) ei_aligned_free(PTR) + : Eigen::internal::aligned_malloc(SIZE) + #define ei_aligned_stack_free(PTR,SIZE) if(SIZE>EIGEN_STACK_ALLOCATION_LIMIT) Eigen::internal::aligned_free(PTR) #else - #define ei_aligned_stack_alloc(SIZE) ei_aligned_malloc(SIZE) - #define ei_aligned_stack_free(PTR,SIZE) ei_aligned_free(PTR) + #define ei_aligned_stack_alloc(SIZE) Eigen::internal::aligned_malloc(SIZE) + #define ei_aligned_stack_free(PTR,SIZE) Eigen::internal::aligned_free(PTR) #endif -#define ei_aligned_stack_new(TYPE,SIZE) ei_construct_elements_of_array(reinterpret_cast(ei_aligned_stack_alloc(sizeof(TYPE)*SIZE)), SIZE) -#define ei_aligned_stack_delete(TYPE,PTR,SIZE) do {ei_destruct_elements_of_array(PTR, SIZE); \ +#define ei_aligned_stack_new(TYPE,SIZE) Eigen::internal::construct_elements_of_array(reinterpret_cast(ei_aligned_stack_alloc(sizeof(TYPE)*SIZE)), SIZE) +#define ei_aligned_stack_delete(TYPE,PTR,SIZE) do {Eigen::internal::destruct_elements_of_array(PTR, SIZE); \ ei_aligned_stack_free(PTR,sizeof(TYPE)*SIZE);} while(0) @@ -456,26 +508,26 @@ inline static Index ei_first_aligned(const Scalar* array, Index size) #ifdef EIGEN_EXCEPTIONS #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \ void* operator new(size_t size, const std::nothrow_t&) throw() { \ - try { return Eigen::ei_conditional_aligned_malloc(size); } \ + try { return Eigen::internal::conditional_aligned_malloc(size); } \ catch (...) 
{ return 0; } \ return 0; \ } #else #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \ void* operator new(size_t size, const std::nothrow_t&) throw() { \ - return Eigen::ei_conditional_aligned_malloc(size); \ + return Eigen::internal::conditional_aligned_malloc(size); \ } #endif #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \ void *operator new(size_t size) { \ - return Eigen::ei_conditional_aligned_malloc(size); \ + return Eigen::internal::conditional_aligned_malloc(size); \ } \ void *operator new[](size_t size) { \ - return Eigen::ei_conditional_aligned_malloc(size); \ + return Eigen::internal::conditional_aligned_malloc(size); \ } \ - void operator delete(void * ptr) throw() { Eigen::ei_conditional_aligned_free(ptr); } \ - void operator delete[](void * ptr) throw() { Eigen::ei_conditional_aligned_free(ptr); } \ + void operator delete(void * ptr) throw() { Eigen::internal::conditional_aligned_free(ptr); } \ + void operator delete[](void * ptr) throw() { Eigen::internal::conditional_aligned_free(ptr); } \ /* in-place new and delete. since (at least afaik) there is no actual */ \ /* memory allocated we can safely let the default implementation handle */ \ /* this particular case. 
*/ \ @@ -484,9 +536,9 @@ inline static Index ei_first_aligned(const Scalar* array, Index size) /* nothrow-new (returns zero instead of std::bad_alloc) */ \ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \ void operator delete(void *ptr, const std::nothrow_t&) throw() { \ - Eigen::ei_conditional_aligned_free(ptr); \ + Eigen::internal::conditional_aligned_free(ptr); \ } \ - typedef void ei_operator_new_marker_type; + typedef void eigen_aligned_operator_new_marker_type; #else #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) #endif @@ -505,11 +557,13 @@ inline static Index ei_first_aligned(const Scalar* array, Index size) * Example: * \code * // Matrix4f requires 16 bytes alignment: -* std::map< int, Matrix4f, std::less, aligned_allocator > my_map_mat4; +* std::map< int, Matrix4f, std::less, +* aligned_allocator > > my_map_mat4; * // Vector3f does not require 16 bytes alignment, no need to use Eigen's allocator: * std::map< int, Vector3f > my_map_vec3; * \endcode * +* \sa \ref TopicStlContainers. 
*/ template class aligned_allocator @@ -564,7 +618,7 @@ public: pointer allocate( size_type num, const_pointer* hint = 0 ) { static_cast( hint ); // suppress unused variable warning - return static_cast( ei_aligned_malloc( num * sizeof(T) ) ); + return static_cast( internal::aligned_malloc( num * sizeof(T) ) ); } void construct( pointer p, const T& value ) @@ -579,7 +633,7 @@ public: void deallocate( pointer p, size_type /*num*/ ) { - ei_aligned_free( p ); + internal::aligned_free( p ); } bool operator!=(const aligned_allocator& ) const @@ -591,27 +645,32 @@ public: //---------- Cache sizes ---------- -#if defined(__GNUC__) +#if defined(__GNUC__) && ( defined(__i386__) || defined(__x86_64__) ) # if defined(__PIC__) && defined(__i386__) + // Case for x86 with PIC # define EIGEN_CPUID(abcd,func,id) \ __asm__ __volatile__ ("xchgl %%ebx, %%esi;cpuid; xchgl %%ebx,%%esi": "=a" (abcd[0]), "=S" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id)); -# elif !defined(__arm__) && !defined(__powerpc__) +# else + // Case for x86_64 or x86 w/o PIC # define EIGEN_CPUID(abcd,func,id) \ __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id) ); # endif #elif defined(_MSC_VER) -# if (_MSC_VER > 1500) /* newer than MSVC++ 9.0 */ || (_MSC_VER == 1500 && _MSC_FULL_VER >= 150030729) /* MSVC++ 9.0 with SP1*/ +# if (_MSC_VER > 1500) # define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id) # endif #endif +namespace internal { + #ifdef EIGEN_CPUID -inline bool ei_cpuid_is_vendor(int abcd[4], const char* vendor) + +inline bool cpuid_is_vendor(int abcd[4], const char* vendor) { return abcd[1]==((int*)(vendor))[0] && abcd[3]==((int*)(vendor))[1] && abcd[2]==((int*)(vendor))[2]; } -inline void ei_queryCacheSizes_intel_direct(int& l1, int& l2, int& l3) +inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3) { int abcd[4]; l1 = l2 = l3 = 0; @@ -643,7 +702,7 @@ inline void 
ei_queryCacheSizes_intel_direct(int& l1, int& l2, int& l3) } while(cache_type>0 && cache_id<16); } -inline void ei_queryCacheSizes_intel_codes(int& l1, int& l2, int& l3) +inline void queryCacheSizes_intel_codes(int& l1, int& l2, int& l3) { int abcd[4]; abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0; @@ -723,15 +782,15 @@ inline void ei_queryCacheSizes_intel_codes(int& l1, int& l2, int& l3) l3 *= 1024; } -inline void ei_queryCacheSizes_intel(int& l1, int& l2, int& l3, int max_std_funcs) +inline void queryCacheSizes_intel(int& l1, int& l2, int& l3, int max_std_funcs) { if(max_std_funcs>=4) - ei_queryCacheSizes_intel_direct(l1,l2,l3); + queryCacheSizes_intel_direct(l1,l2,l3); else - ei_queryCacheSizes_intel_codes(l1,l2,l3); + queryCacheSizes_intel_codes(l1,l2,l3); } -inline void ei_queryCacheSizes_amd(int& l1, int& l2, int& l3) +inline void queryCacheSizes_amd(int& l1, int& l2, int& l3) { int abcd[4]; abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0; @@ -746,7 +805,7 @@ inline void ei_queryCacheSizes_amd(int& l1, int& l2, int& l3) /** \internal * Queries and returns the cache sizes in Bytes of the L1, L2, and L3 data caches respectively */ -inline void ei_queryCacheSizes(int& l1, int& l2, int& l3) +inline void queryCacheSizes(int& l1, int& l2, int& l3) { #ifdef EIGEN_CPUID int abcd[4]; @@ -754,27 +813,25 @@ inline void ei_queryCacheSizes(int& l1, int& l2, int& l3) // identify the CPU vendor EIGEN_CPUID(abcd,0x0,0); int max_std_funcs = abcd[1]; - if(ei_cpuid_is_vendor(abcd,"GenuineIntel")) - ei_queryCacheSizes_intel(l1,l2,l3,max_std_funcs); - else if(ei_cpuid_is_vendor(abcd,"AuthenticAMD") || ei_cpuid_is_vendor(abcd,"AMDisbetter!")) - ei_queryCacheSizes_amd(l1,l2,l3); + if(cpuid_is_vendor(abcd,"GenuineIntel")) + queryCacheSizes_intel(l1,l2,l3,max_std_funcs); + else if(cpuid_is_vendor(abcd,"AuthenticAMD") || cpuid_is_vendor(abcd,"AMDisbetter!")) + queryCacheSizes_amd(l1,l2,l3); else // by default let's use Intel's API - ei_queryCacheSizes_intel(l1,l2,l3,max_std_funcs); + 
queryCacheSizes_intel(l1,l2,l3,max_std_funcs); // here is the list of other vendors: -// ||ei_cpuid_is_vendor(abcd,"VIA VIA VIA ") -// ||ei_cpuid_is_vendor(abcd,"CyrixInstead") -// ||ei_cpuid_is_vendor(abcd,"CentaurHauls") -// ||ei_cpuid_is_vendor(abcd,"GenuineTMx86") -// ||ei_cpuid_is_vendor(abcd,"TransmetaCPU") -// ||ei_cpuid_is_vendor(abcd,"RiseRiseRise") -// ||ei_cpuid_is_vendor(abcd,"Geode by NSC") -// ||ei_cpuid_is_vendor(abcd,"SiS SiS SiS ") -// ||ei_cpuid_is_vendor(abcd,"UMC UMC UMC ") -// ||ei_cpuid_is_vendor(abcd,"NexGenDriven") -// ||ei_cpuid_is_vendor(abcd,"CentaurHauls") -// ||ei_cpuid_is_vendor(abcd,"CentaurHauls") +// ||cpuid_is_vendor(abcd,"VIA VIA VIA ") +// ||cpuid_is_vendor(abcd,"CyrixInstead") +// ||cpuid_is_vendor(abcd,"CentaurHauls") +// ||cpuid_is_vendor(abcd,"GenuineTMx86") +// ||cpuid_is_vendor(abcd,"TransmetaCPU") +// ||cpuid_is_vendor(abcd,"RiseRiseRise") +// ||cpuid_is_vendor(abcd,"Geode by NSC") +// ||cpuid_is_vendor(abcd,"SiS SiS SiS ") +// ||cpuid_is_vendor(abcd,"UMC UMC UMC ") +// ||cpuid_is_vendor(abcd,"NexGenDriven") #else l1 = l2 = l3 = -1; #endif @@ -782,20 +839,22 @@ inline void ei_queryCacheSizes(int& l1, int& l2, int& l3) /** \internal * \returns the size in Bytes of the L1 data cache */ -inline int ei_queryL1CacheSize() +inline int queryL1CacheSize() { int l1(-1), l2, l3; - ei_queryCacheSizes(l1,l2,l3); + queryCacheSizes(l1,l2,l3); return l1; } /** \internal * \returns the size in Bytes of the L2 or L3 cache if this later is present */ -inline int ei_queryTopLevelCacheSize() +inline int queryTopLevelCacheSize() { int l1, l2(-1), l3(-1); - ei_queryCacheSizes(l1,l2,l3); + queryCacheSizes(l1,l2,l3); return std::max(l2,l3); } +} // end namespace internal + #endif // EIGEN_MEMORY_H diff --git a/gtsam/3rdparty/Eigen/src/Core/util/Meta.h b/gtsam/3rdparty/Eigen/src/Core/util/Meta.h index 3d28680b6..4518261ef 100644 --- a/gtsam/3rdparty/Eigen/src/Core/util/Meta.h +++ b/gtsam/3rdparty/Eigen/src/Core/util/Meta.h @@ -26,6 +26,8 @@ 
#ifndef EIGEN_META_H #define EIGEN_META_H +namespace internal { + /** \internal * \file Meta.h * This file contains generic metaprogramming classes which are not specifically related to Eigen. @@ -33,72 +35,72 @@ * we however don't want to add a dependency to Boost. */ -struct ei_meta_true { enum { ret = 1 }; }; -struct ei_meta_false { enum { ret = 0 }; }; +struct true_type { enum { value = 1 }; }; +struct false_type { enum { value = 0 }; }; template -struct ei_meta_if { typedef Then ret; }; +struct conditional { typedef Then type; }; template -struct ei_meta_if { typedef Else ret; }; +struct conditional { typedef Else type; }; -template struct ei_is_same_type { enum { ret = 0 }; }; -template struct ei_is_same_type { enum { ret = 1 }; }; +template struct is_same { enum { value = 0 }; }; +template struct is_same { enum { value = 1 }; }; -template struct ei_unref { typedef T type; }; -template struct ei_unref { typedef T type; }; +template struct remove_reference { typedef T type; }; +template struct remove_reference { typedef T type; }; -template struct ei_unpointer { typedef T type; }; -template struct ei_unpointer { typedef T type; }; -template struct ei_unpointer { typedef T type; }; +template struct remove_pointer { typedef T type; }; +template struct remove_pointer { typedef T type; }; +template struct remove_pointer { typedef T type; }; -template struct ei_unconst { typedef T type; }; -template struct ei_unconst { typedef T type; }; -template struct ei_unconst { typedef T & type; }; -template struct ei_unconst { typedef T * type; }; +template struct remove_const { typedef T type; }; +template struct remove_const { typedef T type; }; +template struct remove_const { typedef T type[]; }; +template struct remove_const { typedef T type[Size]; }; -template struct ei_cleantype { typedef T type; }; -template struct ei_cleantype { typedef typename ei_cleantype::type type; }; -template struct ei_cleantype { typedef typename ei_cleantype::type type; }; -template struct 
ei_cleantype { typedef typename ei_cleantype::type type; }; -template struct ei_cleantype { typedef typename ei_cleantype::type type; }; -template struct ei_cleantype { typedef typename ei_cleantype::type type; }; +template struct remove_all { typedef T type; }; +template struct remove_all { typedef typename remove_all::type type; }; +template struct remove_all { typedef typename remove_all::type type; }; +template struct remove_all { typedef typename remove_all::type type; }; +template struct remove_all { typedef typename remove_all::type type; }; +template struct remove_all { typedef typename remove_all::type type; }; -template struct ei_is_arithmetic { enum { ret = false }; }; -template<> struct ei_is_arithmetic { enum { ret = true }; }; -template<> struct ei_is_arithmetic { enum { ret = true }; }; -template<> struct ei_is_arithmetic { enum { ret = true }; }; -template<> struct ei_is_arithmetic { enum { ret = true }; }; -template<> struct ei_is_arithmetic { enum { ret = true }; }; -template<> struct ei_is_arithmetic { enum { ret = true }; }; -template<> struct ei_is_arithmetic { enum { ret = true }; }; -template<> struct ei_is_arithmetic { enum { ret = true }; }; -template<> struct ei_is_arithmetic{ enum { ret = true }; }; -template<> struct ei_is_arithmetic { enum { ret = true }; }; -template<> struct ei_is_arithmetic { enum { ret = true }; }; -template<> struct ei_is_arithmetic { enum { ret = true }; }; -template<> struct ei_is_arithmetic { enum { ret = true }; }; -template<> struct ei_is_arithmetic { enum { ret = true }; }; -template<> struct ei_is_arithmetic { enum { ret = true }; }; +template struct is_arithmetic { enum { value = false }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> 
struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic{ enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; -template struct ei_makeconst { typedef const T type; }; -template struct ei_makeconst { typedef const T type; }; -template struct ei_makeconst { typedef const T& type; }; -template struct ei_makeconst { typedef const T& type; }; -template struct ei_makeconst { typedef const T* type; }; -template struct ei_makeconst { typedef const T* type; }; +template struct add_const { typedef const T type; }; +template struct add_const { typedef T& type; }; -template struct ei_makeconst_return_type -{ - typedef typename ei_meta_if::ret, T, typename ei_makeconst::type>::ret type; -}; +template struct is_const { enum { value = 0 }; }; +template struct is_const { enum { value = 1 }; }; + +template struct add_const_on_value_type { typedef const T type; }; +template struct add_const_on_value_type { typedef T const& type; }; +template struct add_const_on_value_type { typedef T const* type; }; +template struct add_const_on_value_type { typedef T const* const type; }; +template struct add_const_on_value_type { typedef T const* const type; }; /** \internal Allows to enable/disable an overload * according to a compile time condition. */ -template struct ei_enable_if; +template struct enable_if; -template struct ei_enable_if +template struct enable_if { typedef T type; }; /** \internal @@ -108,67 +110,67 @@ template struct ei_enable_if * upcoming next STL generation (using a templated result member). 
* If none of these members is provided, then the type of the first argument is returned. FIXME, that behavior is a pretty bad hack. */ -template struct ei_result_of {}; +template struct result_of {}; -struct ei_has_none {int a[1];}; -struct ei_has_std_result_type {int a[2];}; -struct ei_has_tr1_result {int a[3];}; +struct has_none {int a[1];}; +struct has_std_result_type {int a[2];}; +struct has_tr1_result {int a[3];}; -template -struct ei_unary_result_of_select {typedef ArgType type;}; +template +struct unary_result_of_select {typedef ArgType type;}; template -struct ei_unary_result_of_select {typedef typename Func::result_type type;}; +struct unary_result_of_select {typedef typename Func::result_type type;}; template -struct ei_unary_result_of_select {typedef typename Func::template result::type type;}; +struct unary_result_of_select {typedef typename Func::template result::type type;}; template -struct ei_result_of { +struct result_of { template - static ei_has_std_result_type testFunctor(T const *, typename T::result_type const * = 0); + static has_std_result_type testFunctor(T const *, typename T::result_type const * = 0); template - static ei_has_tr1_result testFunctor(T const *, typename T::template result::type const * = 0); - static ei_has_none testFunctor(...); + static has_tr1_result testFunctor(T const *, typename T::template result::type const * = 0); + static has_none testFunctor(...); // note that the following indirection is needed for gcc-3.3 enum {FunctorType = sizeof(testFunctor(static_cast(0)))}; - typedef typename ei_unary_result_of_select::type type; + typedef typename unary_result_of_select::type type; }; -template -struct ei_binary_result_of_select {typedef ArgType0 type;}; +template +struct binary_result_of_select {typedef ArgType0 type;}; template -struct ei_binary_result_of_select +struct binary_result_of_select {typedef typename Func::result_type type;}; template -struct ei_binary_result_of_select +struct binary_result_of_select {typedef 
typename Func::template result::type type;}; template -struct ei_result_of { +struct result_of { template - static ei_has_std_result_type testFunctor(T const *, typename T::result_type const * = 0); + static has_std_result_type testFunctor(T const *, typename T::result_type const * = 0); template - static ei_has_tr1_result testFunctor(T const *, typename T::template result::type const * = 0); - static ei_has_none testFunctor(...); + static has_tr1_result testFunctor(T const *, typename T::template result::type const * = 0); + static has_none testFunctor(...); // note that the following indirection is needed for gcc-3.3 enum {FunctorType = sizeof(testFunctor(static_cast(0)))}; - typedef typename ei_binary_result_of_select::type type; + typedef typename binary_result_of_select::type type; }; /** \internal In short, it computes int(sqrt(\a Y)) with \a Y an integer. - * Usage example: \code ei_meta_sqrt<1023>::ret \endcode + * Usage example: \code meta_sqrt<1023>::ret \endcode */ template Y))) > // use ?: instead of || just to shut up a stupid gcc 4.3 warning -class ei_meta_sqrt +class meta_sqrt { enum { MidX = (InfX+SupX)/2, @@ -177,49 +179,51 @@ class ei_meta_sqrt NewSup = int(TakeInf) ? int(MidX) : SupX }; public: - enum { ret = ei_meta_sqrt::ret }; + enum { ret = meta_sqrt::ret }; }; template -class ei_meta_sqrt { public: enum { ret = (SupX*SupX <= Y) ? SupX : InfX }; }; +class meta_sqrt { public: enum { ret = (SupX*SupX <= Y) ? 
SupX : InfX }; }; /** \internal determines whether the product of two numeric types is allowed and what the return type is */ -template struct ei_scalar_product_traits; +template struct scalar_product_traits; -template struct ei_scalar_product_traits +template struct scalar_product_traits { //enum { Cost = NumTraits::MulCost }; typedef T ReturnType; }; -template struct ei_scalar_product_traits > +template struct scalar_product_traits > { //enum { Cost = 2*NumTraits::MulCost }; typedef std::complex ReturnType; }; -template struct ei_scalar_product_traits, T> +template struct scalar_product_traits, T> { //enum { Cost = 2*NumTraits::MulCost }; typedef std::complex ReturnType; }; -// FIXME quick workaround around current limitation of ei_result_of +// FIXME quick workaround around current limitation of result_of // template -// struct ei_result_of(ArgType0,ArgType1)> { -// typedef typename ei_scalar_product_traits::type, typename ei_cleantype::type>::ReturnType type; +// struct result_of(ArgType0,ArgType1)> { +// typedef typename scalar_product_traits::type, typename remove_all::type>::ReturnType type; // }; -template struct ei_is_diagonal +template struct is_diagonal { enum { ret = false }; }; -template struct ei_is_diagonal > +template struct is_diagonal > { enum { ret = true }; }; -template struct ei_is_diagonal > +template struct is_diagonal > { enum { ret = true }; }; -template struct ei_is_diagonal > +template struct is_diagonal > { enum { ret = true }; }; +} // end namespace internal + #endif // EIGEN_META_H diff --git a/gtsam/3rdparty/Eigen/src/Core/util/ReenableStupidWarnings.h b/gtsam/3rdparty/Eigen/src/Core/util/ReenableStupidWarnings.h new file mode 100644 index 000000000..5ddfbd4aa --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Core/util/ReenableStupidWarnings.h @@ -0,0 +1,14 @@ +#ifdef EIGEN_WARNINGS_DISABLED +#undef EIGEN_WARNINGS_DISABLED + +#ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS + #ifdef _MSC_VER + #pragma warning( pop ) + #elif defined 
__INTEL_COMPILER + #pragma warning pop + #elif defined __clang__ + #pragma clang diagnostic pop + #endif +#endif + +#endif // EIGEN_WARNINGS_DISABLED diff --git a/gtsam/3rdparty/Eigen/src/Core/util/StaticAssert.h b/gtsam/3rdparty/Eigen/src/Core/util/StaticAssert.h index 323273e6a..99c7c9972 100644 --- a/gtsam/3rdparty/Eigen/src/Core/util/StaticAssert.h +++ b/gtsam/3rdparty/Eigen/src/Core/util/StaticAssert.h @@ -29,11 +29,11 @@ /* Some notes on Eigen's static assertion mechanism: * * - in EIGEN_STATIC_ASSERT(CONDITION,MSG) the parameter CONDITION must be a compile time boolean - * expression, and MSG an enum listed in struct ei_static_assert + * expression, and MSG an enum listed in struct internal::static_assertion * * - define EIGEN_NO_STATIC_ASSERT to disable them (and save compilation time) * in that case, the static assertion is converted to the following runtime assert: - * ei_assert(CONDITION && "MSG") + * eigen_assert(CONDITION && "MSG") * * - currently EIGEN_STATIC_ASSERT can only be used in function scope * @@ -48,11 +48,13 @@ #else // not CXX0X + namespace internal { + template - struct ei_static_assert {}; + struct static_assertion {}; template<> - struct ei_static_assert + struct static_assertion { enum { YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX, @@ -90,22 +92,27 @@ PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1, THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS, YOU_CANNOT_MIX_ARRAYS_AND_MATRICES, - YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION + YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION, + THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY, + YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT, + THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS }; }; + } // end namespace internal + // Specialized implementation for MSVC to avoid "conditional // expression is constant" warnings. This implementation doesn't // appear to work under GCC, hence the multiple implementations. 
#ifdef _MSC_VER #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \ - {Eigen::ei_static_assert<(CONDITION)>::MSG;} + {Eigen::internal::static_assertion::MSG;} #else #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \ - if (Eigen::ei_static_assert<(CONDITION)>::MSG) {} + if (Eigen::internal::static_assertion::MSG) {} #endif @@ -113,7 +120,7 @@ #else // EIGEN_NO_STATIC_ASSERT - #define EIGEN_STATIC_ASSERT(CONDITION,MSG) ei_assert((CONDITION) && #MSG); + #define EIGEN_STATIC_ASSERT(CONDITION,MSG) eigen_assert((CONDITION) && #MSG); #endif // EIGEN_NO_STATIC_ASSERT @@ -164,8 +171,14 @@ ) \ ) -#define EIGEN_STATIC_ASSERT_NON_INTEGER(TYPE) \ - EIGEN_STATIC_ASSERT(!NumTraits::IsInteger, THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES) +#ifdef EIGEN2_SUPPORT + #define EIGEN_STATIC_ASSERT_NON_INTEGER(TYPE) \ + eigen_assert(!NumTraits::IsInteger); +#else + #define EIGEN_STATIC_ASSERT_NON_INTEGER(TYPE) \ + EIGEN_STATIC_ASSERT(!NumTraits::IsInteger, THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES) +#endif + // static assertion failing if it is guaranteed at compile-time that the two matrix expression types have different sizes #define EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(TYPE0,TYPE1) \ @@ -173,4 +186,13 @@ EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1),\ YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES) +#define EIGEN_STATIC_ASSERT_SIZE_1x1(TYPE) \ + EIGEN_STATIC_ASSERT((TYPE::RowsAtCompileTime == 1 || TYPE::RowsAtCompileTime == Dynamic) && \ + (TYPE::ColsAtCompileTime == 1 || TYPE::ColsAtCompileTime == Dynamic), \ + THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS) + +#define EIGEN_STATIC_ASSERT_LVALUE(Derived) \ + EIGEN_STATIC_ASSERT(internal::is_lvalue::value, \ + THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY) + #endif // EIGEN_STATIC_ASSERT_H diff --git a/gtsam/3rdparty/Eigen/src/Core/util/XprHelper.h b/gtsam/3rdparty/Eigen/src/Core/util/XprHelper.h index 61ffe702f..9047c5f83 100644 --- a/gtsam/3rdparty/Eigen/src/Core/util/XprHelper.h +++ b/gtsam/3rdparty/Eigen/src/Core/util/XprHelper.h @@ -1,4 +1,4 @@ -// 
// This file is part of Eigen, a lightweight C++ template library +// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud @@ -37,46 +37,48 @@ #define EIGEN_EMPTY_STRUCT_CTOR(X) #endif -//classes inheriting ei_no_assignment_operator don't generate a default operator=. -class ei_no_assignment_operator +typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex; + +namespace internal { + +//classes inheriting no_assignment_operator don't generate a default operator=. +class no_assignment_operator { private: - ei_no_assignment_operator& operator=(const ei_no_assignment_operator&); + no_assignment_operator& operator=(const no_assignment_operator&); }; -typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex; - /** \internal return the index type with the largest number of bits */ template -struct ei_promote_index_type +struct promote_index_type { - typedef typename ei_meta_if<(sizeof(I1)::ret type; + typedef typename conditional<(sizeof(I1)::type type; }; /** \internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that * can be accessed using value() and setValue(). * Otherwise, this class is an empty structure and value() just returns the template parameter Value. 
*/ -template class ei_variable_if_dynamic +template class variable_if_dynamic { public: - EIGEN_EMPTY_STRUCT_CTOR(ei_variable_if_dynamic) - explicit ei_variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); ei_assert(v == T(Value)); } + EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamic) + explicit variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); assert(v == T(Value)); } static T value() { return T(Value); } void setValue(T) {} }; -template class ei_variable_if_dynamic +template class variable_if_dynamic { T m_value; - ei_variable_if_dynamic() { ei_assert(false); } + variable_if_dynamic() { assert(false); } public: - explicit ei_variable_if_dynamic(T value) : m_value(value) {} + explicit variable_if_dynamic(T value) : m_value(value) {} T value() const { return m_value; } void setValue(T value) { m_value = value; } }; -template struct ei_functor_traits +template struct functor_traits { enum { @@ -85,9 +87,9 @@ template struct ei_functor_traits }; }; -template struct ei_packet_traits; +template struct packet_traits; -template struct ei_unpacket_traits +template struct unpacket_traits { typedef T type; enum {size=1}; @@ -100,7 +102,7 @@ template class ei_make_proper_matrix_type +> class make_proper_matrix_type { enum { IsColVector = _Cols==1 && _Rows!=1, @@ -114,7 +116,7 @@ template -class ei_compute_matrix_flags +class compute_matrix_flags { enum { row_major_bit = Options&RowMajor ? RowMajorBit : 0, @@ -123,10 +125,10 @@ class ei_compute_matrix_flags aligned_bit = ( ((Options&DontAlign)==0) - && ei_packet_traits::Vectorizable + && packet_traits::Vectorizable && ( #if EIGEN_ALIGN_STATICALLY - ((!is_dynamic_size_storage) && (((MaxCols*MaxRows) % ei_packet_traits::size) == 0)) + ((!is_dynamic_size_storage) && (((MaxCols*MaxRows) % packet_traits::size) == 0)) #else 0 #endif @@ -141,95 +143,95 @@ class ei_compute_matrix_flags ) ) ? AlignedBit : 0, - packet_access_bit = ei_packet_traits::Vectorizable && aligned_bit ? 
PacketAccessBit : 0 + packet_access_bit = packet_traits::Vectorizable && aligned_bit ? PacketAccessBit : 0 }; public: enum { ret = LinearAccessBit | LvalueBit | DirectAccessBit | NestByRefBit | packet_access_bit | row_major_bit | aligned_bit }; }; -template struct ei_size_at_compile_time +template struct size_at_compile_time { enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols }; }; -/* ei_plain_matrix_type : the difference from ei_eval is that ei_plain_matrix_type is always a plain matrix type, - * whereas ei_eval is a const reference in the case of a matrix +/* plain_matrix_type : the difference from eval is that plain_matrix_type is always a plain matrix type, + * whereas eval is a const reference in the case of a matrix */ -template::StorageKind> struct ei_plain_matrix_type; -template struct ei_plain_matrix_type_dense; -template struct ei_plain_matrix_type +template::StorageKind> struct plain_matrix_type; +template struct plain_matrix_type_dense; +template struct plain_matrix_type { - typedef typename ei_plain_matrix_type_dense::XprKind>::type type; + typedef typename plain_matrix_type_dense::XprKind>::type type; }; -template struct ei_plain_matrix_type_dense +template struct plain_matrix_type_dense { - typedef Matrix::Scalar, - ei_traits::RowsAtCompileTime, - ei_traits::ColsAtCompileTime, - AutoAlign | (ei_traits::Flags&RowMajorBit ? RowMajor : ColMajor), - ei_traits::MaxRowsAtCompileTime, - ei_traits::MaxColsAtCompileTime + typedef Matrix::Scalar, + traits::RowsAtCompileTime, + traits::ColsAtCompileTime, + AutoAlign | (traits::Flags&RowMajorBit ? RowMajor : ColMajor), + traits::MaxRowsAtCompileTime, + traits::MaxColsAtCompileTime > type; }; -template struct ei_plain_matrix_type_dense +template struct plain_matrix_type_dense { - typedef Array::Scalar, - ei_traits::RowsAtCompileTime, - ei_traits::ColsAtCompileTime, - AutoAlign | (ei_traits::Flags&RowMajorBit ? 
RowMajor : ColMajor), - ei_traits::MaxRowsAtCompileTime, - ei_traits::MaxColsAtCompileTime + typedef Array::Scalar, + traits::RowsAtCompileTime, + traits::ColsAtCompileTime, + AutoAlign | (traits::Flags&RowMajorBit ? RowMajor : ColMajor), + traits::MaxRowsAtCompileTime, + traits::MaxColsAtCompileTime > type; }; -/* ei_eval : the return type of eval(). For matrices, this is just a const reference +/* eval : the return type of eval(). For matrices, this is just a const reference * in order to avoid a useless copy */ -template::StorageKind> struct ei_eval; +template::StorageKind> struct eval; -template struct ei_eval +template struct eval { - typedef typename ei_plain_matrix_type::type type; + typedef typename plain_matrix_type::type type; // typedef typename T::PlainObject type; -// typedef T::Matrix::Scalar, -// ei_traits::RowsAtCompileTime, -// ei_traits::ColsAtCompileTime, -// AutoAlign | (ei_traits::Flags&RowMajorBit ? RowMajor : ColMajor), -// ei_traits::MaxRowsAtCompileTime, -// ei_traits::MaxColsAtCompileTime +// typedef T::Matrix::Scalar, +// traits::RowsAtCompileTime, +// traits::ColsAtCompileTime, +// AutoAlign | (traits::Flags&RowMajorBit ? 
RowMajor : ColMajor), +// traits::MaxRowsAtCompileTime, +// traits::MaxColsAtCompileTime // > type; }; // for matrices, no need to evaluate, just use a const reference to avoid a useless copy template -struct ei_eval, Dense> +struct eval, Dense> { typedef const Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type; }; template -struct ei_eval, Dense> +struct eval, Dense> { typedef const Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type; }; -/* ei_plain_matrix_type_column_major : same as ei_plain_matrix_type but guaranteed to be column-major +/* plain_matrix_type_column_major : same as plain_matrix_type but guaranteed to be column-major */ -template struct ei_plain_matrix_type_column_major +template struct plain_matrix_type_column_major { - enum { Rows = ei_traits::RowsAtCompileTime, - Cols = ei_traits::ColsAtCompileTime, - MaxRows = ei_traits::MaxRowsAtCompileTime, - MaxCols = ei_traits::MaxColsAtCompileTime + enum { Rows = traits::RowsAtCompileTime, + Cols = traits::ColsAtCompileTime, + MaxRows = traits::MaxRowsAtCompileTime, + MaxCols = traits::MaxColsAtCompileTime }; - typedef Matrix::Scalar, + typedef Matrix::Scalar, Rows, Cols, (MaxRows==1&&MaxCols!=1) ? 
RowMajor : ColMajor, @@ -238,16 +240,16 @@ template struct ei_plain_matrix_type_column_major > type; }; -/* ei_plain_matrix_type_row_major : same as ei_plain_matrix_type but guaranteed to be row-major +/* plain_matrix_type_row_major : same as plain_matrix_type but guaranteed to be row-major */ -template struct ei_plain_matrix_type_row_major +template struct plain_matrix_type_row_major { - enum { Rows = ei_traits::RowsAtCompileTime, - Cols = ei_traits::ColsAtCompileTime, - MaxRows = ei_traits::MaxRowsAtCompileTime, - MaxCols = ei_traits::MaxColsAtCompileTime + enum { Rows = traits::RowsAtCompileTime, + Cols = traits::ColsAtCompileTime, + MaxRows = traits::MaxRowsAtCompileTime, + MaxCols = traits::MaxColsAtCompileTime }; - typedef Matrix::Scalar, + typedef Matrix::Scalar, Rows, Cols, (MaxCols==1&&MaxRows!=1) ? RowMajor : ColMajor, @@ -257,16 +259,16 @@ template struct ei_plain_matrix_type_row_major }; // we should be able to get rid of this one too -template struct ei_must_nest_by_value { enum { ret = false }; }; +template struct must_nest_by_value { enum { ret = false }; }; template -struct ei_is_reference +struct is_reference { enum { ret = false }; }; template -struct ei_is_reference +struct is_reference { enum { ret = true }; }; @@ -277,13 +279,13 @@ struct ei_is_reference * objects which should generate no copying overhead. **/ template -struct ei_ref_selector +struct ref_selector { - typedef typename ei_meta_if< - bool(ei_traits::Flags & NestByRefBit), + typedef typename conditional< + bool(traits::Flags & NestByRefBit), T const&, T - >::ret type; + >::type type; }; /** \internal Determines how a given expression should be nested into another one. @@ -298,70 +300,59 @@ struct ei_ref_selector * * Example. Suppose that a, b, and c are of type Matrix3d. The user forms the expression a*(b+c). * b+c is an expression "sum of matrices", which we will denote by S. 
In order to determine how to nest it, - * the Product expression uses: ei_nested::ret, which turns out to be Matrix3d because the internal logic of - * ei_nested determined that in this case it was better to evaluate the expression b+c into a temporary. On the other hand, - * since a is of type Matrix3d, the Product expression nests it as ei_nested::ret, which turns out to be - * const Matrix3d&, because the internal logic of ei_nested determined that since a was already a matrix, there was no point + * the Product expression uses: nested::ret, which turns out to be Matrix3d because the internal logic of + * nested determined that in this case it was better to evaluate the expression b+c into a temporary. On the other hand, + * since a is of type Matrix3d, the Product expression nests it as nested::ret, which turns out to be + * const Matrix3d&, because the internal logic of nested determined that since a was already a matrix, there was no point * in copying it into another matrix. */ -template::type> struct ei_nested +template::type> struct nested { - // this is a direct port of the logic used when Dynamic was 33331, to make an atomic commit. enum { - _ScalarReadCost = NumTraits::Scalar>::ReadCost, - ScalarReadCost = _ScalarReadCost == Dynamic ? 33331 : int(_ScalarReadCost), - _CoeffReadCost = int(ei_traits::CoeffReadCost), - CoeffReadCost = _CoeffReadCost == Dynamic ? 33331 : int(_CoeffReadCost), - N = n == Dynamic ? 33331 : n, - CostEval = (N+1) * int(ScalarReadCost), - CostNoEval = (N-1) * int(CoeffReadCost) + // for the purpose of this test, to keep it reasonably simple, we arbitrarily choose a value of Dynamic values. + // the choice of 10000 makes it larger than any practical fixed value and even most dynamic values. + // in extreme cases where these assumptions would be wrong, we would still at worst suffer performance issues + // (poor choice of temporaries). + // it's important that this value can still be squared without integer overflowing. 
+ DynamicAsInteger = 10000, + ScalarReadCost = NumTraits::Scalar>::ReadCost, + ScalarReadCostAsInteger = ScalarReadCost == Dynamic ? DynamicAsInteger : ScalarReadCost, + CoeffReadCost = traits::CoeffReadCost, + CoeffReadCostAsInteger = CoeffReadCost == Dynamic ? DynamicAsInteger : CoeffReadCost, + NAsInteger = n == Dynamic ? int(DynamicAsInteger) : n, + CostEvalAsInteger = (NAsInteger+1) * ScalarReadCostAsInteger + CoeffReadCostAsInteger, + CostNoEvalAsInteger = NAsInteger * CoeffReadCostAsInteger }; - typedef typename ei_meta_if< - ( int(ei_traits::Flags) & EvalBeforeNestingBit ) || - ( int(CostEval) <= int(CostNoEval) ), + typedef typename conditional< + ( (int(traits::Flags) & EvalBeforeNestingBit) || + int(CostEvalAsInteger) < int(CostNoEvalAsInteger) + ), PlainObject, - typename ei_ref_selector::type - >::ret type; - -/* this is what the above logic should be updated to look like: - enum { - ScalarReadCost = NumTraits::Scalar>::ReadCost, - CoeffReadCost = ei_traits::CoeffReadCost, - CostEval = n == Dynamic || ScalarReadCost == Dynamic ? int(Dynamic) : (n+1) * int(ScalarReadCost), - CostNoEval = n == Dynamic || (CoeffReadCost == Dynamic && n>1) ? int(Dynamic) : (n-1) * int(CoeffReadCost) - }; - - typedef typename ei_meta_if< - ( int(ei_traits::Flags) & EvalBeforeNestingBit ) || - ( int(CostNoEval) == Dynamic ? true - : int(CostEval) == Dynamic ? 
false - : int(CostEval) <= int(CostNoEval) ), - PlainObject, - typename ei_ref_selector::type - >::ret type; -*/ + typename ref_selector::type + >::type type; }; -template struct ei_are_flags_consistent +template +T* const_cast_ptr(const T* ptr) { - enum { ret = EIGEN_IMPLIES(bool(Flags&DirectAccessBit), bool(Flags&LvalueBit)) }; -}; + return const_cast(ptr); +} -template::XprKind> -struct ei_dense_xpr_base +template::XprKind> +struct dense_xpr_base { - /* ei_dense_xpr_base should only ever be used on dense expressions, thus falling either into the MatrixXpr or into the ArrayXpr cases */ + /* dense_xpr_base should only ever be used on dense expressions, thus falling either into the MatrixXpr or into the ArrayXpr cases */ }; template -struct ei_dense_xpr_base +struct dense_xpr_base { typedef MatrixBase type; }; template -struct ei_dense_xpr_base +struct dense_xpr_base { typedef ArrayBase type; }; @@ -369,82 +360,101 @@ struct ei_dense_xpr_base /** \internal Helper base class to add a scalar multiple operator * overloads for complex types */ template::ret > -struct ei_special_scalar_op_base : public DenseCoeffsBase + bool EnableIt = !is_same::value > +struct special_scalar_op_base : public DenseCoeffsBase { // dummy operator* so that the - // "using ei_special_scalar_op_base::operator*" compiles + // "using special_scalar_op_base::operator*" compiles void operator*() const; }; template -struct ei_special_scalar_op_base : public DenseCoeffsBase +struct special_scalar_op_base : public DenseCoeffsBase { - const CwiseUnaryOp, Derived> + const CwiseUnaryOp, Derived> operator*(const OtherScalar& scalar) const { - return CwiseUnaryOp, Derived> - (*static_cast(this), ei_scalar_multiple2_op(scalar)); + return CwiseUnaryOp, Derived> + (*static_cast(this), scalar_multiple2_op(scalar)); } - inline friend const CwiseUnaryOp, Derived> + inline friend const CwiseUnaryOp, Derived> operator*(const OtherScalar& scalar, const Derived& matrix) - { return 
static_cast(matrix).operator*(scalar); } + { return static_cast(matrix).operator*(scalar); } }; -template struct HNormalizedReturnType { - - enum { - SizeAtCompileTime = ExpressionType::SizeAtCompileTime, - SizeMinusOne = SizeAtCompileTime==Dynamic ? Dynamic : SizeAtCompileTime-1 - }; - typedef Block::ColsAtCompileTime==1 ? SizeMinusOne : 1, - ei_traits::ColsAtCompileTime==1 ? 1 : SizeMinusOne> StartMinusOne; - typedef CwiseUnaryOp::Scalar>, - StartMinusOne > Type; -}; - -template struct ei_cast_return_type +template struct cast_return_type { typedef typename XprType::Scalar CurrentScalarType; - typedef typename ei_cleantype::type _CastType; + typedef typename remove_all::type _CastType; typedef typename _CastType::Scalar NewScalarType; - typedef typename ei_meta_if::ret, - const XprType&,CastType>::ret type; + typedef typename conditional::value, + const XprType&,CastType>::type type; }; -template struct ei_promote_storage_type; +template struct promote_storage_type; -template struct ei_promote_storage_type +template struct promote_storage_type { typedef A ret; }; -/** \internal gives the plain matrix type to store a row/column/diagonal of a matrix type. +/** \internal gives the plain matrix or array type to store a row/column/diagonal of a matrix type. * \param Scalar optional parameter allowing to pass a different scalar type than the one of the MatrixType. 
*/ -template -struct ei_plain_row_type +template +struct plain_row_type { - typedef Matrix type; + typedef Matrix MatrixRowType; + typedef Array ArrayRowType; + + typedef typename conditional< + is_same< typename traits::XprKind, MatrixXpr >::value, + MatrixRowType, + ArrayRowType + >::type type; }; -template -struct ei_plain_col_type +template +struct plain_col_type { - typedef Matrix type; + typedef Matrix MatrixColType; + typedef Array ArrayColType; + + typedef typename conditional< + is_same< typename traits::XprKind, MatrixXpr >::value, + MatrixColType, + ArrayColType + >::type type; }; -template -struct ei_plain_diag_type +template +struct plain_diag_type { - enum { diag_size = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime), - max_diag_size = EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime) + enum { diag_size = EIGEN_SIZE_MIN_PREFER_DYNAMIC(ExpressionType::RowsAtCompileTime, ExpressionType::ColsAtCompileTime), + max_diag_size = EIGEN_SIZE_MIN_PREFER_FIXED(ExpressionType::MaxRowsAtCompileTime, ExpressionType::MaxColsAtCompileTime) }; - typedef Matrix type; + typedef Matrix MatrixDiagType; + typedef Array ArrayDiagType; + + typedef typename conditional< + is_same< typename traits::XprKind, MatrixXpr >::value, + MatrixDiagType, + ArrayDiagType + >::type type; }; +template +struct is_lvalue +{ + enum { value = !bool(is_const::value) && + bool(traits::Flags & LvalueBit) }; +}; + +} // end namespace internal + #endif // EIGEN_XPRHELPER_H diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/Block.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/Block.h index ba0e39650..bc28051e0 100644 --- a/gtsam/3rdparty/Eigen/src/Eigen2Support/Block.h +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/Block.h @@ -49,7 +49,7 @@ inline Block DenseBase switch(type) { default: - ei_assert(false && "Bad corner type."); + eigen_assert(false && "Bad corner type."); case TopLeft: return Block(derived(), 0, 0, 
cRows, cCols); case TopRight: @@ -69,7 +69,7 @@ DenseBase::corner(CornerType type, Index cRows, Index cCols) const switch(type) { default: - ei_assert(false && "Bad corner type."); + eigen_assert(false && "Bad corner type."); case TopLeft: return Block(derived(), 0, 0, cRows, cCols); case TopRight: @@ -101,7 +101,7 @@ DenseBase::corner(CornerType type) switch(type) { default: - ei_assert(false && "Bad corner type."); + eigen_assert(false && "Bad corner type."); case TopLeft: return Block(derived(), 0, 0); case TopRight: @@ -122,7 +122,7 @@ DenseBase::corner(CornerType type) const switch(type) { default: - ei_assert(false && "Bad corner type."); + eigen_assert(false && "Bad corner type."); case TopLeft: return Block(derived(), 0, 0); case TopRight: diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/CMakeLists.txt b/gtsam/3rdparty/Eigen/src/Eigen2Support/CMakeLists.txt index 2d635042e..7ae41b3cb 100644 --- a/gtsam/3rdparty/Eigen/src/Eigen2Support/CMakeLists.txt +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/CMakeLists.txt @@ -4,3 +4,5 @@ INSTALL(FILES ${Eigen_Eigen2Support_SRCS} DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Eigen2Support COMPONENT Devel ) + +ADD_SUBDIRECTORY(Geometry) \ No newline at end of file diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/Cwise.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/Cwise.h index 1489f8f79..c619d389c 100644 --- a/gtsam/3rdparty/Eigen/src/Eigen2Support/Cwise.h +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/Cwise.h @@ -29,17 +29,17 @@ /** \internal * convenient macro to defined the return type of a cwise binary operation */ #define EIGEN_CWISE_BINOP_RETURN_TYPE(OP) \ - CwiseBinaryOp::Scalar>, ExpressionType, OtherDerived> + CwiseBinaryOp::Scalar>, ExpressionType, OtherDerived> /** \internal * convenient macro to defined the return type of a cwise unary operation */ #define EIGEN_CWISE_UNOP_RETURN_TYPE(OP) \ - CwiseUnaryOp::Scalar>, ExpressionType> + CwiseUnaryOp::Scalar>, ExpressionType> /** \internal * convenient macro to 
defined the return type of a cwise comparison to a scalar */ #define EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(OP) \ - CwiseBinaryOp::Scalar>, ExpressionType, \ + CwiseBinaryOp::Scalar>, ExpressionType, \ typename ExpressionType::ConstantReturnType > /** \class Cwise @@ -55,16 +55,19 @@ * Example: \include MatrixBase_cwise_const.cpp * Output: \verbinclude MatrixBase_cwise_const.out * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_CWISE_PLUGIN. + * * \sa MatrixBase::cwise() const, MatrixBase::cwise() */ template class Cwise { public: - typedef typename ei_traits::Scalar Scalar; - typedef typename ei_meta_if::ret, - ExpressionType, const ExpressionType&>::ret ExpressionTypeNested; - typedef CwiseUnaryOp, ExpressionType> ScalarAddReturnType; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::conditional::ret, + ExpressionType, const ExpressionType&>::type ExpressionTypeNested; + typedef CwiseUnaryOp, ExpressionType> ScalarAddReturnType; inline Cwise(const ExpressionType& matrix) : m_matrix(matrix) {} @@ -76,28 +79,28 @@ template class Cwise operator*(const MatrixBase &other) const; template - const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op) + const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op) operator/(const MatrixBase &other) const; template - const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op) + const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op) min(const MatrixBase &other) const; template - const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op) + const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op) max(const MatrixBase &other) const; - const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs_op) abs() const; - const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs2_op) abs2() const; - const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_square_op) square() const; - const 
EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_cube_op) cube() const; - const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_inverse_op) inverse() const; - const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_sqrt_op) sqrt() const; - const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_exp_op) exp() const; - const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_log_op) log() const; - const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_cos_op) cos() const; - const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_sin_op) sin() const; - const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_pow_op) pow(const Scalar& exponent) const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs_op) abs() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs2_op) abs2() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_square_op) square() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_cube_op) cube() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_inverse_op) inverse() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_sqrt_op) sqrt() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_exp_op) exp() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_log_op) log() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_cos_op) cos() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_sin_op) sin() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_pow_op) pow(const Scalar& exponent) const; const ScalarAddReturnType operator+(const Scalar& scalar) const; diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/CwiseOperators.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/CwiseOperators.h index 74feee4a0..0c7e9db6d 100644 --- a/gtsam/3rdparty/Eigen/src/Eigen2Support/CwiseOperators.h +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/CwiseOperators.h @@ -32,7 +32,7 @@ /** \deprecated ArrayBase::abs() */ template -EIGEN_STRONG_INLINE const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs_op) +EIGEN_STRONG_INLINE const 
EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs_op) Cwise::abs() const { return _expression(); @@ -40,7 +40,7 @@ Cwise::abs() const /** \deprecated ArrayBase::abs2() */ template -EIGEN_STRONG_INLINE const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs2_op) +EIGEN_STRONG_INLINE const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs2_op) Cwise::abs2() const { return _expression(); @@ -48,7 +48,7 @@ Cwise::abs2() const /** \deprecated ArrayBase::exp() */ template -inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_exp_op) +inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_exp_op) Cwise::exp() const { return _expression(); @@ -56,7 +56,7 @@ Cwise::exp() const /** \deprecated ArrayBase::log() */ template -inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_log_op) +inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_log_op) Cwise::log() const { return _expression(); @@ -74,10 +74,10 @@ Cwise::operator*(const MatrixBase &other) const /** \deprecated ArrayBase::operator/() */ template template -EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op) +EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op) Cwise::operator/(const MatrixBase &other) const { - return EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)(_expression(), other.derived()); + return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)(_expression(), other.derived()); } /** \deprecated ArrayBase::operator*=() */ @@ -99,19 +99,19 @@ inline ExpressionType& Cwise::operator/=(const MatrixBase template -EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op) +EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op) Cwise::min(const MatrixBase &other) const { - return EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op)(_expression(), other.derived()); + return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op)(_expression(), other.derived()); } /** \deprecated ArrayBase::max() */ template 
template -EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op) +EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op) Cwise::max(const MatrixBase &other) const { - return EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op)(_expression(), other.derived()); + return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op)(_expression(), other.derived()); } /*************************************************************************** @@ -122,7 +122,7 @@ Cwise::max(const MatrixBase &other) const /** \deprecated ArrayBase::sqrt() */ template -inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_sqrt_op) +inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_sqrt_op) Cwise::sqrt() const { return _expression(); @@ -130,7 +130,7 @@ Cwise::sqrt() const /** \deprecated ArrayBase::cos() */ template -inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_cos_op) +inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_cos_op) Cwise::cos() const { return _expression(); @@ -139,7 +139,7 @@ Cwise::cos() const /** \deprecated ArrayBase::sin() */ template -inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_sin_op) +inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_sin_op) Cwise::sin() const { return _expression(); @@ -148,16 +148,16 @@ Cwise::sin() const /** \deprecated ArrayBase::log() */ template -inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_pow_op) +inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_pow_op) Cwise::pow(const Scalar& exponent) const { - return EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_pow_op)(_expression(), ei_scalar_pow_op(exponent)); + return EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_pow_op)(_expression(), internal::scalar_pow_op(exponent)); } /** \deprecated ArrayBase::inverse() */ template -inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_inverse_op) +inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_inverse_op) Cwise::inverse() const { return _expression(); @@ -165,7 +165,7 @@ 
Cwise::inverse() const /** \deprecated ArrayBase::square() */ template -inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_square_op) +inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_square_op) Cwise::square() const { return _expression(); @@ -173,7 +173,7 @@ Cwise::square() const /** \deprecated ArrayBase::cube() */ template -inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_cube_op) +inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_cube_op) Cwise::cube() const { return _expression(); @@ -299,7 +299,7 @@ template inline const typename Cwise::ScalarAddReturnType Cwise::operator+(const Scalar& scalar) const { - return typename Cwise::ScalarAddReturnType(m_matrix, ei_scalar_add_op(scalar)); + return typename Cwise::ScalarAddReturnType(m_matrix, internal::scalar_add_op(scalar)); } /** \deprecated ArrayBase::operator+=(Scalar) */ diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/AlignedBox.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/AlignedBox.h new file mode 100644 index 000000000..1c915be22 --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/AlignedBox.h @@ -0,0 +1,170 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. 
+// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway + +/** \geometry_module \ingroup Geometry_Module + * \nonstableyet + * + * \class AlignedBox + * + * \brief An axis aligned box + * + * \param _Scalar the type of the scalar coefficients + * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. + * + * This class represents an axis aligned box as a pair of the minimal and maximal corners. + */ +template +class AlignedBox +{ +public: +EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1) + enum { AmbientDimAtCompileTime = _AmbientDim }; + typedef _Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Matrix VectorType; + + /** Default constructor initializing a null box. */ + inline explicit AlignedBox() + { if (AmbientDimAtCompileTime!=Dynamic) setNull(); } + + /** Constructs a null box with \a _dim the dimension of the ambient space. */ + inline explicit AlignedBox(int _dim) : m_min(_dim), m_max(_dim) + { setNull(); } + + /** Constructs a box with extremities \a _min and \a _max. */ + inline AlignedBox(const VectorType& _min, const VectorType& _max) : m_min(_min), m_max(_max) {} + + /** Constructs a box containing a single point \a p. 
*/ + inline explicit AlignedBox(const VectorType& p) : m_min(p), m_max(p) {} + + ~AlignedBox() {} + + /** \returns the dimension in which the box holds */ + inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : AmbientDimAtCompileTime; } + + /** \returns true if the box is null, i.e, empty. */ + inline bool isNull() const { return (m_min.cwise() > m_max).any(); } + + /** Makes \c *this a null/empty box. */ + inline void setNull() + { + m_min.setConstant( std::numeric_limits::max()); + m_max.setConstant(-std::numeric_limits::max()); + } + + /** \returns the minimal corner */ + inline const VectorType& min() const { return m_min; } + /** \returns a non const reference to the minimal corner */ + inline VectorType& min() { return m_min; } + /** \returns the maximal corner */ + inline const VectorType& max() const { return m_max; } + /** \returns a non const reference to the maximal corner */ + inline VectorType& max() { return m_max; } + + /** \returns true if the point \a p is inside the box \c *this. */ + inline bool contains(const VectorType& p) const + { return (m_min.cwise()<=p).all() && (p.cwise()<=m_max).all(); } + + /** \returns true if the box \a b is entirely inside the box \c *this. */ + inline bool contains(const AlignedBox& b) const + { return (m_min.cwise()<=b.min()).all() && (b.max().cwise()<=m_max).all(); } + + /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */ + inline AlignedBox& extend(const VectorType& p) + { m_min = m_min.cwise().min(p); m_max = m_max.cwise().max(p); return *this; } + + /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */ + inline AlignedBox& extend(const AlignedBox& b) + { m_min = m_min.cwise().min(b.m_min); m_max = m_max.cwise().max(b.m_max); return *this; } + + /** Clamps \c *this by the box \a b and returns a reference to \c *this. 
*/ + inline AlignedBox& clamp(const AlignedBox& b) + { m_min = m_min.cwise().max(b.m_min); m_max = m_max.cwise().min(b.m_max); return *this; } + + /** Translate \c *this by the vector \a t and returns a reference to \c *this. */ + inline AlignedBox& translate(const VectorType& t) + { m_min += t; m_max += t; return *this; } + + /** \returns the squared distance between the point \a p and the box \c *this, + * and zero if \a p is inside the box. + * \sa exteriorDistance() + */ + inline Scalar squaredExteriorDistance(const VectorType& p) const; + + /** \returns the distance between the point \a p and the box \c *this, + * and zero if \a p is inside the box. + * \sa squaredExteriorDistance() + */ + inline Scalar exteriorDistance(const VectorType& p) const + { return ei_sqrt(squaredExteriorDistance(p)); } + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. + */ + template + inline typename internal::cast_return_type >::type cast() const + { + return typename internal::cast_return_type >::type(*this); + } + + /** Copy constructor with scalar type conversion */ + template + inline explicit AlignedBox(const AlignedBox& other) + { + m_min = other.min().template cast(); + m_max = other.max().template cast(); + } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. 
+ * + * \sa MatrixBase::isApprox() */ + bool isApprox(const AlignedBox& other, typename NumTraits::Real prec = precision()) const + { return m_min.isApprox(other.m_min, prec) && m_max.isApprox(other.m_max, prec); } + +protected: + + VectorType m_min, m_max; +}; + +template +inline Scalar AlignedBox::squaredExteriorDistance(const VectorType& p) const +{ + Scalar dist2 = 0.; + Scalar aux; + for (int k=0; k + +#ifndef M_PI +#define M_PI 3.14159265358979323846 +#endif + +#if EIGEN2_SUPPORT_STAGE < STAGE20_RESOLVE_API_CONFLICTS +#include "RotationBase.h" +#include "Rotation2D.h" +#include "Quaternion.h" +#include "AngleAxis.h" +#include "Transform.h" +#include "Translation.h" +#include "Scaling.h" +#include "AlignedBox.h" +#include "Hyperplane.h" +#include "ParametrizedLine.h" +#endif + + +#define RotationBase eigen2_RotationBase +#define Rotation2D eigen2_Rotation2D +#define Rotation2Df eigen2_Rotation2Df +#define Rotation2Dd eigen2_Rotation2Dd + +#define Quaternion eigen2_Quaternion +#define Quaternionf eigen2_Quaternionf +#define Quaterniond eigen2_Quaterniond + +#define AngleAxis eigen2_AngleAxis +#define AngleAxisf eigen2_AngleAxisf +#define AngleAxisd eigen2_AngleAxisd + +#define Transform eigen2_Transform +#define Transform2f eigen2_Transform2f +#define Transform2d eigen2_Transform2d +#define Transform3f eigen2_Transform3f +#define Transform3d eigen2_Transform3d + +#define Translation eigen2_Translation +#define Translation2f eigen2_Translation2f +#define Translation2d eigen2_Translation2d +#define Translation3f eigen2_Translation3f +#define Translation3d eigen2_Translation3d + +#define Scaling eigen2_Scaling +#define Scaling2f eigen2_Scaling2f +#define Scaling2d eigen2_Scaling2d +#define Scaling3f eigen2_Scaling3f +#define Scaling3d eigen2_Scaling3d + +#define AlignedBox eigen2_AlignedBox + +#define Hyperplane eigen2_Hyperplane +#define ParametrizedLine eigen2_ParametrizedLine + +#define ei_toRotationMatrix eigen2_ei_toRotationMatrix +#define 
ei_quaternion_assign_impl eigen2_ei_quaternion_assign_impl +#define ei_transform_product_impl eigen2_ei_transform_product_impl + +#include "RotationBase.h" +#include "Rotation2D.h" +#include "Quaternion.h" +#include "AngleAxis.h" +#include "Transform.h" +#include "Translation.h" +#include "Scaling.h" +#include "AlignedBox.h" +#include "Hyperplane.h" +#include "ParametrizedLine.h" + +#undef ei_toRotationMatrix +#undef ei_quaternion_assign_impl +#undef ei_transform_product_impl + +#undef RotationBase +#undef Rotation2D +#undef Rotation2Df +#undef Rotation2Dd + +#undef Quaternion +#undef Quaternionf +#undef Quaterniond + +#undef AngleAxis +#undef AngleAxisf +#undef AngleAxisd + +#undef Transform +#undef Transform2f +#undef Transform2d +#undef Transform3f +#undef Transform3d + +#undef Translation +#undef Translation2f +#undef Translation2d +#undef Translation3f +#undef Translation3d + +#undef Scaling +#undef Scaling2f +#undef Scaling2d +#undef Scaling3f +#undef Scaling3d + +#undef AlignedBox + +#undef Hyperplane +#undef ParametrizedLine + +#endif // EIGEN2_GEOMETRY_MODULE_H \ No newline at end of file diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/AngleAxis.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/AngleAxis.h new file mode 100644 index 000000000..f7b2d51e3 --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/AngleAxis.h @@ -0,0 +1,226 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. 
+// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway + + +/** \geometry_module \ingroup Geometry_Module + * + * \class AngleAxis + * + * \brief Represents a 3D rotation as a rotation angle around an arbitrary 3D axis + * + * \param _Scalar the scalar type, i.e., the type of the coefficients. + * + * The following two typedefs are provided for convenience: + * \li \c AngleAxisf for \c float + * \li \c AngleAxisd for \c double + * + * \addexample AngleAxisForEuler \label How to define a rotation from Euler-angles + * + * Combined with MatrixBase::Unit{X,Y,Z}, AngleAxis can be used to easily + * mimic Euler-angles. Here is an example: + * \include AngleAxis_mimic_euler.cpp + * Output: \verbinclude AngleAxis_mimic_euler.out + * + * \note This class is not aimed to be used to store a rotation transformation, + * but rather to make easier the creation of other rotation (Quaternion, rotation Matrix) + * and transformation objects. 
+ * + * \sa class Quaternion, class Transform, MatrixBase::UnitX() + */ + +template struct ei_traits > +{ + typedef _Scalar Scalar; +}; + +template +class AngleAxis : public RotationBase,3> +{ + typedef RotationBase,3> Base; + +public: + + using Base::operator*; + + enum { Dim = 3 }; + /** the scalar type of the coefficients */ + typedef _Scalar Scalar; + typedef Matrix Matrix3; + typedef Matrix Vector3; + typedef Quaternion QuaternionType; + +protected: + + Vector3 m_axis; + Scalar m_angle; + +public: + + /** Default constructor without initialization. */ + AngleAxis() {} + /** Constructs and initialize the angle-axis rotation from an \a angle in radian + * and an \a axis which must be normalized. */ + template + inline AngleAxis(Scalar angle, const MatrixBase& axis) : m_axis(axis), m_angle(angle) {} + /** Constructs and initialize the angle-axis rotation from a quaternion \a q. */ + inline AngleAxis(const QuaternionType& q) { *this = q; } + /** Constructs and initialize the angle-axis rotation from a 3x3 rotation matrix. 
*/ + template + inline explicit AngleAxis(const MatrixBase& m) { *this = m; } + + Scalar angle() const { return m_angle; } + Scalar& angle() { return m_angle; } + + const Vector3& axis() const { return m_axis; } + Vector3& axis() { return m_axis; } + + /** Concatenates two rotations */ + inline QuaternionType operator* (const AngleAxis& other) const + { return QuaternionType(*this) * QuaternionType(other); } + + /** Concatenates two rotations */ + inline QuaternionType operator* (const QuaternionType& other) const + { return QuaternionType(*this) * other; } + + /** Concatenates two rotations */ + friend inline QuaternionType operator* (const QuaternionType& a, const AngleAxis& b) + { return a * QuaternionType(b); } + + /** Concatenates two rotations */ + inline Matrix3 operator* (const Matrix3& other) const + { return toRotationMatrix() * other; } + + /** Concatenates two rotations */ + inline friend Matrix3 operator* (const Matrix3& a, const AngleAxis& b) + { return a * b.toRotationMatrix(); } + + /** Applies rotation to vector */ + inline Vector3 operator* (const Vector3& other) const + { return toRotationMatrix() * other; } + + /** \returns the inverse rotation, i.e., an angle-axis with opposite rotation angle */ + AngleAxis inverse() const + { return AngleAxis(-m_angle, m_axis); } + + AngleAxis& operator=(const QuaternionType& q); + template + AngleAxis& operator=(const MatrixBase& m); + + template + AngleAxis& fromRotationMatrix(const MatrixBase& m); + Matrix3 toRotationMatrix(void) const; + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. 
+ */ + template + inline typename internal::cast_return_type >::type cast() const + { return typename internal::cast_return_type >::type(*this); } + + /** Copy constructor with scalar type conversion */ + template + inline explicit AngleAxis(const AngleAxis& other) + { + m_axis = other.axis().template cast(); + m_angle = Scalar(other.angle()); + } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. + * + * \sa MatrixBase::isApprox() */ + bool isApprox(const AngleAxis& other, typename NumTraits::Real prec = precision()) const + { return m_axis.isApprox(other.m_axis, prec) && ei_isApprox(m_angle,other.m_angle, prec); } +}; + +/** \ingroup Geometry_Module + * single precision angle-axis type */ +typedef AngleAxis AngleAxisf; +/** \ingroup Geometry_Module + * double precision angle-axis type */ +typedef AngleAxis AngleAxisd; + +/** Set \c *this from a quaternion. + * The axis is normalized. + */ +template +AngleAxis& AngleAxis::operator=(const QuaternionType& q) +{ + Scalar n2 = q.vec().squaredNorm(); + if (n2 < precision()*precision()) + { + m_angle = 0; + m_axis << 1, 0, 0; + } + else + { + m_angle = 2*std::acos(q.w()); + m_axis = q.vec() / ei_sqrt(n2); + } + return *this; +} + +/** Set \c *this from a 3x3 rotation matrix \a mat. + */ +template +template +AngleAxis& AngleAxis::operator=(const MatrixBase& mat) +{ + // Since a direct conversion would not be really faster, + // let's use the robust Quaternion implementation: + return *this = QuaternionType(mat); +} + +/** Constructs and \returns an equivalent 3x3 rotation matrix. 
+ */ +template +typename AngleAxis::Matrix3 +AngleAxis::toRotationMatrix(void) const +{ + Matrix3 res; + Vector3 sin_axis = ei_sin(m_angle) * m_axis; + Scalar c = ei_cos(m_angle); + Vector3 cos1_axis = (Scalar(1)-c) * m_axis; + + Scalar tmp; + tmp = cos1_axis.x() * m_axis.y(); + res.coeffRef(0,1) = tmp - sin_axis.z(); + res.coeffRef(1,0) = tmp + sin_axis.z(); + + tmp = cos1_axis.x() * m_axis.z(); + res.coeffRef(0,2) = tmp + sin_axis.y(); + res.coeffRef(2,0) = tmp - sin_axis.y(); + + tmp = cos1_axis.y() * m_axis.z(); + res.coeffRef(1,2) = tmp - sin_axis.x(); + res.coeffRef(2,1) = tmp + sin_axis.x(); + + res.diagonal() = (cos1_axis.cwise() * m_axis).cwise() + c; + + return res; +} diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/CMakeLists.txt b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/CMakeLists.txt new file mode 100644 index 000000000..c347a8f26 --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/CMakeLists.txt @@ -0,0 +1,6 @@ +FILE(GLOB Eigen_Eigen2Support_Geometry_SRCS "*.h") + +INSTALL(FILES + ${Eigen_Eigen2Support_Geometry_SRCS} + DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Eigen2Support/Geometry + ) diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Hyperplane.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Hyperplane.h new file mode 100644 index 000000000..81c4f55b1 --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Hyperplane.h @@ -0,0 +1,265 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. 
+// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway + +/** \geometry_module \ingroup Geometry_Module + * + * \class Hyperplane + * + * \brief A hyperplane + * + * A hyperplane is an affine subspace of dimension n-1 in a space of dimension n. + * For example, a hyperplane in a plane is a line; a hyperplane in 3-space is a plane. + * + * \param _Scalar the scalar type, i.e., the type of the coefficients + * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. + * Notice that the dimension of the hyperplane is _AmbientDim-1. + * + * This class represents an hyperplane as the zero set of the implicit equation + * \f$ n \cdot x + d = 0 \f$ where \f$ n \f$ is a unit normal vector of the plane (linear part) + * and \f$ d \f$ is the distance (offset) to the origin. + */ +template +class Hyperplane +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? 
Dynamic : _AmbientDim+1) + enum { AmbientDimAtCompileTime = _AmbientDim }; + typedef _Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Matrix VectorType; + typedef Matrix Coefficients; + typedef Block NormalReturnType; + + /** Default constructor without initialization */ + inline explicit Hyperplane() {} + + /** Constructs a dynamic-size hyperplane with \a _dim the dimension + * of the ambient space */ + inline explicit Hyperplane(int _dim) : m_coeffs(_dim+1) {} + + /** Construct a plane from its normal \a n and a point \a e onto the plane. + * \warning the vector normal is assumed to be normalized. + */ + inline Hyperplane(const VectorType& n, const VectorType& e) + : m_coeffs(n.size()+1) + { + normal() = n; + offset() = -e.eigen2_dot(n); + } + + /** Constructs a plane from its normal \a n and distance to the origin \a d + * such that the algebraic equation of the plane is \f$ n \cdot x + d = 0 \f$. + * \warning the vector normal is assumed to be normalized. + */ + inline Hyperplane(const VectorType& n, Scalar d) + : m_coeffs(n.size()+1) + { + normal() = n; + offset() = d; + } + + /** Constructs a hyperplane passing through the two points. If the dimension of the ambient space + * is greater than 2, then there isn't uniqueness, so an arbitrary choice is made. + */ + static inline Hyperplane Through(const VectorType& p0, const VectorType& p1) + { + Hyperplane result(p0.size()); + result.normal() = (p1 - p0).unitOrthogonal(); + result.offset() = -result.normal().eigen2_dot(p0); + return result; + } + + /** Constructs a hyperplane passing through the three points. The dimension of the ambient space + * is required to be exactly 3. 
+ */ + static inline Hyperplane Through(const VectorType& p0, const VectorType& p1, const VectorType& p2) + { + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3) + Hyperplane result(p0.size()); + result.normal() = (p2 - p0).cross(p1 - p0).normalized(); + result.offset() = -result.normal().eigen2_dot(p0); + return result; + } + + /** Constructs a hyperplane passing through the parametrized line \a parametrized. + * If the dimension of the ambient space is greater than 2, then there isn't uniqueness, + * so an arbitrary choice is made. + */ + // FIXME to be consitent with the rest this could be implemented as a static Through function ?? + explicit Hyperplane(const ParametrizedLine& parametrized) + { + normal() = parametrized.direction().unitOrthogonal(); + offset() = -normal().eigen2_dot(parametrized.origin()); + } + + ~Hyperplane() {} + + /** \returns the dimension in which the plane holds */ + inline int dim() const { return int(AmbientDimAtCompileTime)==Dynamic ? m_coeffs.size()-1 : int(AmbientDimAtCompileTime); } + + /** normalizes \c *this */ + void normalize(void) + { + m_coeffs /= normal().norm(); + } + + /** \returns the signed distance between the plane \c *this and a point \a p. + * \sa absDistance() + */ + inline Scalar signedDistance(const VectorType& p) const { return p.eigen2_dot(normal()) + offset(); } + + /** \returns the absolute distance between the plane \c *this and a point \a p. + * \sa signedDistance() + */ + inline Scalar absDistance(const VectorType& p) const { return ei_abs(signedDistance(p)); } + + /** \returns the projection of a point \a p onto the plane \c *this. + */ + inline VectorType projection(const VectorType& p) const { return p - signedDistance(p) * normal(); } + + /** \returns a constant reference to the unit normal vector of the plane, which corresponds + * to the linear part of the implicit equation. 
+ */ + inline const NormalReturnType normal() const { return NormalReturnType(*const_cast(&m_coeffs),0,0,dim(),1); } + + /** \returns a non-constant reference to the unit normal vector of the plane, which corresponds + * to the linear part of the implicit equation. + */ + inline NormalReturnType normal() { return NormalReturnType(m_coeffs,0,0,dim(),1); } + + /** \returns the distance to the origin, which is also the "constant term" of the implicit equation + * \warning the vector normal is assumed to be normalized. + */ + inline const Scalar& offset() const { return m_coeffs.coeff(dim()); } + + /** \returns a non-constant reference to the distance to the origin, which is also the constant part + * of the implicit equation */ + inline Scalar& offset() { return m_coeffs(dim()); } + + /** \returns a constant reference to the coefficients c_i of the plane equation: + * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$ + */ + inline const Coefficients& coeffs() const { return m_coeffs; } + + /** \returns a non-constant reference to the coefficients c_i of the plane equation: + * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$ + */ + inline Coefficients& coeffs() { return m_coeffs; } + + /** \returns the intersection of *this with \a other. + * + * \warning The ambient space must be a plane, i.e. have dimension 2, so that \c *this and \a other are lines. + * + * \note If \a other is approximately parallel to *this, this method will return any point on *this. + */ + VectorType intersection(const Hyperplane& other) + { + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2) + Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0); + // since the line equations ax+by=c are normalized with a^2+b^2=1, the following tests + // whether the two lines are approximately parallel. + if(ei_isMuchSmallerThan(det, Scalar(1))) + { // special case where the two lines are approximately parallel. Pick any point on the first line. 
+ if(ei_abs(coeffs().coeff(1))>ei_abs(coeffs().coeff(0))) + return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0)); + else + return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0)); + } + else + { // general case + Scalar invdet = Scalar(1) / det; + return VectorType(invdet*(coeffs().coeff(1)*other.coeffs().coeff(2)-other.coeffs().coeff(1)*coeffs().coeff(2)), + invdet*(other.coeffs().coeff(0)*coeffs().coeff(2)-coeffs().coeff(0)*other.coeffs().coeff(2))); + } + } + + /** Applies the transformation matrix \a mat to \c *this and returns a reference to \c *this. + * + * \param mat the Dim x Dim transformation matrix + * \param traits specifies whether the matrix \a mat represents an Isometry + * or a more generic Affine transformation. The default is Affine. + */ + template + inline Hyperplane& transform(const MatrixBase& mat, TransformTraits traits = Affine) + { + if (traits==Affine) + normal() = mat.inverse().transpose() * normal(); + else if (traits==Isometry) + normal() = mat * normal(); + else + { + ei_assert("invalid traits value in Hyperplane::transform()"); + } + return *this; + } + + /** Applies the transformation \a t to \c *this and returns a reference to \c *this. + * + * \param t the transformation of dimension Dim + * \param traits specifies whether the transformation \a t represents an Isometry + * or a more generic Affine transformation. The default is Affine. + * Other kind of transformations are not supported. + */ + inline Hyperplane& transform(const Transform& t, + TransformTraits traits = Affine) + { + transform(t.linear(), traits); + offset() -= t.translation().eigen2_dot(normal()); + return *this; + } + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. 
+ */ + template + inline typename internal::cast_return_type >::type cast() const + { + return typename internal::cast_return_type >::type(*this); + } + + /** Copy constructor with scalar type conversion */ + template + inline explicit Hyperplane(const Hyperplane& other) + { m_coeffs = other.coeffs().template cast(); } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. + * + * \sa MatrixBase::isApprox() */ + bool isApprox(const Hyperplane& other, typename NumTraits::Real prec = precision()) const + { return m_coeffs.isApprox(other.m_coeffs, prec); } + +protected: + + Coefficients m_coeffs; +}; diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h new file mode 100644 index 000000000..411c4b570 --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h @@ -0,0 +1,153 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway + + +/** \geometry_module \ingroup Geometry_Module + * + * \class ParametrizedLine + * + * \brief A parametrized line + * + * A parametrized line is defined by an origin point \f$ \mathbf{o} \f$ and a unit + * direction vector \f$ \mathbf{d} \f$ such that the line corresponds to + * the set \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$, \f$ l \in \mathbf{R} \f$. + * + * \param _Scalar the scalar type, i.e., the type of the coefficients + * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. + */ +template +class ParametrizedLine +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) + enum { AmbientDimAtCompileTime = _AmbientDim }; + typedef _Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Matrix VectorType; + + /** Default constructor without initialization */ + inline explicit ParametrizedLine() {} + + /** Constructs a dynamic-size line with \a _dim the dimension + * of the ambient space */ + inline explicit ParametrizedLine(int _dim) : m_origin(_dim), m_direction(_dim) {} + + /** Initializes a parametrized line of direction \a direction and origin \a origin. + * \warning the vector direction is assumed to be normalized. + */ + ParametrizedLine(const VectorType& origin, const VectorType& direction) + : m_origin(origin), m_direction(direction) {} + + explicit ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim>& hyperplane); + + /** Constructs a parametrized line going from \a p0 to \a p1. 
*/ + static inline ParametrizedLine Through(const VectorType& p0, const VectorType& p1) + { return ParametrizedLine(p0, (p1-p0).normalized()); } + + ~ParametrizedLine() {} + + /** \returns the dimension in which the line holds */ + inline int dim() const { return m_direction.size(); } + + const VectorType& origin() const { return m_origin; } + VectorType& origin() { return m_origin; } + + const VectorType& direction() const { return m_direction; } + VectorType& direction() { return m_direction; } + + /** \returns the squared distance of a point \a p to its projection onto the line \c *this. + * \sa distance() + */ + RealScalar squaredDistance(const VectorType& p) const + { + VectorType diff = p-origin(); + return (diff - diff.eigen2_dot(direction())* direction()).squaredNorm(); + } + /** \returns the distance of a point \a p to its projection onto the line \c *this. + * \sa squaredDistance() + */ + RealScalar distance(const VectorType& p) const { return ei_sqrt(squaredDistance(p)); } + + /** \returns the projection of a point \a p onto the line \c *this. */ + VectorType projection(const VectorType& p) const + { return origin() + (p-origin()).eigen2_dot(direction()) * direction(); } + + Scalar intersection(const Hyperplane<_Scalar, _AmbientDim>& hyperplane); + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. 
+ */ + template + inline typename internal::cast_return_type >::type cast() const + { + return typename internal::cast_return_type >::type(*this); + } + + /** Copy constructor with scalar type conversion */ + template + inline explicit ParametrizedLine(const ParametrizedLine& other) + { + m_origin = other.origin().template cast(); + m_direction = other.direction().template cast(); + } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. + * + * \sa MatrixBase::isApprox() */ + bool isApprox(const ParametrizedLine& other, typename NumTraits::Real prec = precision()) const + { return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); } + +protected: + + VectorType m_origin, m_direction; +}; + +/** Constructs a parametrized line from a 2D hyperplane + * + * \warning the ambient space must have dimension 2 such that the hyperplane actually describes a line + */ +template +inline ParametrizedLine<_Scalar, _AmbientDim>::ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim>& hyperplane) +{ + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2) + direction() = hyperplane.normal().unitOrthogonal(); + origin() = -hyperplane.normal()*hyperplane.offset(); +} + +/** \returns the parameter value of the intersection between \c *this and the given hyperplane + */ +template +inline _Scalar ParametrizedLine<_Scalar, _AmbientDim>::intersection(const Hyperplane<_Scalar, _AmbientDim>& hyperplane) +{ + return -(hyperplane.offset()+origin().eigen2_dot(hyperplane.normal())) + /(direction().eigen2_dot(hyperplane.normal())); +} diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Quaternion.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Quaternion.h new file mode 100644 index 000000000..a75fa42ae --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Quaternion.h @@ -0,0 +1,506 @@ +// This file is part of Eigen, a lightweight C++ template library +// for 
linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway + +template +struct ei_quaternion_assign_impl; + +/** \geometry_module \ingroup Geometry_Module + * + * \class Quaternion + * + * \brief The quaternion class used to represent 3D orientations and rotations + * + * \param _Scalar the scalar type, i.e., the type of the coefficients + * + * This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of + * orientations and rotations of objects in three dimensions. 
Compared to other representations + * like Euler angles or 3x3 matrices, quatertions offer the following advantages: + * \li \b compact storage (4 scalars) + * \li \b efficient to compose (28 flops), + * \li \b stable spherical interpolation + * + * The following two typedefs are provided for convenience: + * \li \c Quaternionf for \c float + * \li \c Quaterniond for \c double + * + * \sa class AngleAxis, class Transform + */ + +template struct ei_traits > +{ + typedef _Scalar Scalar; +}; + +template +class Quaternion : public RotationBase,3> +{ + typedef RotationBase,3> Base; + +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,4) + + using Base::operator*; + + /** the scalar type of the coefficients */ + typedef _Scalar Scalar; + + /** the type of the Coefficients 4-vector */ + typedef Matrix Coefficients; + /** the type of a 3D vector */ + typedef Matrix Vector3; + /** the equivalent rotation matrix type */ + typedef Matrix Matrix3; + /** the equivalent angle-axis type */ + typedef AngleAxis AngleAxisType; + + /** \returns the \c x coefficient */ + inline Scalar x() const { return m_coeffs.coeff(0); } + /** \returns the \c y coefficient */ + inline Scalar y() const { return m_coeffs.coeff(1); } + /** \returns the \c z coefficient */ + inline Scalar z() const { return m_coeffs.coeff(2); } + /** \returns the \c w coefficient */ + inline Scalar w() const { return m_coeffs.coeff(3); } + + /** \returns a reference to the \c x coefficient */ + inline Scalar& x() { return m_coeffs.coeffRef(0); } + /** \returns a reference to the \c y coefficient */ + inline Scalar& y() { return m_coeffs.coeffRef(1); } + /** \returns a reference to the \c z coefficient */ + inline Scalar& z() { return m_coeffs.coeffRef(2); } + /** \returns a reference to the \c w coefficient */ + inline Scalar& w() { return m_coeffs.coeffRef(3); } + + /** \returns a read-only vector expression of the imaginary part (x,y,z) */ + inline const Block vec() const { return 
m_coeffs.template start<3>(); } + + /** \returns a vector expression of the imaginary part (x,y,z) */ + inline Block vec() { return m_coeffs.template start<3>(); } + + /** \returns a read-only vector expression of the coefficients (x,y,z,w) */ + inline const Coefficients& coeffs() const { return m_coeffs; } + + /** \returns a vector expression of the coefficients (x,y,z,w) */ + inline Coefficients& coeffs() { return m_coeffs; } + + /** Default constructor leaving the quaternion uninitialized. */ + inline Quaternion() {} + + /** Constructs and initializes the quaternion \f$ w+xi+yj+zk \f$ from + * its four coefficients \a w, \a x, \a y and \a z. + * + * \warning Note the order of the arguments: the real \a w coefficient first, + * while internally the coefficients are stored in the following order: + * [\c x, \c y, \c z, \c w] + */ + inline Quaternion(Scalar w, Scalar x, Scalar y, Scalar z) + { m_coeffs << x, y, z, w; } + + /** Copy constructor */ + inline Quaternion(const Quaternion& other) { m_coeffs = other.m_coeffs; } + + /** Constructs and initializes a quaternion from the angle-axis \a aa */ + explicit inline Quaternion(const AngleAxisType& aa) { *this = aa; } + + /** Constructs and initializes a quaternion from either: + * - a rotation matrix expression, + * - a 4D vector expression representing quaternion coefficients. 
+ * \sa operator=(MatrixBase) + */ + template + explicit inline Quaternion(const MatrixBase& other) { *this = other; } + + Quaternion& operator=(const Quaternion& other); + Quaternion& operator=(const AngleAxisType& aa); + template + Quaternion& operator=(const MatrixBase& m); + + /** \returns a quaternion representing an identity rotation + * \sa MatrixBase::Identity() + */ + inline static Quaternion Identity() { return Quaternion(1, 0, 0, 0); } + + /** \sa Quaternion::Identity(), MatrixBase::setIdentity() + */ + inline Quaternion& setIdentity() { m_coeffs << 0, 0, 0, 1; return *this; } + + /** \returns the squared norm of the quaternion's coefficients + * \sa Quaternion::norm(), MatrixBase::squaredNorm() + */ + inline Scalar squaredNorm() const { return m_coeffs.squaredNorm(); } + + /** \returns the norm of the quaternion's coefficients + * \sa Quaternion::squaredNorm(), MatrixBase::norm() + */ + inline Scalar norm() const { return m_coeffs.norm(); } + + /** Normalizes the quaternion \c *this + * \sa normalized(), MatrixBase::normalize() */ + inline void normalize() { m_coeffs.normalize(); } + /** \returns a normalized version of \c *this + * \sa normalize(), MatrixBase::normalized() */ + inline Quaternion normalized() const { return Quaternion(m_coeffs.normalized()); } + + /** \returns the dot product of \c *this and \a other + * Geometrically speaking, the dot product of two unit quaternions + * corresponds to the cosine of half the angle between the two rotations. 
+ * \sa angularDistance() + */ + inline Scalar eigen2_dot(const Quaternion& other) const { return m_coeffs.eigen2_dot(other.m_coeffs); } + + inline Scalar angularDistance(const Quaternion& other) const; + + Matrix3 toRotationMatrix(void) const; + + template + Quaternion& setFromTwoVectors(const MatrixBase& a, const MatrixBase& b); + + inline Quaternion operator* (const Quaternion& q) const; + inline Quaternion& operator*= (const Quaternion& q); + + Quaternion inverse(void) const; + Quaternion conjugate(void) const; + + Quaternion slerp(Scalar t, const Quaternion& other) const; + + template + Vector3 operator* (const MatrixBase& vec) const; + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. + */ + template + inline typename internal::cast_return_type >::type cast() const + { return typename internal::cast_return_type >::type(*this); } + + /** Copy constructor with scalar type conversion */ + template + inline explicit Quaternion(const Quaternion& other) + { m_coeffs = other.coeffs().template cast(); } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. 
+ * + * \sa MatrixBase::isApprox() */ + bool isApprox(const Quaternion& other, typename NumTraits::Real prec = precision()) const + { return m_coeffs.isApprox(other.m_coeffs, prec); } + +protected: + Coefficients m_coeffs; +}; + +/** \ingroup Geometry_Module + * single precision quaternion type */ +typedef Quaternion Quaternionf; +/** \ingroup Geometry_Module + * double precision quaternion type */ +typedef Quaternion Quaterniond; + +// Generic Quaternion * Quaternion product +template inline Quaternion +ei_quaternion_product(const Quaternion& a, const Quaternion& b) +{ + return Quaternion + ( + a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(), + a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(), + a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(), + a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x() + ); +} + +/** \returns the concatenation of two rotations as a quaternion-quaternion product */ +template +inline Quaternion Quaternion::operator* (const Quaternion& other) const +{ + return ei_quaternion_product(*this,other); +} + +/** \sa operator*(Quaternion) */ +template +inline Quaternion& Quaternion::operator*= (const Quaternion& other) +{ + return (*this = *this * other); +} + +/** Rotation of a vector by a quaternion. + * \remarks If the quaternion is used to rotate several points (>1) + * then it is much more efficient to first convert it to a 3x3 Matrix. + * Comparison of the operation cost for n transformations: + * - Quaternion: 30n + * - Via a Matrix3: 24 + 15n + */ +template +template +inline typename Quaternion::Vector3 +Quaternion::operator* (const MatrixBase& v) const +{ + // Note that this algorithm comes from the optimization by hand + // of the conversion to a Matrix followed by a Matrix/Vector product. + // It appears to be much faster than the common algorithm found + // in the litterature (30 versus 39 flops). It also requires two + // Vector3 as temporaries. 
+ Vector3 uv; + uv = 2 * this->vec().cross(v); + return v + this->w() * uv + this->vec().cross(uv); +} + +template +inline Quaternion& Quaternion::operator=(const Quaternion& other) +{ + m_coeffs = other.m_coeffs; + return *this; +} + +/** Set \c *this from an angle-axis \a aa and returns a reference to \c *this + */ +template +inline Quaternion& Quaternion::operator=(const AngleAxisType& aa) +{ + Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings + this->w() = ei_cos(ha); + this->vec() = ei_sin(ha) * aa.axis(); + return *this; +} + +/** Set \c *this from the expression \a xpr: + * - if \a xpr is a 4x1 vector, then \a xpr is assumed to be a quaternion + * - if \a xpr is a 3x3 matrix, then \a xpr is assumed to be rotation matrix + * and \a xpr is converted to a quaternion + */ +template +template +inline Quaternion& Quaternion::operator=(const MatrixBase& xpr) +{ + ei_quaternion_assign_impl::run(*this, xpr.derived()); + return *this; +} + +/** Convert the quaternion to a 3x3 rotation matrix */ +template +inline typename Quaternion::Matrix3 +Quaternion::toRotationMatrix(void) const +{ + // NOTE if inlined, then gcc 4.2 and 4.4 get rid of the temporary (not gcc 4.3 !!) 
+ // if not inlined then the cost of the return by value is huge ~ +35%, + // however, not inlining this function is an order of magnitude slower, so + // it has to be inlined, and so the return by value is not an issue + Matrix3 res; + + const Scalar tx = 2*this->x(); + const Scalar ty = 2*this->y(); + const Scalar tz = 2*this->z(); + const Scalar twx = tx*this->w(); + const Scalar twy = ty*this->w(); + const Scalar twz = tz*this->w(); + const Scalar txx = tx*this->x(); + const Scalar txy = ty*this->x(); + const Scalar txz = tz*this->x(); + const Scalar tyy = ty*this->y(); + const Scalar tyz = tz*this->y(); + const Scalar tzz = tz*this->z(); + + res.coeffRef(0,0) = 1-(tyy+tzz); + res.coeffRef(0,1) = txy-twz; + res.coeffRef(0,2) = txz+twy; + res.coeffRef(1,0) = txy+twz; + res.coeffRef(1,1) = 1-(txx+tzz); + res.coeffRef(1,2) = tyz-twx; + res.coeffRef(2,0) = txz-twy; + res.coeffRef(2,1) = tyz+twx; + res.coeffRef(2,2) = 1-(txx+tyy); + + return res; +} + +/** Sets *this to be a quaternion representing a rotation sending the vector \a a to the vector \a b. + * + * \returns a reference to *this. + * + * Note that the two input vectors do \b not have to be normalized. 
+ */ +template +template +inline Quaternion& Quaternion::setFromTwoVectors(const MatrixBase& a, const MatrixBase& b) +{ + Vector3 v0 = a.normalized(); + Vector3 v1 = b.normalized(); + Scalar c = v0.eigen2_dot(v1); + + // if dot == 1, vectors are the same + if (ei_isApprox(c,Scalar(1))) + { + // set to identity + this->w() = 1; this->vec().setZero(); + return *this; + } + // if dot == -1, vectors are opposites + if (ei_isApprox(c,Scalar(-1))) + { + this->vec() = v0.unitOrthogonal(); + this->w() = 0; + return *this; + } + + Vector3 axis = v0.cross(v1); + Scalar s = ei_sqrt((Scalar(1)+c)*Scalar(2)); + Scalar invs = Scalar(1)/s; + this->vec() = axis * invs; + this->w() = s * Scalar(0.5); + + return *this; +} + +/** \returns the multiplicative inverse of \c *this + * Note that in most cases, i.e., if you simply want the opposite rotation, + * and/or the quaternion is normalized, then it is enough to use the conjugate. + * + * \sa Quaternion::conjugate() + */ +template +inline Quaternion Quaternion::inverse() const +{ + // FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite() ?? + Scalar n2 = this->squaredNorm(); + if (n2 > 0) + return Quaternion(conjugate().coeffs() / n2); + else + { + // return an invalid result to flag the error + return Quaternion(Coefficients::Zero()); + } +} + +/** \returns the conjugate of the \c *this which is equal to the multiplicative inverse + * if the quaternion is normalized. + * The conjugate of a quaternion represents the opposite rotation. 
+ * + * \sa Quaternion::inverse() + */ +template +inline Quaternion Quaternion::conjugate() const +{ + return Quaternion(this->w(),-this->x(),-this->y(),-this->z()); +} + +/** \returns the angle (in radian) between two rotations + * \sa eigen2_dot() + */ +template +inline Scalar Quaternion::angularDistance(const Quaternion& other) const +{ + double d = ei_abs(this->eigen2_dot(other)); + if (d>=1.0) + return 0; + return Scalar(2) * std::acos(d); +} + +/** \returns the spherical linear interpolation between the two quaternions + * \c *this and \a other at the parameter \a t + */ +template +Quaternion Quaternion::slerp(Scalar t, const Quaternion& other) const +{ + static const Scalar one = Scalar(1) - machine_epsilon(); + Scalar d = this->eigen2_dot(other); + Scalar absD = ei_abs(d); + + Scalar scale0; + Scalar scale1; + + if (absD>=one) + { + scale0 = Scalar(1) - t; + scale1 = t; + } + else + { + // theta is the angle between the 2 quaternions + Scalar theta = std::acos(absD); + Scalar sinTheta = ei_sin(theta); + + scale0 = ei_sin( ( Scalar(1) - t ) * theta) / sinTheta; + scale1 = ei_sin( ( t * theta) ) / sinTheta; + if (d<0) + scale1 = -scale1; + } + + return Quaternion(scale0 * coeffs() + scale1 * other.coeffs()); +} + +// set from a rotation matrix +template +struct ei_quaternion_assign_impl +{ + typedef typename Other::Scalar Scalar; + inline static void run(Quaternion& q, const Other& mat) + { + // This algorithm comes from "Quaternion Calculus and Fast Animation", + // Ken Shoemake, 1987 SIGGRAPH course notes + Scalar t = mat.trace(); + if (t > 0) + { + t = ei_sqrt(t + Scalar(1.0)); + q.w() = Scalar(0.5)*t; + t = Scalar(0.5)/t; + q.x() = (mat.coeff(2,1) - mat.coeff(1,2)) * t; + q.y() = (mat.coeff(0,2) - mat.coeff(2,0)) * t; + q.z() = (mat.coeff(1,0) - mat.coeff(0,1)) * t; + } + else + { + int i = 0; + if (mat.coeff(1,1) > mat.coeff(0,0)) + i = 1; + if (mat.coeff(2,2) > mat.coeff(i,i)) + i = 2; + int j = (i+1)%3; + int k = (j+1)%3; + + t = 
ei_sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0)); + q.coeffs().coeffRef(i) = Scalar(0.5) * t; + t = Scalar(0.5)/t; + q.w() = (mat.coeff(k,j)-mat.coeff(j,k))*t; + q.coeffs().coeffRef(j) = (mat.coeff(j,i)+mat.coeff(i,j))*t; + q.coeffs().coeffRef(k) = (mat.coeff(k,i)+mat.coeff(i,k))*t; + } + } +}; + +// set from a vector of coefficients assumed to be a quaternion +template +struct ei_quaternion_assign_impl +{ + typedef typename Other::Scalar Scalar; + inline static void run(Quaternion& q, const Other& vec) + { + q.coeffs() = vec; + } +}; diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Rotation2D.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Rotation2D.h new file mode 100644 index 000000000..ee7c80e7e --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Rotation2D.h @@ -0,0 +1,157 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
+ +// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway + + +/** \geometry_module \ingroup Geometry_Module + * + * \class Rotation2D + * + * \brief Represents a rotation/orientation in a 2 dimensional space. + * + * \param _Scalar the scalar type, i.e., the type of the coefficients + * + * This class is equivalent to a single scalar representing a counter clock wise rotation + * as a single angle in radian. It provides some additional features such as the automatic + * conversion from/to a 2x2 rotation matrix. Moreover this class aims to provide a similar + * interface to Quaternion in order to facilitate the writing of generic algorithms + * dealing with rotations. + * + * \sa class Quaternion, class Transform + */ +template struct ei_traits > +{ + typedef _Scalar Scalar; +}; + +template +class Rotation2D : public RotationBase,2> +{ + typedef RotationBase,2> Base; + +public: + + using Base::operator*; + + enum { Dim = 2 }; + /** the scalar type of the coefficients */ + typedef _Scalar Scalar; + typedef Matrix Vector2; + typedef Matrix Matrix2; + +protected: + + Scalar m_angle; + +public: + + /** Construct a 2D counter clock wise rotation from the angle \a a in radian. 
*/ + inline Rotation2D(Scalar a) : m_angle(a) {} + + /** \returns the rotation angle */ + inline Scalar angle() const { return m_angle; } + + /** \returns a read-write reference to the rotation angle */ + inline Scalar& angle() { return m_angle; } + + /** \returns the inverse rotation */ + inline Rotation2D inverse() const { return -m_angle; } + + /** Concatenates two rotations */ + inline Rotation2D operator*(const Rotation2D& other) const + { return m_angle + other.m_angle; } + + /** Concatenates two rotations */ + inline Rotation2D& operator*=(const Rotation2D& other) + { return m_angle += other.m_angle; return *this; } + + /** Applies the rotation to a 2D vector */ + Vector2 operator* (const Vector2& vec) const + { return toRotationMatrix() * vec; } + + template + Rotation2D& fromRotationMatrix(const MatrixBase& m); + Matrix2 toRotationMatrix(void) const; + + /** \returns the spherical interpolation between \c *this and \a other using + * parameter \a t. It is in fact equivalent to a linear interpolation. + */ + inline Rotation2D slerp(Scalar t, const Rotation2D& other) const + { return m_angle * (1-t) + other.angle() * t; } + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. + */ + template + inline typename internal::cast_return_type >::type cast() const + { return typename internal::cast_return_type >::type(*this); } + + /** Copy constructor with scalar type conversion */ + template + inline explicit Rotation2D(const Rotation2D& other) + { + m_angle = Scalar(other.angle()); + } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. 
+ * + * \sa MatrixBase::isApprox() */ + bool isApprox(const Rotation2D& other, typename NumTraits::Real prec = precision()) const + { return ei_isApprox(m_angle,other.m_angle, prec); } +}; + +/** \ingroup Geometry_Module + * single precision 2D rotation type */ +typedef Rotation2D Rotation2Df; +/** \ingroup Geometry_Module + * double precision 2D rotation type */ +typedef Rotation2D Rotation2Dd; + +/** Set \c *this from a 2x2 rotation matrix \a mat. + * In other words, this function extract the rotation angle + * from the rotation matrix. + */ +template +template +Rotation2D& Rotation2D::fromRotationMatrix(const MatrixBase& mat) +{ + EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE) + m_angle = ei_atan2(mat.coeff(1,0), mat.coeff(0,0)); + return *this; +} + +/** Constructs and \returns an equivalent 2x2 rotation matrix. + */ +template +typename Rotation2D::Matrix2 +Rotation2D::toRotationMatrix(void) const +{ + Scalar sinA = ei_sin(m_angle); + Scalar cosA = ei_cos(m_angle); + return (Matrix2() << cosA, -sinA, sinA, cosA).finished(); +} diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/RotationBase.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/RotationBase.h new file mode 100644 index 000000000..2f494f198 --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/RotationBase.h @@ -0,0 +1,134 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. 
+// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway + +// this file aims to contains the various representations of rotation/orientation +// in 2D and 3D space excepted Matrix and Quaternion. + +/** \class RotationBase + * + * \brief Common base class for compact rotation representations + * + * \param Derived is the derived type, i.e., a rotation type + * \param _Dim the dimension of the space + */ +template +class RotationBase +{ + public: + enum { Dim = _Dim }; + /** the scalar type of the coefficients */ + typedef typename ei_traits::Scalar Scalar; + + /** corresponding linear transformation matrix type */ + typedef Matrix RotationMatrixType; + + inline const Derived& derived() const { return *static_cast(this); } + inline Derived& derived() { return *static_cast(this); } + + /** \returns an equivalent rotation matrix */ + inline RotationMatrixType toRotationMatrix() const { return derived().toRotationMatrix(); } + + /** \returns the inverse rotation */ + inline Derived inverse() const { return derived().inverse(); } + + /** \returns the concatenation of the rotation \c *this with a translation \a t */ + inline Transform operator*(const Translation& t) const + { return toRotationMatrix() * t; } + + /** \returns the 
concatenation of the rotation \c *this with a scaling \a s */ + inline RotationMatrixType operator*(const Scaling& s) const + { return toRotationMatrix() * s; } + + /** \returns the concatenation of the rotation \c *this with an affine transformation \a t */ + inline Transform operator*(const Transform& t) const + { return toRotationMatrix() * t; } +}; + +/** \geometry_module + * + * Constructs a Dim x Dim rotation matrix from the rotation \a r + */ +template +template +Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols> +::Matrix(const RotationBase& r) +{ + EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim)) + *this = r.toRotationMatrix(); +} + +/** \geometry_module + * + * Set a Dim x Dim rotation matrix from the rotation \a r + */ +template +template +Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>& +Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols> +::operator=(const RotationBase& r) +{ + EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim)) + return *this = r.toRotationMatrix(); +} + +/** \internal + * + * Helper function to return an arbitrary rotation object to a rotation matrix. + * + * \param Scalar the numeric type of the matrix coefficients + * \param Dim the dimension of the current space + * + * It returns a Dim x Dim fixed size matrix. + * + * Default specializations are provided for: + * - any scalar type (2D), + * - any matrix expression, + * - any type based on RotationBase (e.g., Quaternion, AngleAxis, Rotation2D) + * + * Currently ei_toRotationMatrix is only used by Transform. 
+ * + * \sa class Transform, class Rotation2D, class Quaternion, class AngleAxis + */ +template +inline static Matrix ei_toRotationMatrix(const Scalar& s) +{ + EIGEN_STATIC_ASSERT(Dim==2,YOU_MADE_A_PROGRAMMING_MISTAKE) + return Rotation2D(s).toRotationMatrix(); +} + +template +inline static Matrix ei_toRotationMatrix(const RotationBase& r) +{ + return r.toRotationMatrix(); +} + +template +inline static const MatrixBase& ei_toRotationMatrix(const MatrixBase& mat) +{ + EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim, + YOU_MADE_A_PROGRAMMING_MISTAKE) + return mat; +} diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Scaling.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Scaling.h new file mode 100644 index 000000000..108e6d7d5 --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Scaling.h @@ -0,0 +1,179 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway + + +/** \geometry_module \ingroup Geometry_Module + * + * \class Scaling + * + * \brief Represents a possibly non uniform scaling transformation + * + * \param _Scalar the scalar type, i.e., the type of the coefficients. + * \param _Dim the dimension of the space, can be a compile time value or Dynamic + * + * \note This class is not aimed to be used to store a scaling transformation, + * but rather to make easier the constructions and updates of Transform objects. + * + * \sa class Translation, class Transform + */ +template +class Scaling +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim) + /** dimension of the space */ + enum { Dim = _Dim }; + /** the scalar type of the coefficients */ + typedef _Scalar Scalar; + /** corresponding vector type */ + typedef Matrix VectorType; + /** corresponding linear transformation matrix type */ + typedef Matrix LinearMatrixType; + /** corresponding translation type */ + typedef Translation TranslationType; + /** corresponding affine transformation type */ + typedef Transform TransformType; + +protected: + + VectorType m_coeffs; + +public: + + /** Default constructor without initialization. 
*/ + Scaling() {} + /** Constructs and initialize a uniform scaling transformation */ + explicit inline Scaling(const Scalar& s) { m_coeffs.setConstant(s); } + /** 2D only */ + inline Scaling(const Scalar& sx, const Scalar& sy) + { + ei_assert(Dim==2); + m_coeffs.x() = sx; + m_coeffs.y() = sy; + } + /** 3D only */ + inline Scaling(const Scalar& sx, const Scalar& sy, const Scalar& sz) + { + ei_assert(Dim==3); + m_coeffs.x() = sx; + m_coeffs.y() = sy; + m_coeffs.z() = sz; + } + /** Constructs and initialize the scaling transformation from a vector of scaling coefficients */ + explicit inline Scaling(const VectorType& coeffs) : m_coeffs(coeffs) {} + + const VectorType& coeffs() const { return m_coeffs; } + VectorType& coeffs() { return m_coeffs; } + + /** Concatenates two scaling */ + inline Scaling operator* (const Scaling& other) const + { return Scaling(coeffs().cwise() * other.coeffs()); } + + /** Concatenates a scaling and a translation */ + inline TransformType operator* (const TranslationType& t) const; + + /** Concatenates a scaling and an affine transformation */ + inline TransformType operator* (const TransformType& t) const; + + /** Concatenates a scaling and a linear transformation matrix */ + // TODO returns an expression + inline LinearMatrixType operator* (const LinearMatrixType& other) const + { return coeffs().asDiagonal() * other; } + + /** Concatenates a linear transformation matrix and a scaling */ + // TODO returns an expression + friend inline LinearMatrixType operator* (const LinearMatrixType& other, const Scaling& s) + { return other * s.coeffs().asDiagonal(); } + + template + inline LinearMatrixType operator*(const RotationBase& r) const + { return *this * r.toRotationMatrix(); } + + /** Applies scaling to vector */ + inline VectorType operator* (const VectorType& other) const + { return coeffs().asDiagonal() * other; } + + /** \returns the inverse scaling */ + inline Scaling inverse() const + { return Scaling(coeffs().cwise().inverse()); } + 
+ inline Scaling& operator=(const Scaling& other) + { + m_coeffs = other.m_coeffs; + return *this; + } + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. + */ + template + inline typename internal::cast_return_type >::type cast() const + { return typename internal::cast_return_type >::type(*this); } + + /** Copy constructor with scalar type conversion */ + template + inline explicit Scaling(const Scaling& other) + { m_coeffs = other.coeffs().template cast(); } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. + * + * \sa MatrixBase::isApprox() */ + bool isApprox(const Scaling& other, typename NumTraits::Real prec = precision()) const + { return m_coeffs.isApprox(other.m_coeffs, prec); } + +}; + +/** \addtogroup Geometry_Module */ +//@{ +typedef Scaling Scaling2f; +typedef Scaling Scaling2d; +typedef Scaling Scaling3f; +typedef Scaling Scaling3d; +//@} + +template +inline typename Scaling::TransformType +Scaling::operator* (const TranslationType& t) const +{ + TransformType res; + res.matrix().setZero(); + res.linear().diagonal() = coeffs(); + res.translation() = m_coeffs.cwise() * t.vector(); + res(Dim,Dim) = Scalar(1); + return res; +} + +template +inline typename Scaling::TransformType +Scaling::operator* (const TransformType& t) const +{ + TransformType res = t; + res.prescale(m_coeffs); + return res; +} diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Transform.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Transform.h new file mode 100644 index 000000000..88956c86c --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Transform.h @@ -0,0 +1,798 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. 
+// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2009 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway + + +// Note that we have to pass Dim and HDim because it is not allowed to use a template +// parameter to define a template specialization. To be more precise, in the following +// specializations, it is not allowed to use Dim+1 instead of HDim. +template< typename Other, + int Dim, + int HDim, + int OtherRows=Other::RowsAtCompileTime, + int OtherCols=Other::ColsAtCompileTime> +struct ei_transform_product_impl; + +/** \geometry_module \ingroup Geometry_Module + * + * \class Transform + * + * \brief Represents an homogeneous transformation in a N dimensional space + * + * \param _Scalar the scalar type, i.e., the type of the coefficients + * \param _Dim the dimension of the space + * + * The homography is internally represented and stored as a (Dim+1)^2 matrix which + * is available through the matrix() method. 
+ * + * Conversion methods from/to Qt's QMatrix and QTransform are available if the + * preprocessor token EIGEN_QT_SUPPORT is defined. + * + * \sa class Matrix, class Quaternion + */ +template +class Transform +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim==Dynamic ? Dynamic : (_Dim+1)*(_Dim+1)) + enum { + Dim = _Dim, ///< space dimension in which the transformation holds + HDim = _Dim+1 ///< size of a respective homogeneous vector + }; + /** the scalar type of the coefficients */ + typedef _Scalar Scalar; + /** type of the matrix used to represent the transformation */ + typedef Matrix MatrixType; + /** type of the matrix used to represent the linear part of the transformation */ + typedef Matrix LinearMatrixType; + /** type of read/write reference to the linear part of the transformation */ + typedef Block LinearPart; + /** type of read/write reference to the linear part of the transformation */ + typedef const Block ConstLinearPart; + /** type of a vector */ + typedef Matrix VectorType; + /** type of a read/write reference to the translation part of the rotation */ + typedef Block TranslationPart; + /** type of a read/write reference to the translation part of the rotation */ + typedef const Block ConstTranslationPart; + /** corresponding translation type */ + typedef Translation TranslationType; + /** corresponding scaling transformation type */ + typedef Scaling ScalingType; + +protected: + + MatrixType m_matrix; + +public: + + /** Default constructor without initialization of the coefficients. 
*/ + inline Transform() { } + + inline Transform(const Transform& other) + { + m_matrix = other.m_matrix; + } + + inline explicit Transform(const TranslationType& t) { *this = t; } + inline explicit Transform(const ScalingType& s) { *this = s; } + template + inline explicit Transform(const RotationBase& r) { *this = r; } + + inline Transform& operator=(const Transform& other) + { m_matrix = other.m_matrix; return *this; } + + template // MSVC 2005 will commit suicide if BigMatrix has a default value + struct construct_from_matrix + { + static inline void run(Transform *transform, const MatrixBase& other) + { + transform->matrix() = other; + } + }; + + template struct construct_from_matrix + { + static inline void run(Transform *transform, const MatrixBase& other) + { + transform->linear() = other; + transform->translation().setZero(); + transform->matrix()(Dim,Dim) = Scalar(1); + transform->matrix().template block<1,Dim>(Dim,0).setZero(); + } + }; + + /** Constructs and initializes a transformation from a Dim^2 or a (Dim+1)^2 matrix. */ + template + inline explicit Transform(const MatrixBase& other) + { + construct_from_matrix::run(this, other); + } + + /** Set \c *this from a (Dim+1)^2 matrix. 
*/ + template + inline Transform& operator=(const MatrixBase& other) + { m_matrix = other; return *this; } + + #ifdef EIGEN_QT_SUPPORT + inline Transform(const QMatrix& other); + inline Transform& operator=(const QMatrix& other); + inline QMatrix toQMatrix(void) const; + inline Transform(const QTransform& other); + inline Transform& operator=(const QTransform& other); + inline QTransform toQTransform(void) const; + #endif + + /** shortcut for m_matrix(row,col); + * \sa MatrixBase::operaror(int,int) const */ + inline Scalar operator() (int row, int col) const { return m_matrix(row,col); } + /** shortcut for m_matrix(row,col); + * \sa MatrixBase::operaror(int,int) */ + inline Scalar& operator() (int row, int col) { return m_matrix(row,col); } + + /** \returns a read-only expression of the transformation matrix */ + inline const MatrixType& matrix() const { return m_matrix; } + /** \returns a writable expression of the transformation matrix */ + inline MatrixType& matrix() { return m_matrix; } + + /** \returns a read-only expression of the linear (linear) part of the transformation */ + inline ConstLinearPart linear() const { return m_matrix.template block(0,0); } + /** \returns a writable expression of the linear (linear) part of the transformation */ + inline LinearPart linear() { return m_matrix.template block(0,0); } + + /** \returns a read-only expression of the translation vector of the transformation */ + inline ConstTranslationPart translation() const { return m_matrix.template block(0,Dim); } + /** \returns a writable expression of the translation vector of the transformation */ + inline TranslationPart translation() { return m_matrix.template block(0,Dim); } + + /** \returns an expression of the product between the transform \c *this and a matrix expression \a other + * + * The right hand side \a other might be either: + * \li a vector of size Dim, + * \li an homogeneous vector of size Dim+1, + * \li a transformation matrix of size Dim+1 x Dim+1. 
+ */ + // note: this function is defined here because some compilers cannot find the respective declaration + template + inline const typename ei_transform_product_impl::ResultType + operator * (const MatrixBase &other) const + { return ei_transform_product_impl::run(*this,other.derived()); } + + /** \returns the product expression of a transformation matrix \a a times a transform \a b + * The transformation matrix \a a must have a Dim+1 x Dim+1 sizes. */ + template + friend inline const typename ProductReturnType::Type + operator * (const MatrixBase &a, const Transform &b) + { return a.derived() * b.matrix(); } + + /** Contatenates two transformations */ + inline const Transform + operator * (const Transform& other) const + { return Transform(m_matrix * other.matrix()); } + + /** \sa MatrixBase::setIdentity() */ + void setIdentity() { m_matrix.setIdentity(); } + static const typename MatrixType::IdentityReturnType Identity() + { + return MatrixType::Identity(); + } + + template + inline Transform& scale(const MatrixBase &other); + + template + inline Transform& prescale(const MatrixBase &other); + + inline Transform& scale(Scalar s); + inline Transform& prescale(Scalar s); + + template + inline Transform& translate(const MatrixBase &other); + + template + inline Transform& pretranslate(const MatrixBase &other); + + template + inline Transform& rotate(const RotationType& rotation); + + template + inline Transform& prerotate(const RotationType& rotation); + + Transform& shear(Scalar sx, Scalar sy); + Transform& preshear(Scalar sx, Scalar sy); + + inline Transform& operator=(const TranslationType& t); + inline Transform& operator*=(const TranslationType& t) { return translate(t.vector()); } + inline Transform operator*(const TranslationType& t) const; + + inline Transform& operator=(const ScalingType& t); + inline Transform& operator*=(const ScalingType& s) { return scale(s.coeffs()); } + inline Transform operator*(const ScalingType& s) const; + friend inline 
Transform operator*(const LinearMatrixType& mat, const Transform& t) + { + Transform res = t; + res.matrix().row(Dim) = t.matrix().row(Dim); + res.matrix().template block(0,0) = (mat * t.matrix().template block(0,0)).lazy(); + return res; + } + + template + inline Transform& operator=(const RotationBase& r); + template + inline Transform& operator*=(const RotationBase& r) { return rotate(r.toRotationMatrix()); } + template + inline Transform operator*(const RotationBase& r) const; + + LinearMatrixType rotation() const; + template + void computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const; + template + void computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const; + + template + Transform& fromPositionOrientationScale(const MatrixBase &position, + const OrientationType& orientation, const MatrixBase &scale); + + inline const MatrixType inverse(TransformTraits traits = Affine) const; + + /** \returns a const pointer to the column major internal matrix */ + const Scalar* data() const { return m_matrix.data(); } + /** \returns a non-const pointer to the column major internal matrix */ + Scalar* data() { return m_matrix.data(); } + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. + */ + template + inline typename internal::cast_return_type >::type cast() const + { return typename internal::cast_return_type >::type(*this); } + + /** Copy constructor with scalar type conversion */ + template + inline explicit Transform(const Transform& other) + { m_matrix = other.matrix().template cast(); } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. 
+ * + * \sa MatrixBase::isApprox() */ + bool isApprox(const Transform& other, typename NumTraits::Real prec = precision()) const + { return m_matrix.isApprox(other.m_matrix, prec); } + + #ifdef EIGEN_TRANSFORM_PLUGIN + #include EIGEN_TRANSFORM_PLUGIN + #endif + +protected: + +}; + +/** \ingroup Geometry_Module */ +typedef Transform Transform2f; +/** \ingroup Geometry_Module */ +typedef Transform Transform3f; +/** \ingroup Geometry_Module */ +typedef Transform Transform2d; +/** \ingroup Geometry_Module */ +typedef Transform Transform3d; + +/************************** +*** Optional QT support *** +**************************/ + +#ifdef EIGEN_QT_SUPPORT +/** Initialises \c *this from a QMatrix assuming the dimension is 2. + * + * This function is available only if the token EIGEN_QT_SUPPORT is defined. + */ +template +Transform::Transform(const QMatrix& other) +{ + *this = other; +} + +/** Set \c *this from a QMatrix assuming the dimension is 2. + * + * This function is available only if the token EIGEN_QT_SUPPORT is defined. + */ +template +Transform& Transform::operator=(const QMatrix& other) +{ + EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + m_matrix << other.m11(), other.m21(), other.dx(), + other.m12(), other.m22(), other.dy(), + 0, 0, 1; + return *this; +} + +/** \returns a QMatrix from \c *this assuming the dimension is 2. + * + * \warning this convertion might loss data if \c *this is not affine + * + * This function is available only if the token EIGEN_QT_SUPPORT is defined. + */ +template +QMatrix Transform::toQMatrix(void) const +{ + EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + return QMatrix(m_matrix.coeff(0,0), m_matrix.coeff(1,0), + m_matrix.coeff(0,1), m_matrix.coeff(1,1), + m_matrix.coeff(0,2), m_matrix.coeff(1,2)); +} + +/** Initialises \c *this from a QTransform assuming the dimension is 2. + * + * This function is available only if the token EIGEN_QT_SUPPORT is defined. 
+ */ +template +Transform::Transform(const QTransform& other) +{ + *this = other; +} + +/** Set \c *this from a QTransform assuming the dimension is 2. + * + * This function is available only if the token EIGEN_QT_SUPPORT is defined. + */ +template +Transform& Transform::operator=(const QTransform& other) +{ + EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + m_matrix << other.m11(), other.m21(), other.dx(), + other.m12(), other.m22(), other.dy(), + other.m13(), other.m23(), other.m33(); + return *this; +} + +/** \returns a QTransform from \c *this assuming the dimension is 2. + * + * This function is available only if the token EIGEN_QT_SUPPORT is defined. + */ +template +QTransform Transform::toQTransform(void) const +{ + EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(2,0), + m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(2,1), + m_matrix.coeff(0,2), m_matrix.coeff(1,2), m_matrix.coeff(2,2)); +} +#endif + +/********************* +*** Procedural API *** +*********************/ + +/** Applies on the right the non uniform scale transformation represented + * by the vector \a other to \c *this and returns a reference to \c *this. + * \sa prescale() + */ +template +template +Transform& +Transform::scale(const MatrixBase &other) +{ + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) + linear() = (linear() * other.asDiagonal()).lazy(); + return *this; +} + +/** Applies on the right a uniform scale of a factor \a c to \c *this + * and returns a reference to \c *this. + * \sa prescale(Scalar) + */ +template +inline Transform& Transform::scale(Scalar s) +{ + linear() *= s; + return *this; +} + +/** Applies on the left the non uniform scale transformation represented + * by the vector \a other to \c *this and returns a reference to \c *this. 
+ * \sa scale() + */ +template +template +Transform& +Transform::prescale(const MatrixBase &other) +{ + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) + m_matrix.template block(0,0) = (other.asDiagonal() * m_matrix.template block(0,0)).lazy(); + return *this; +} + +/** Applies on the left a uniform scale of a factor \a c to \c *this + * and returns a reference to \c *this. + * \sa scale(Scalar) + */ +template +inline Transform& Transform::prescale(Scalar s) +{ + m_matrix.template corner(TopLeft) *= s; + return *this; +} + +/** Applies on the right the translation matrix represented by the vector \a other + * to \c *this and returns a reference to \c *this. + * \sa pretranslate() + */ +template +template +Transform& +Transform::translate(const MatrixBase &other) +{ + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) + translation() += linear() * other; + return *this; +} + +/** Applies on the left the translation matrix represented by the vector \a other + * to \c *this and returns a reference to \c *this. + * \sa translate() + */ +template +template +Transform& +Transform::pretranslate(const MatrixBase &other) +{ + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) + translation() += other; + return *this; +} + +/** Applies on the right the rotation represented by the rotation \a rotation + * to \c *this and returns a reference to \c *this. + * + * The template parameter \a RotationType is the type of the rotation which + * must be known by ei_toRotationMatrix<>. + * + * Natively supported types includes: + * - any scalar (2D), + * - a Dim x Dim matrix expression, + * - a Quaternion (3D), + * - a AngleAxis (3D) + * + * This mechanism is easily extendable to support user types such as Euler angles, + * or a pair of Quaternion for 4D rotations. 
+ * + * \sa rotate(Scalar), class Quaternion, class AngleAxis, prerotate(RotationType) + */ +template +template +Transform& +Transform::rotate(const RotationType& rotation) +{ + linear() *= ei_toRotationMatrix(rotation); + return *this; +} + +/** Applies on the left the rotation represented by the rotation \a rotation + * to \c *this and returns a reference to \c *this. + * + * See rotate() for further details. + * + * \sa rotate() + */ +template +template +Transform& +Transform::prerotate(const RotationType& rotation) +{ + m_matrix.template block(0,0) = ei_toRotationMatrix(rotation) + * m_matrix.template block(0,0); + return *this; +} + +/** Applies on the right the shear transformation represented + * by the vector \a other to \c *this and returns a reference to \c *this. + * \warning 2D only. + * \sa preshear() + */ +template +Transform& +Transform::shear(Scalar sx, Scalar sy) +{ + EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + VectorType tmp = linear().col(0)*sy + linear().col(1); + linear() << linear().col(0) + linear().col(1)*sx, tmp; + return *this; +} + +/** Applies on the left the shear transformation represented + * by the vector \a other to \c *this and returns a reference to \c *this. + * \warning 2D only. 
+ * \sa shear() + */ +template +Transform& +Transform::preshear(Scalar sx, Scalar sy) +{ + EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + m_matrix.template block(0,0) = LinearMatrixType(1, sx, sy, 1) * m_matrix.template block(0,0); + return *this; +} + +/****************************************************** +*** Scaling, Translation and Rotation compatibility *** +******************************************************/ + +template +inline Transform& Transform::operator=(const TranslationType& t) +{ + linear().setIdentity(); + translation() = t.vector(); + m_matrix.template block<1,Dim>(Dim,0).setZero(); + m_matrix(Dim,Dim) = Scalar(1); + return *this; +} + +template +inline Transform Transform::operator*(const TranslationType& t) const +{ + Transform res = *this; + res.translate(t.vector()); + return res; +} + +template +inline Transform& Transform::operator=(const ScalingType& s) +{ + m_matrix.setZero(); + linear().diagonal() = s.coeffs(); + m_matrix.coeffRef(Dim,Dim) = Scalar(1); + return *this; +} + +template +inline Transform Transform::operator*(const ScalingType& s) const +{ + Transform res = *this; + res.scale(s.coeffs()); + return res; +} + +template +template +inline Transform& Transform::operator=(const RotationBase& r) +{ + linear() = ei_toRotationMatrix(r); + translation().setZero(); + m_matrix.template block<1,Dim>(Dim,0).setZero(); + m_matrix.coeffRef(Dim,Dim) = Scalar(1); + return *this; +} + +template +template +inline Transform Transform::operator*(const RotationBase& r) const +{ + Transform res = *this; + res.rotate(r.derived()); + return res; +} + +/************************ +*** Special functions *** +************************/ + +/** \returns the rotation part of the transformation + * \nonstableyet + * + * \svd_module + * + * \sa computeRotationScaling(), computeScalingRotation(), class SVD + */ +template +typename Transform::LinearMatrixType +Transform::rotation() const +{ + LinearMatrixType result; + 
computeRotationScaling(&result, (LinearMatrixType*)0); + return result; +} + + +/** decomposes the linear part of the transformation as a product rotation x scaling, the scaling being + * not necessarily positive. + * + * If either pointer is zero, the corresponding computation is skipped. + * + * \nonstableyet + * + * \svd_module + * + * \sa computeScalingRotation(), rotation(), class SVD + */ +template +template +void Transform::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const +{ + JacobiSVD svd(linear(), ComputeFullU|ComputeFullV); + Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1 + Matrix sv(svd.singularValues()); + sv.coeffRef(0) *= x; + if(scaling) + { + scaling->noalias() = svd.matrixV() * sv.asDiagonal() * svd.matrixV().adjoint(); + } + if(rotation) + { + LinearMatrixType m(svd.matrixU()); + m.col(0) /= x; + rotation->noalias() = m * svd.matrixV().adjoint(); + } +} + +/** decomposes the linear part of the transformation as a product rotation x scaling, the scaling being + * not necessarily positive. + * + * If either pointer is zero, the corresponding computation is skipped. + * + * \nonstableyet + * + * \svd_module + * + * \sa computeRotationScaling(), rotation(), class SVD + */ +template +template +void Transform::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const +{ + JacobiSVD svd(linear(), ComputeFullU|ComputeFullV); + Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1 + Matrix sv(svd.singularValues()); + sv.coeffRef(0) *= x; + if(scaling) + { + scaling->noalias() = svd.matrixU() * sv.asDiagonal() * svd.matrixU().adjoint(); + } + if(rotation) + { + LinearMatrixType m(svd.matrixU()); + m.col(0) /= x; + rotation->noalias() = m * svd.matrixV().adjoint(); + } +} + +/** Convenient method to set \c *this from a position, orientation and scale + * of a 3D object. 
+ */ +template +template +Transform& +Transform::fromPositionOrientationScale(const MatrixBase &position, + const OrientationType& orientation, const MatrixBase &scale) +{ + linear() = ei_toRotationMatrix(orientation); + linear() *= scale.asDiagonal(); + translation() = position; + m_matrix.template block<1,Dim>(Dim,0).setZero(); + m_matrix(Dim,Dim) = Scalar(1); + return *this; +} + +/** \nonstableyet + * + * \returns the inverse transformation matrix according to some given knowledge + * on \c *this. + * + * \param traits allows to optimize the inversion process when the transformion + * is known to be not a general transformation. The possible values are: + * - Projective if the transformation is not necessarily affine, i.e., if the + * last row is not guaranteed to be [0 ... 0 1] + * - Affine is the default, the last row is assumed to be [0 ... 0 1] + * - Isometry if the transformation is only a concatenations of translations + * and rotations. + * + * \warning unless \a traits is always set to NoShear or NoScaling, this function + * requires the generic inverse method of MatrixBase defined in the LU module. If + * you forget to include this module, then you will get hard to debug linking errors. 
+ * + * \sa MatrixBase::inverse() + */ +template +inline const typename Transform::MatrixType +Transform::inverse(TransformTraits traits) const +{ + if (traits == Projective) + { + return m_matrix.inverse(); + } + else + { + MatrixType res; + if (traits == Affine) + { + res.template corner(TopLeft) = linear().inverse(); + } + else if (traits == Isometry) + { + res.template corner(TopLeft) = linear().transpose(); + } + else + { + ei_assert("invalid traits value in Transform::inverse()"); + } + // translation and remaining parts + res.template corner(TopRight) = - res.template corner(TopLeft) * translation(); + res.template corner<1,Dim>(BottomLeft).setZero(); + res.coeffRef(Dim,Dim) = Scalar(1); + return res; + } +} + +/***************************************************** +*** Specializations of operator* with a MatrixBase *** +*****************************************************/ + +template +struct ei_transform_product_impl +{ + typedef Transform TransformType; + typedef typename TransformType::MatrixType MatrixType; + typedef typename ProductReturnType::Type ResultType; + static ResultType run(const TransformType& tr, const Other& other) + { return tr.matrix() * other; } +}; + +template +struct ei_transform_product_impl +{ + typedef Transform TransformType; + typedef typename TransformType::MatrixType MatrixType; + typedef TransformType ResultType; + static ResultType run(const TransformType& tr, const Other& other) + { + TransformType res; + res.translation() = tr.translation(); + res.matrix().row(Dim) = tr.matrix().row(Dim); + res.linear() = (tr.linear() * other).lazy(); + return res; + } +}; + +template +struct ei_transform_product_impl +{ + typedef Transform TransformType; + typedef typename TransformType::MatrixType MatrixType; + typedef typename ProductReturnType::Type ResultType; + static ResultType run(const TransformType& tr, const Other& other) + { return tr.matrix() * other; } +}; + +template +struct ei_transform_product_impl +{ + typedef typename 
Other::Scalar Scalar; + typedef Transform TransformType; + typedef Matrix ResultType; + static ResultType run(const TransformType& tr, const Other& other) + { return ((tr.linear() * other) + tr.translation()) + * (Scalar(1) / ( (tr.matrix().template block<1,Dim>(Dim,0) * other).coeff(0) + tr.matrix().coeff(Dim,Dim))); } +}; diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Translation.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Translation.h new file mode 100644 index 000000000..e651e3102 --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/Geometry/Translation.h @@ -0,0 +1,196 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
+ +// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway + + +/** \geometry_module \ingroup Geometry_Module + * + * \class Translation + * + * \brief Represents a translation transformation + * + * \param _Scalar the scalar type, i.e., the type of the coefficients. + * \param _Dim the dimension of the space, can be a compile time value or Dynamic + * + * \note This class is not aimed to be used to store a translation transformation, + * but rather to make easier the constructions and updates of Transform objects. + * + * \sa class Scaling, class Transform + */ +template +class Translation +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim) + /** dimension of the space */ + enum { Dim = _Dim }; + /** the scalar type of the coefficients */ + typedef _Scalar Scalar; + /** corresponding vector type */ + typedef Matrix VectorType; + /** corresponding linear transformation matrix type */ + typedef Matrix LinearMatrixType; + /** corresponding scaling transformation type */ + typedef Scaling ScalingType; + /** corresponding affine transformation type */ + typedef Transform TransformType; + +protected: + + VectorType m_coeffs; + +public: + + /** Default constructor without initialization. 
*/ + Translation() {} + /** */ + inline Translation(const Scalar& sx, const Scalar& sy) + { + ei_assert(Dim==2); + m_coeffs.x() = sx; + m_coeffs.y() = sy; + } + /** */ + inline Translation(const Scalar& sx, const Scalar& sy, const Scalar& sz) + { + ei_assert(Dim==3); + m_coeffs.x() = sx; + m_coeffs.y() = sy; + m_coeffs.z() = sz; + } + /** Constructs and initialize the scaling transformation from a vector of scaling coefficients */ + explicit inline Translation(const VectorType& vector) : m_coeffs(vector) {} + + const VectorType& vector() const { return m_coeffs; } + VectorType& vector() { return m_coeffs; } + + /** Concatenates two translation */ + inline Translation operator* (const Translation& other) const + { return Translation(m_coeffs + other.m_coeffs); } + + /** Concatenates a translation and a scaling */ + inline TransformType operator* (const ScalingType& other) const; + + /** Concatenates a translation and a linear transformation */ + inline TransformType operator* (const LinearMatrixType& linear) const; + + template + inline TransformType operator*(const RotationBase& r) const + { return *this * r.toRotationMatrix(); } + + /** Concatenates a linear transformation and a translation */ + // its a nightmare to define a templated friend function outside its declaration + friend inline TransformType operator* (const LinearMatrixType& linear, const Translation& t) + { + TransformType res; + res.matrix().setZero(); + res.linear() = linear; + res.translation() = linear * t.m_coeffs; + res.matrix().row(Dim).setZero(); + res(Dim,Dim) = Scalar(1); + return res; + } + + /** Concatenates a translation and an affine transformation */ + inline TransformType operator* (const TransformType& t) const; + + /** Applies translation to vector */ + inline VectorType operator* (const VectorType& other) const + { return m_coeffs + other; } + + /** \returns the inverse translation (opposite) */ + Translation inverse() const { return Translation(-m_coeffs); } + + Translation& 
operator=(const Translation& other) + { + m_coeffs = other.m_coeffs; + return *this; + } + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. + */ + template + inline typename internal::cast_return_type >::type cast() const + { return typename internal::cast_return_type >::type(*this); } + + /** Copy constructor with scalar type conversion */ + template + inline explicit Translation(const Translation& other) + { m_coeffs = other.vector().template cast(); } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. + * + * \sa MatrixBase::isApprox() */ + bool isApprox(const Translation& other, typename NumTraits::Real prec = precision()) const + { return m_coeffs.isApprox(other.m_coeffs, prec); } + +}; + +/** \addtogroup Geometry_Module */ +//@{ +typedef Translation Translation2f; +typedef Translation Translation2d; +typedef Translation Translation3f; +typedef Translation Translation3d; +//@} + + +template +inline typename Translation::TransformType +Translation::operator* (const ScalingType& other) const +{ + TransformType res; + res.matrix().setZero(); + res.linear().diagonal() = other.coeffs(); + res.translation() = m_coeffs; + res(Dim,Dim) = Scalar(1); + return res; +} + +template +inline typename Translation::TransformType +Translation::operator* (const LinearMatrixType& linear) const +{ + TransformType res; + res.matrix().setZero(); + res.linear() = linear; + res.translation() = m_coeffs; + res.matrix().row(Dim).setZero(); + res(Dim,Dim) = Scalar(1); + return res; +} + +template +inline typename Translation::TransformType +Translation::operator* (const TransformType& t) const +{ + TransformType res = t; + res.pretranslate(m_coeffs); + return res; +} diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/LU.h 
b/gtsam/3rdparty/Eigen/src/Eigen2Support/LU.h new file mode 100644 index 000000000..c23c11baa --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/LU.h @@ -0,0 +1,133 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
+ +#ifndef EIGEN2_LU_H +#define EIGEN2_LU_H + +template +class LU : public FullPivLU +{ + public: + + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Matrix IntRowVectorType; + typedef Matrix IntColVectorType; + typedef Matrix RowVectorType; + typedef Matrix ColVectorType; + + typedef Matrix KernelResultType; + + typedef Matrix ImageResultType; + + typedef FullPivLU Base; + LU() : Base() {} + + template + explicit LU(const T& t) : Base(t), m_originalMatrix(t) {} + + template + bool solve(const MatrixBase& b, ResultType *result) const + { + *result = static_cast(this)->solve(b); + return true; + } + + template + inline void computeInverse(ResultType *result) const + { + solve(MatrixType::Identity(this->rows(), this->cols()), result); + } + + template + void computeKernel(KernelMatrixType *result) const + { + *result = static_cast(this)->kernel(); + } + + template + void computeImage(ImageMatrixType *result) const + { + *result = static_cast(this)->image(m_originalMatrix); + } + + const ImageResultType image() const + { + return static_cast(this)->image(m_originalMatrix); + } + + const MatrixType& m_originalMatrix; +}; + +#if EIGEN2_SUPPORT_STAGE < STAGE20_RESOLVE_API_CONFLICTS +/** \lu_module + * + * Synonym of partialPivLu(). + * + * \return the partial-pivoting LU decomposition of \c *this. + * + * \sa class PartialPivLU + */ +template +inline const LU::PlainObject> +MatrixBase::lu() const +{ + return LU(eval()); +} +#endif + +#ifdef EIGEN2_SUPPORT +/** \lu_module + * + * Synonym of partialPivLu(). + * + * \return the partial-pivoting LU decomposition of \c *this. 
+ * + * \sa class PartialPivLU + */ +template +inline const LU::PlainObject> +MatrixBase::eigen2_lu() const +{ + return LU(eval()); +} +#endif + + +#endif // EIGEN2_LU_H diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/LeastSquares.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/LeastSquares.h new file mode 100644 index 000000000..4b62ffa92 --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/LeastSquares.h @@ -0,0 +1,182 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2009 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN2_LEASTSQUARES_H +#define EIGEN2_LEASTSQUARES_H + +/** \ingroup LeastSquares_Module + * + * \leastsquares_module + * + * For a set of points, this function tries to express + * one of the coords as a linear (affine) function of the other coords. + * + * This is best explained by an example. 
This function works in full + * generality, for points in a space of arbitrary dimension, and also over + * the complex numbers, but for this example we will work in dimension 3 + * over the real numbers (doubles). + * + * So let us work with the following set of 5 points given by their + * \f$(x,y,z)\f$ coordinates: + * @code + Vector3d points[5]; + points[0] = Vector3d( 3.02, 6.89, -4.32 ); + points[1] = Vector3d( 2.01, 5.39, -3.79 ); + points[2] = Vector3d( 2.41, 6.01, -4.01 ); + points[3] = Vector3d( 2.09, 5.55, -3.86 ); + points[4] = Vector3d( 2.58, 6.32, -4.10 ); + * @endcode + * Suppose that we want to express the second coordinate (\f$y\f$) as a linear + * expression in \f$x\f$ and \f$z\f$, that is, + * \f[ y=ax+bz+c \f] + * for some constants \f$a,b,c\f$. Thus, we want to find the best possible + * constants \f$a,b,c\f$ so that the plane of equation \f$y=ax+bz+c\f$ fits + * best the five above points. To do that, call this function as follows: + * @code + Vector3d coeffs; // will store the coefficients a, b, c + linearRegression( + 5, + &points, + &coeffs, + 1 // the coord to express as a function of + // the other ones. 0 means x, 1 means y, 2 means z. + ); + * @endcode + * Now the vector \a coeffs is approximately + * \f$( 0.495 , -1.927 , -2.906 )\f$. + * Thus, we get \f$a=0.495, b = -1.927, c = -2.906\f$. Let us check for + * instance how near points[0] is from the plane of equation \f$y=ax+bz+c\f$. + * Looking at the coords of points[0], we see that: + * \f[ax+bz+c = 0.495 * 3.02 + (-1.927) * (-4.32) + (-2.906) = 6.91.\f] + * On the other hand, we have \f$y=6.89\f$. We see that the values + * \f$6.91\f$ and \f$6.89\f$ + * are near, so points[0] is very near the plane of equation \f$y=ax+bz+c\f$. + * + * Let's now describe precisely the parameters: + * @param numPoints the number of points + * @param points the array of pointers to the points on which to perform the linear regression + * @param result pointer to the vector in which to store the result. 
+ This vector must be of the same type and size as the + data points. The meaning of its coords is as follows. + For brevity, let \f$n=Size\f$, + \f$r_i=result[i]\f$, + and \f$f=funcOfOthers\f$. Denote by + \f$x_0,\ldots,x_{n-1}\f$ + the n coordinates in the n-dimensional space. + Then the resulting equation is: + \f[ x_f = r_0 x_0 + \cdots + r_{f-1}x_{f-1} + + r_{f+1}x_{f+1} + \cdots + r_{n-1}x_{n-1} + r_n. \f] + * @param funcOfOthers Determines which coord to express as a function of the + others. Coords are numbered starting from 0, so that a + value of 0 means \f$x\f$, 1 means \f$y\f$, + 2 means \f$z\f$, ... + * + * \sa fitHyperplane() + */ +template +void linearRegression(int numPoints, + VectorType **points, + VectorType *result, + int funcOfOthers ) +{ + typedef typename VectorType::Scalar Scalar; + typedef Hyperplane HyperplaneType; + const int size = points[0]->size(); + result->resize(size); + HyperplaneType h(size); + fitHyperplane(numPoints, points, &h); + for(int i = 0; i < funcOfOthers; i++) + result->coeffRef(i) = - h.coeffs()[i] / h.coeffs()[funcOfOthers]; + for(int i = funcOfOthers; i < size; i++) + result->coeffRef(i) = - h.coeffs()[i+1] / h.coeffs()[funcOfOthers]; +} + +/** \ingroup LeastSquares_Module + * + * \leastsquares_module + * + * This function is quite similar to linearRegression(), so we refer to the + * documentation of this function and only list here the differences. + * + * The main difference from linearRegression() is that this function doesn't + * take a \a funcOfOthers argument. Instead, it finds a general equation + * of the form + * \f[ r_0 x_0 + \cdots + r_{n-1}x_{n-1} + r_n = 0, \f] + * where \f$n=Size\f$, \f$r_i=retCoefficients[i]\f$, and we denote by + * \f$x_0,\ldots,x_{n-1}\f$ the n coordinates in the n-dimensional space. + * + * Thus, the vector \a retCoefficients has size \f$n+1\f$, which is another + * difference from linearRegression(). 
+ * + * In practice, this function performs an hyper-plane fit in a total least square sense + * via the following steps: + * 1 - center the data to the mean + * 2 - compute the covariance matrix + * 3 - pick the eigenvector corresponding to the smallest eigenvalue of the covariance matrix + * The ratio of the smallest eigenvalue and the second one gives us a hint about the relevance + * of the solution. This value is optionally returned in \a soundness. + * + * \sa linearRegression() + */ +template +void fitHyperplane(int numPoints, + VectorType **points, + HyperplaneType *result, + typename NumTraits::Real* soundness = 0) +{ + typedef typename VectorType::Scalar Scalar; + typedef Matrix CovMatrixType; + EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType) + ei_assert(numPoints >= 1); + int size = points[0]->size(); + ei_assert(size+1 == result->coeffs().size()); + + // compute the mean of the data + VectorType mean = VectorType::Zero(size); + for(int i = 0; i < numPoints; ++i) + mean += *(points[i]); + mean /= numPoints; + + // compute the covariance matrix + CovMatrixType covMat = CovMatrixType::Zero(size, size); + VectorType remean = VectorType::Zero(size); + for(int i = 0; i < numPoints; ++i) + { + VectorType diff = (*(points[i]) - mean).conjugate(); + covMat += diff * diff.adjoint(); + } + + // now we just have to pick the eigen vector with smallest eigen value + SelfAdjointEigenSolver eig(covMat); + result->normal() = eig.eigenvectors().col(0); + if (soundness) + *soundness = eig.eigenvalues().coeff(0)/eig.eigenvalues().coeff(1); + + // let's compute the constant coefficient such that the + // plane pass trough the mean point: + result->offset() = - (result->normal().cwise()* mean).sum(); +} + + +#endif // EIGEN2_LEASTSQUARES_H diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/Macros.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/Macros.h new file mode 100644 index 000000000..77e85a41e --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/Macros.h @@ -0,0 +1,35 @@ 
+// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN2_MACROS_H +#define EIGEN2_MACROS_H + +#define ei_assert eigen_assert +#define ei_internal_assert eigen_internal_assert + +#define EIGEN_ALIGN_128 EIGEN_ALIGN16 + +#define EIGEN_ARCH_WANTS_ALIGNMENT EIGEN_ALIGN_STATICALLY + +#endif // EIGEN2_MACROS_H diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/MathFunctions.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/MathFunctions.h new file mode 100644 index 000000000..caa44e63f --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/MathFunctions.h @@ -0,0 +1,68 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2010 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN2_MATH_FUNCTIONS_H +#define EIGEN2_MATH_FUNCTIONS_H + +template inline typename NumTraits::Real ei_real(const T& x) { return internal::real(x); } +template inline typename NumTraits::Real ei_imag(const T& x) { return internal::imag(x); } +template inline T ei_conj(const T& x) { return internal::conj(x); } +template inline typename NumTraits::Real ei_abs (const T& x) { return internal::abs(x); } +template inline typename NumTraits::Real ei_abs2(const T& x) { return internal::abs2(x); } +template inline T ei_sqrt(const T& x) { return internal::sqrt(x); } +template inline T ei_exp (const T& x) { return internal::exp(x); } +template inline T ei_log (const T& x) { return internal::log(x); } +template inline T ei_sin (const T& x) { return internal::sin(x); } +template inline T ei_cos (const T& x) { return internal::cos(x); } +template inline T ei_atan2(const T& x,const T& y) { return internal::atan2(x,y); } +template inline T ei_pow (const T& x,const T& y) { return 
internal::pow(x,y); } +template inline T ei_random () { return internal::random(); } +template inline T ei_random (const T& x, const T& y) { return internal::random(x, y); } + +template inline T precision () { return NumTraits::dummy_precision(); } +template inline T machine_epsilon () { return NumTraits::epsilon(); } + + +template +inline bool ei_isMuchSmallerThan(const Scalar& x, const OtherScalar& y, + typename NumTraits::Real precision = NumTraits::dummy_precision()) +{ + return internal::isMuchSmallerThan(x, y, precision); +} + +template +inline bool ei_isApprox(const Scalar& x, const Scalar& y, + typename NumTraits::Real precision = NumTraits::dummy_precision()) +{ + return internal::isApprox(x, y, precision); +} + +template +inline bool ei_isApproxOrLessThan(const Scalar& x, const Scalar& y, + typename NumTraits::Real precision = NumTraits::dummy_precision()) +{ + return internal::isApproxOrLessThan(x, y, precision); +} + +#endif // EIGEN2_MATH_FUNCTIONS_H diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/Memory.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/Memory.h new file mode 100644 index 000000000..028347541 --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/Memory.h @@ -0,0 +1,58 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. 
+// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN2_MEMORY_H +#define EIGEN2_MEMORY_H + +inline void* ei_aligned_malloc(size_t size) { return internal::aligned_malloc(size); } +inline void ei_aligned_free(void *ptr) { internal::aligned_free(ptr); } +inline void* ei_aligned_realloc(void *ptr, size_t new_size, size_t old_size) { return internal::aligned_realloc(ptr, new_size, old_size); } +inline void* ei_handmade_aligned_malloc(size_t size) { return internal::handmade_aligned_malloc(size); } +inline void ei_handmade_aligned_free(void *ptr) { internal::handmade_aligned_free(ptr); } + +template inline void* ei_conditional_aligned_malloc(size_t size) +{ + return internal::conditional_aligned_malloc(size); +} +template inline void ei_conditional_aligned_free(void *ptr) +{ + internal::conditional_aligned_free(ptr); +} +template inline void* ei_conditional_aligned_realloc(void* ptr, size_t new_size, size_t old_size) +{ + return internal::conditional_aligned_realloc(ptr, new_size, old_size); +} + +template inline T* ei_aligned_new(size_t size) +{ + return internal::aligned_new(size); +} +template inline void ei_aligned_delete(T *ptr, size_t size) +{ + return internal::aligned_delete(ptr, size); +} + + + +#endif // EIGEN2_MACROS_H diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/Meta.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/Meta.h new file mode 100644 index 000000000..6e500b79a --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/Meta.h @@ -0,0 +1,86 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2011 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN2_META_H +#define EIGEN2_META_H + +template +struct ei_traits : internal::traits +{}; + +struct ei_meta_true { enum { ret = 1 }; }; +struct ei_meta_false { enum { ret = 0 }; }; + +template +struct ei_meta_if { typedef Then ret; }; + +template +struct ei_meta_if { typedef Else ret; }; + +template struct ei_is_same_type { enum { ret = 0 }; }; +template struct ei_is_same_type { enum { ret = 1 }; }; + +template struct ei_unref { typedef T type; }; +template struct ei_unref { typedef T type; }; + +template struct ei_unpointer { typedef T type; }; +template struct ei_unpointer { typedef T type; }; +template struct ei_unpointer { typedef T type; }; + +template struct ei_unconst { typedef T type; }; +template struct ei_unconst { typedef T type; }; +template struct ei_unconst { typedef T & type; }; +template struct ei_unconst { typedef T * type; }; + +template struct ei_cleantype { typedef T type; }; +template struct ei_cleantype { typedef typename ei_cleantype::type type; }; 
+template struct ei_cleantype { typedef typename ei_cleantype::type type; }; +template struct ei_cleantype { typedef typename ei_cleantype::type type; }; +template struct ei_cleantype { typedef typename ei_cleantype::type type; }; +template struct ei_cleantype { typedef typename ei_cleantype::type type; }; + +/** \internal In short, it computes int(sqrt(\a Y)) with \a Y an integer. + * Usage example: \code ei_meta_sqrt<1023>::ret \endcode + */ +template Y))) > + // use ?: instead of || just to shut up a stupid gcc 4.3 warning +class ei_meta_sqrt +{ + enum { + MidX = (InfX+SupX)/2, + TakeInf = MidX*MidX > Y ? 1 : 0, + NewInf = int(TakeInf) ? InfX : int(MidX), + NewSup = int(TakeInf) ? int(MidX) : SupX + }; + public: + enum { ret = ei_meta_sqrt::ret }; +}; + +template +class ei_meta_sqrt { public: enum { ret = (SupX*SupX <= Y) ? SupX : InfX }; }; + +#endif // EIGEN2_META_H diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/Minor.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/Minor.h index 555d1d7f5..eda91cc32 100644 --- a/gtsam/3rdparty/Eigen/src/Eigen2Support/Minor.h +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/Minor.h @@ -38,12 +38,14 @@ * * \sa MatrixBase::minor() */ + +namespace internal { template -struct ei_traits > - : ei_traits +struct traits > + : traits { - typedef typename ei_nested::type MatrixTypeNested; - typedef typename ei_unref::type _MatrixTypeNested; + typedef typename nested::type MatrixTypeNested; + typedef typename remove_reference::type _MatrixTypeNested; typedef typename MatrixType::StorageKind StorageKind; enum { RowsAtCompileTime = (MatrixType::RowsAtCompileTime != Dynamic) ? @@ -54,11 +56,12 @@ struct ei_traits > int(MatrixType::MaxRowsAtCompileTime) - 1 : Dynamic, MaxColsAtCompileTime = (MatrixType::MaxColsAtCompileTime != Dynamic) ? 
int(MatrixType::MaxColsAtCompileTime) - 1 : Dynamic, - Flags = _MatrixTypeNested::Flags & HereditaryBits, + Flags = _MatrixTypeNested::Flags & (HereditaryBits | LvalueBit), CoeffReadCost = _MatrixTypeNested::CoeffReadCost // minor is used typically on tiny matrices, // where loops are unrolled and the 'if' evaluates at compile time }; }; +} template class Minor : public MatrixBase > @@ -72,7 +75,7 @@ template class Minor Index row, Index col) : m_matrix(matrix), m_row(row), m_col(col) { - ei_assert(row >= 0 && row < matrix.rows() + eigen_assert(row >= 0 && row < matrix.rows() && col >= 0 && col < matrix.cols()); } diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/QR.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/QR.h new file mode 100644 index 000000000..64f5d5ccb --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/QR.h @@ -0,0 +1,79 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2011 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
+ +#ifndef EIGEN2_QR_H +#define EIGEN2_QR_H + +template +class QR : public HouseholderQR +{ + public: + + typedef HouseholderQR Base; + typedef Block MatrixRBlockType; + + QR() : Base() {} + + template + explicit QR(const T& t) : Base(t) {} + + template + bool solve(const MatrixBase& b, ResultType *result) const + { + *result = static_cast(this)->solve(b); + return true; + } + + MatrixType matrixQ(void) const { + MatrixType ret = MatrixType::Identity(this->rows(), this->cols()); + ret = this->householderQ() * ret; + return ret; + } + + bool isFullRank() const { + return true; + } + + const TriangularView + matrixR(void) const + { + int cols = this->cols(); + return MatrixRBlockType(this->matrixQR(), 0, 0, cols, cols).template triangularView(); + } +}; + +/** \return the QR decomposition of \c *this. + * + * \sa class QR + */ +template +const QR::PlainObject> +MatrixBase::qr() const +{ + return QR(eval()); +} + + +#endif // EIGEN2_QR_H diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/SVD.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/SVD.h new file mode 100644 index 000000000..528a0dcd0 --- /dev/null +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/SVD.h @@ -0,0 +1,649 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. 
+// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN2_SVD_H +#define EIGEN2_SVD_H + +/** \ingroup SVD_Module + * \nonstableyet + * + * \class SVD + * + * \brief Standard SVD decomposition of a matrix and associated features + * + * \param MatrixType the type of the matrix of which we are computing the SVD decomposition + * + * This class performs a standard SVD decomposition of a real matrix A of size \c M x \c N + * with \c M \>= \c N. + * + * + * \sa MatrixBase::SVD() + */ +template class SVD +{ + private: + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + + enum { + PacketSize = internal::packet_traits::size, + AlignmentMask = int(PacketSize)-1, + MinSize = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime) + }; + + typedef Matrix ColVector; + typedef Matrix RowVector; + + typedef Matrix MatrixUType; + typedef Matrix MatrixVType; + typedef Matrix SingularValuesType; + + public: + + SVD() {} // a user who relied on compiler-generated default compiler reported problems with MSVC in 2.0.7 + + SVD(const MatrixType& matrix) + : m_matU(matrix.rows(), std::min(matrix.rows(), matrix.cols())), + m_matV(matrix.cols(),matrix.cols()), + m_sigma(std::min(matrix.rows(),matrix.cols())) + { + compute(matrix); + } + + template + bool solve(const MatrixBase &b, ResultType* result) const; + + const MatrixUType& matrixU() const { return m_matU; } + const SingularValuesType& singularValues() const { return m_sigma; } + const MatrixVType& matrixV() const { return m_matV; } + + void 
compute(const MatrixType& matrix); + SVD& sort(); + + template + void computeUnitaryPositive(UnitaryType *unitary, PositiveType *positive) const; + template + void computePositiveUnitary(PositiveType *positive, UnitaryType *unitary) const; + template + void computeRotationScaling(RotationType *unitary, ScalingType *positive) const; + template + void computeScalingRotation(ScalingType *positive, RotationType *unitary) const; + + protected: + /** \internal */ + MatrixUType m_matU; + /** \internal */ + MatrixVType m_matV; + /** \internal */ + SingularValuesType m_sigma; +}; + +/** Computes / recomputes the SVD decomposition A = U S V^* of \a matrix + * + * \note this code has been adapted from JAMA (public domain) + */ +template +void SVD::compute(const MatrixType& matrix) +{ + const int m = matrix.rows(); + const int n = matrix.cols(); + const int nu = std::min(m,n); + ei_assert(m>=n && "In Eigen 2.0, SVD only works for MxN matrices with M>=N. Sorry!"); + ei_assert(m>1 && "In Eigen 2.0, SVD doesn't work on 1x1 matrices"); + + m_matU.resize(m, nu); + m_matU.setZero(); + m_sigma.resize(std::min(m,n)); + m_matV.resize(n,n); + + RowVector e(n); + ColVector work(m); + MatrixType matA(matrix); + const bool wantu = true; + const bool wantv = true; + int i=0, j=0, k=0; + + // Reduce A to bidiagonal form, storing the diagonal elements + // in s and the super-diagonal elements in e. + int nct = std::min(m-1,n); + int nrt = std::max(0,std::min(n-2,m)); + for (k = 0; k < std::max(nct,nrt); ++k) + { + if (k < nct) + { + // Compute the transformation for the k-th column and + // place the k-th diagonal in m_sigma[k]. + m_sigma[k] = matA.col(k).end(m-k).norm(); + if (m_sigma[k] != 0.0) // FIXME + { + if (matA(k,k) < 0.0) + m_sigma[k] = -m_sigma[k]; + matA.col(k).end(m-k) /= m_sigma[k]; + matA(k,k) += 1.0; + } + m_sigma[k] = -m_sigma[k]; + } + + for (j = k+1; j < n; ++j) + { + if ((k < nct) && (m_sigma[k] != 0.0)) + { + // Apply the transformation. 
+ Scalar t = matA.col(k).end(m-k).eigen2_dot(matA.col(j).end(m-k)); // FIXME dot product or cwise prod + .sum() ?? + t = -t/matA(k,k); + matA.col(j).end(m-k) += t * matA.col(k).end(m-k); + } + + // Place the k-th row of A into e for the + // subsequent calculation of the row transformation. + e[j] = matA(k,j); + } + + // Place the transformation in U for subsequent back multiplication. + if (wantu & (k < nct)) + m_matU.col(k).end(m-k) = matA.col(k).end(m-k); + + if (k < nrt) + { + // Compute the k-th row transformation and place the + // k-th super-diagonal in e[k]. + e[k] = e.end(n-k-1).norm(); + if (e[k] != 0.0) + { + if (e[k+1] < 0.0) + e[k] = -e[k]; + e.end(n-k-1) /= e[k]; + e[k+1] += 1.0; + } + e[k] = -e[k]; + if ((k+1 < m) & (e[k] != 0.0)) + { + // Apply the transformation. + work.end(m-k-1) = matA.corner(BottomRight,m-k-1,n-k-1) * e.end(n-k-1); + for (j = k+1; j < n; ++j) + matA.col(j).end(m-k-1) += (-e[j]/e[k+1]) * work.end(m-k-1); + } + + // Place the transformation in V for subsequent back multiplication. + if (wantv) + m_matV.col(k).end(n-k-1) = e.end(n-k-1); + } + } + + + // Set up the final bidiagonal matrix or order p. + int p = std::min(n,m+1); + if (nct < n) + m_sigma[nct] = matA(nct,nct); + if (m < p) + m_sigma[p-1] = 0.0; + if (nrt+1 < p) + e[nrt] = matA(nrt,p-1); + e[p-1] = 0.0; + + // If required, generate U. + if (wantu) + { + for (j = nct; j < nu; ++j) + { + m_matU.col(j).setZero(); + m_matU(j,j) = 1.0; + } + for (k = nct-1; k >= 0; k--) + { + if (m_sigma[k] != 0.0) + { + for (j = k+1; j < nu; ++j) + { + Scalar t = m_matU.col(k).end(m-k).eigen2_dot(m_matU.col(j).end(m-k)); // FIXME is it really a dot product we want ? 
+ t = -t/m_matU(k,k); + m_matU.col(j).end(m-k) += t * m_matU.col(k).end(m-k); + } + m_matU.col(k).end(m-k) = - m_matU.col(k).end(m-k); + m_matU(k,k) = Scalar(1) + m_matU(k,k); + if (k-1>0) + m_matU.col(k).start(k-1).setZero(); + } + else + { + m_matU.col(k).setZero(); + m_matU(k,k) = 1.0; + } + } + } + + // If required, generate V. + if (wantv) + { + for (k = n-1; k >= 0; k--) + { + if ((k < nrt) & (e[k] != 0.0)) + { + for (j = k+1; j < nu; ++j) + { + Scalar t = m_matV.col(k).end(n-k-1).eigen2_dot(m_matV.col(j).end(n-k-1)); // FIXME is it really a dot product we want ? + t = -t/m_matV(k+1,k); + m_matV.col(j).end(n-k-1) += t * m_matV.col(k).end(n-k-1); + } + } + m_matV.col(k).setZero(); + m_matV(k,k) = 1.0; + } + } + + // Main iteration loop for the singular values. + int pp = p-1; + int iter = 0; + Scalar eps = ei_pow(Scalar(2),ei_is_same_type::ret ? Scalar(-23) : Scalar(-52)); + while (p > 0) + { + int k=0; + int kase=0; + + // Here is where a test for too many iterations would go. + + // This section of the program inspects for + // negligible elements in the s and e arrays. On + // completion the variables kase and k are set as follows. + + // kase = 1 if s(p) and e[k-1] are negligible and k

= -1; --k) + { + if (k == -1) + break; + if (ei_abs(e[k]) <= eps*(ei_abs(m_sigma[k]) + ei_abs(m_sigma[k+1]))) + { + e[k] = 0.0; + break; + } + } + if (k == p-2) + { + kase = 4; + } + else + { + int ks; + for (ks = p-1; ks >= k; --ks) + { + if (ks == k) + break; + Scalar t = (ks != p ? ei_abs(e[ks]) : Scalar(0)) + (ks != k+1 ? ei_abs(e[ks-1]) : Scalar(0)); + if (ei_abs(m_sigma[ks]) <= eps*t) + { + m_sigma[ks] = 0.0; + break; + } + } + if (ks == k) + { + kase = 3; + } + else if (ks == p-1) + { + kase = 1; + } + else + { + kase = 2; + k = ks; + } + } + ++k; + + // Perform the task indicated by kase. + switch (kase) + { + + // Deflate negligible s(p). + case 1: + { + Scalar f(e[p-2]); + e[p-2] = 0.0; + for (j = p-2; j >= k; --j) + { + Scalar t(internal::hypot(m_sigma[j],f)); + Scalar cs(m_sigma[j]/t); + Scalar sn(f/t); + m_sigma[j] = t; + if (j != k) + { + f = -sn*e[j-1]; + e[j-1] = cs*e[j-1]; + } + if (wantv) + { + for (i = 0; i < n; ++i) + { + t = cs*m_matV(i,j) + sn*m_matV(i,p-1); + m_matV(i,p-1) = -sn*m_matV(i,j) + cs*m_matV(i,p-1); + m_matV(i,j) = t; + } + } + } + } + break; + + // Split at negligible s(k). + case 2: + { + Scalar f(e[k-1]); + e[k-1] = 0.0; + for (j = k; j < p; ++j) + { + Scalar t(internal::hypot(m_sigma[j],f)); + Scalar cs( m_sigma[j]/t); + Scalar sn(f/t); + m_sigma[j] = t; + f = -sn*e[j]; + e[j] = cs*e[j]; + if (wantu) + { + for (i = 0; i < m; ++i) + { + t = cs*m_matU(i,j) + sn*m_matU(i,k-1); + m_matU(i,k-1) = -sn*m_matU(i,j) + cs*m_matU(i,k-1); + m_matU(i,j) = t; + } + } + } + } + break; + + // Perform one qr step. + case 3: + { + // Calculate the shift. 
+ Scalar scale = std::max(std::max(std::max(std::max( + ei_abs(m_sigma[p-1]),ei_abs(m_sigma[p-2])),ei_abs(e[p-2])), + ei_abs(m_sigma[k])),ei_abs(e[k])); + Scalar sp = m_sigma[p-1]/scale; + Scalar spm1 = m_sigma[p-2]/scale; + Scalar epm1 = e[p-2]/scale; + Scalar sk = m_sigma[k]/scale; + Scalar ek = e[k]/scale; + Scalar b = ((spm1 + sp)*(spm1 - sp) + epm1*epm1)/Scalar(2); + Scalar c = (sp*epm1)*(sp*epm1); + Scalar shift = 0.0; + if ((b != 0.0) || (c != 0.0)) + { + shift = ei_sqrt(b*b + c); + if (b < 0.0) + shift = -shift; + shift = c/(b + shift); + } + Scalar f = (sk + sp)*(sk - sp) + shift; + Scalar g = sk*ek; + + // Chase zeros. + + for (j = k; j < p-1; ++j) + { + Scalar t = internal::hypot(f,g); + Scalar cs = f/t; + Scalar sn = g/t; + if (j != k) + e[j-1] = t; + f = cs*m_sigma[j] + sn*e[j]; + e[j] = cs*e[j] - sn*m_sigma[j]; + g = sn*m_sigma[j+1]; + m_sigma[j+1] = cs*m_sigma[j+1]; + if (wantv) + { + for (i = 0; i < n; ++i) + { + t = cs*m_matV(i,j) + sn*m_matV(i,j+1); + m_matV(i,j+1) = -sn*m_matV(i,j) + cs*m_matV(i,j+1); + m_matV(i,j) = t; + } + } + t = internal::hypot(f,g); + cs = f/t; + sn = g/t; + m_sigma[j] = t; + f = cs*e[j] + sn*m_sigma[j+1]; + m_sigma[j+1] = -sn*e[j] + cs*m_sigma[j+1]; + g = sn*e[j+1]; + e[j+1] = cs*e[j+1]; + if (wantu && (j < m-1)) + { + for (i = 0; i < m; ++i) + { + t = cs*m_matU(i,j) + sn*m_matU(i,j+1); + m_matU(i,j+1) = -sn*m_matU(i,j) + cs*m_matU(i,j+1); + m_matU(i,j) = t; + } + } + } + e[p-2] = f; + iter = iter + 1; + } + break; + + // Convergence. + case 4: + { + // Make the singular values positive. + if (m_sigma[k] <= 0.0) + { + m_sigma[k] = m_sigma[k] < Scalar(0) ? -m_sigma[k] : Scalar(0); + if (wantv) + m_matV.col(k).start(pp+1) = -m_matV.col(k).start(pp+1); + } + + // Order the singular values. 
+ while (k < pp) + { + if (m_sigma[k] >= m_sigma[k+1]) + break; + Scalar t = m_sigma[k]; + m_sigma[k] = m_sigma[k+1]; + m_sigma[k+1] = t; + if (wantv && (k < n-1)) + m_matV.col(k).swap(m_matV.col(k+1)); + if (wantu && (k < m-1)) + m_matU.col(k).swap(m_matU.col(k+1)); + ++k; + } + iter = 0; + p--; + } + break; + } // end big switch + } // end iterations +} + +template +SVD& SVD::sort() +{ + int mu = m_matU.rows(); + int mv = m_matV.rows(); + int n = m_matU.cols(); + + for (int i=0; i p) + { + k = j; + p = m_sigma.coeff(j); + } + } + if (k != i) + { + m_sigma.coeffRef(k) = m_sigma.coeff(i); // i.e. + m_sigma.coeffRef(i) = p; // swaps the i-th and the k-th elements + + int j = mu; + for(int s=0; j!=0; ++s, --j) + std::swap(m_matU.coeffRef(s,i), m_matU.coeffRef(s,k)); + + j = mv; + for (int s=0; j!=0; ++s, --j) + std::swap(m_matV.coeffRef(s,i), m_matV.coeffRef(s,k)); + } + } + return *this; +} + +/** \returns the solution of \f$ A x = b \f$ using the current SVD decomposition of A. + * The parts of the solution corresponding to zero singular values are ignored. + * + * \sa MatrixBase::svd(), LU::solve(), LLT::solve() + */ +template +template +bool SVD::solve(const MatrixBase &b, ResultType* result) const +{ + const int rows = m_matU.rows(); + ei_assert(b.rows() == rows); + + Scalar maxVal = m_sigma.cwise().abs().maxCoeff(); + for (int j=0; j aux = m_matU.transpose() * b.col(j); + + for (int i = 0; i col(j) = m_matV * aux; + } + return true; +} + +/** Computes the polar decomposition of the matrix, as a product unitary x positive. + * + * If either pointer is zero, the corresponding computation is skipped. + * + * Only for square matrices. 
+ * + * \sa computePositiveUnitary(), computeRotationScaling() + */ +template +template +void SVD::computeUnitaryPositive(UnitaryType *unitary, + PositiveType *positive) const +{ + ei_assert(m_matU.cols() == m_matV.cols() && "Polar decomposition is only for square matrices"); + if(unitary) *unitary = m_matU * m_matV.adjoint(); + if(positive) *positive = m_matV * m_sigma.asDiagonal() * m_matV.adjoint(); +} + +/** Computes the polar decomposition of the matrix, as a product positive x unitary. + * + * If either pointer is zero, the corresponding computation is skipped. + * + * Only for square matrices. + * + * \sa computeUnitaryPositive(), computeRotationScaling() + */ +template +template +void SVD::computePositiveUnitary(UnitaryType *positive, + PositiveType *unitary) const +{ + ei_assert(m_matU.rows() == m_matV.rows() && "Polar decomposition is only for square matrices"); + if(unitary) *unitary = m_matU * m_matV.adjoint(); + if(positive) *positive = m_matU * m_sigma.asDiagonal() * m_matU.adjoint(); +} + +/** decomposes the matrix as a product rotation x scaling, the scaling being + * not necessarily positive. + * + * If either pointer is zero, the corresponding computation is skipped. + * + * This method requires the Geometry module. + * + * \sa computeScalingRotation(), computeUnitaryPositive() + */ +template +template +void SVD::computeRotationScaling(RotationType *rotation, ScalingType *scaling) const +{ + ei_assert(m_matU.rows() == m_matV.rows() && "Polar decomposition is only for square matrices"); + Scalar x = (m_matU * m_matV.adjoint()).determinant(); // so x has absolute value 1 + Matrix sv(m_sigma); + sv.coeffRef(0) *= x; + if(scaling) scaling->lazyAssign(m_matV * sv.asDiagonal() * m_matV.adjoint()); + if(rotation) + { + MatrixType m(m_matU); + m.col(0) /= x; + rotation->lazyAssign(m * m_matV.adjoint()); + } +} + +/** decomposes the matrix as a product scaling x rotation, the scaling being + * not necessarily positive. 
+ * + * If either pointer is zero, the corresponding computation is skipped. + * + * This method requires the Geometry module. + * + * \sa computeRotationScaling(), computeUnitaryPositive() + */ +template +template +void SVD::computeScalingRotation(ScalingType *scaling, RotationType *rotation) const +{ + ei_assert(m_matU.rows() == m_matV.rows() && "Polar decomposition is only for square matrices"); + Scalar x = (m_matU * m_matV.adjoint()).determinant(); // so x has absolute value 1 + Matrix sv(m_sigma); + sv.coeffRef(0) *= x; + if(scaling) scaling->lazyAssign(m_matU * sv.asDiagonal() * m_matU.adjoint()); + if(rotation) + { + MatrixType m(m_matU); + m.col(0) /= x; + rotation->lazyAssign(m * m_matV.adjoint()); + } +} + + +/** \svd_module + * \returns the SVD decomposition of \c *this + */ +template +inline SVD::PlainObject> +MatrixBase::svd() const +{ + return SVD(derived()); +} + +#endif // EIGEN2_SVD_H diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/TriangularSolver.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/TriangularSolver.h index a83640e62..e94e47a50 100644 --- a/gtsam/3rdparty/Eigen/src/Eigen2Support/TriangularSolver.h +++ b/gtsam/3rdparty/Eigen/src/Eigen2Support/TriangularSolver.h @@ -40,14 +40,14 @@ template typename ExpressionType::PlainObject Flagged::solveTriangular(const MatrixBase& other) const { - return m_matrix.template triangularView.solve(other.derived()); + return m_matrix.template triangularView().solve(other.derived()); } template template void Flagged::solveTriangularInPlace(const MatrixBase& other) const { - m_matrix.template triangularView.solveInPlace(other.derived()); + m_matrix.template triangularView().solveInPlace(other.derived()); } #endif // EIGEN_TRIANGULAR_SOLVER2_H diff --git a/gtsam/3rdparty/Eigen/src/Eigen2Support/VectorBlock.h b/gtsam/3rdparty/Eigen/src/Eigen2Support/VectorBlock.h index a7385bcc3..010031d19 100644 --- a/gtsam/3rdparty/Eigen/src/Eigen2Support/VectorBlock.h +++ 
b/gtsam/3rdparty/Eigen/src/Eigen2Support/VectorBlock.h @@ -23,8 +23,8 @@ // License and a copy of the GNU General Public License along with // Eigen. If not, see . -#ifndef EIGEN_VECTORBLOCK2_H -#define EIGEN_VECTORBLOCK2_H +#ifndef EIGEN2_VECTORBLOCK_H +#define EIGEN2_VECTORBLOCK_H /** \deprecated use DenseMase::head(Index) */ template @@ -37,11 +37,11 @@ MatrixBase::start(Index size) /** \deprecated use DenseMase::head(Index) */ template -inline const VectorBlock +inline const VectorBlock MatrixBase::start(Index size) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return VectorBlock(derived(), 0, size); + return VectorBlock(derived(), 0, size); } /** \deprecated use DenseMase::tail(Index) */ @@ -55,11 +55,11 @@ MatrixBase::end(Index size) /** \deprecated use DenseMase::tail(Index) */ template -inline const VectorBlock +inline const VectorBlock MatrixBase::end(Index size) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return VectorBlock(derived(), this->size() - size, size); + return VectorBlock(derived(), this->size() - size, size); } /** \deprecated use DenseMase::head() */ @@ -75,11 +75,11 @@ MatrixBase::start() /** \deprecated use DenseMase::head() */ template template -inline const VectorBlock +inline const VectorBlock MatrixBase::start() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return VectorBlock(derived(), 0); + return VectorBlock(derived(), 0); } /** \deprecated use DenseMase::tail() */ @@ -95,11 +95,11 @@ MatrixBase::end() /** \deprecated use DenseMase::tail() */ template template -inline const VectorBlock +inline const VectorBlock MatrixBase::end() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return VectorBlock(derived(), size() - Size); + return VectorBlock(derived(), size() - Size); } -#endif // EIGEN_VECTORBLOCK2_H +#endif // EIGEN2_VECTORBLOCK_H diff --git a/gtsam/3rdparty/Eigen/src/Eigenvalues/ComplexEigenSolver.h b/gtsam/3rdparty/Eigen/src/Eigenvalues/ComplexEigenSolver.h index 7bf1d140e..57e00227d 100644 --- 
a/gtsam/3rdparty/Eigen/src/Eigenvalues/ComplexEigenSolver.h +++ b/gtsam/3rdparty/Eigen/src/Eigenvalues/ComplexEigenSolver.h @@ -96,7 +96,7 @@ template class ComplexEigenSolver * This is a square matrix with entries of type #ComplexScalar. * The size is the same as the size of #MatrixType. */ - typedef Matrix EigenvectorType; + typedef Matrix EigenvectorType; /** \brief Default constructor. * @@ -169,8 +169,8 @@ template class ComplexEigenSolver */ const EigenvectorType& eigenvectors() const { - ei_assert(m_isInitialized && "ComplexEigenSolver is not initialized."); - ei_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); + eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized."); + eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); return m_eivec; } @@ -186,14 +186,15 @@ template class ComplexEigenSolver * This function returns a column vector containing the * eigenvalues. Eigenvalues are repeated according to their * algebraic multiplicity, so there are as many eigenvalues as - * rows in the matrix. + * rows in the matrix. The eigenvalues are not sorted in any particular + * order. * * Example: \include ComplexEigenSolver_eigenvalues.cpp * Output: \verbinclude ComplexEigenSolver_eigenvalues.out */ const EigenvalueType& eigenvalues() const { - ei_assert(m_isInitialized && "ComplexEigenSolver is not initialized."); + eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized."); return m_eivalues; } @@ -229,7 +230,7 @@ template class ComplexEigenSolver */ ComputationInfo info() const { - ei_assert(m_isInitialized && "ComplexEigenSolver is not initialized."); + eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized."); return m_schur.info(); } @@ -293,7 +294,7 @@ void ComplexEigenSolver::doComputeEigenvectors(RealScalar matrixnorm { // If the i-th and k-th eigenvalue are equal, then z equals 0. 
// Use a small value instead, to prevent division by zero. - ei_real_ref(z) = NumTraits::epsilon() * matrixnorm; + internal::real_ref(z) = NumTraits::epsilon() * matrixnorm; } m_matX.coeffRef(i,k) = m_matX.coeff(i,k) / z; } diff --git a/gtsam/3rdparty/Eigen/src/Eigenvalues/ComplexSchur.h b/gtsam/3rdparty/Eigen/src/Eigenvalues/ComplexSchur.h index edda4211b..b1830f642 100644 --- a/gtsam/3rdparty/Eigen/src/Eigenvalues/ComplexSchur.h +++ b/gtsam/3rdparty/Eigen/src/Eigenvalues/ComplexSchur.h @@ -30,7 +30,9 @@ #include "./EigenvaluesCommon.h" #include "./HessenbergDecomposition.h" -template struct ei_complex_schur_reduce_to_hessenberg; +namespace internal { +template struct complex_schur_reduce_to_hessenberg; +} /** \eigenvalues_module \ingroup Eigenvalues_Module * @@ -146,8 +148,8 @@ template class ComplexSchur */ const ComplexMatrixType& matrixU() const { - ei_assert(m_isInitialized && "ComplexSchur is not initialized."); - ei_assert(m_matUisUptodate && "The matrix U has not been computed during the ComplexSchur decomposition."); + eigen_assert(m_isInitialized && "ComplexSchur is not initialized."); + eigen_assert(m_matUisUptodate && "The matrix U has not been computed during the ComplexSchur decomposition."); return m_matU; } @@ -170,7 +172,7 @@ template class ComplexSchur */ const ComplexMatrixType& matrixT() const { - ei_assert(m_isInitialized && "ComplexSchur is not initialized."); + eigen_assert(m_isInitialized && "ComplexSchur is not initialized."); return m_matT; } @@ -201,7 +203,7 @@ template class ComplexSchur */ ComputationInfo info() const { - ei_assert(m_isInitialized && "RealSchur is not initialized."); + eigen_assert(m_isInitialized && "RealSchur is not initialized."); return m_info; } @@ -222,22 +224,24 @@ template class ComplexSchur bool subdiagonalEntryIsNeglegible(Index i); ComplexScalar computeShift(Index iu, Index iter); void reduceToTriangularForm(bool computeU); - friend struct ei_complex_schur_reduce_to_hessenberg::IsComplex>; + friend struct 
internal::complex_schur_reduce_to_hessenberg::IsComplex>; }; +namespace internal { + /** Computes the principal value of the square root of the complex \a z. */ template -std::complex ei_sqrt(const std::complex &z) +std::complex sqrt(const std::complex &z) { RealScalar t, tre, tim; - t = ei_abs(z); + t = abs(z); - if (ei_abs(ei_real(z)) <= ei_abs(ei_imag(z))) + if (abs(real(z)) <= abs(imag(z))) { // No cancellation in these formulas - tre = ei_sqrt(RealScalar(0.5)*(t + ei_real(z))); - tim = ei_sqrt(RealScalar(0.5)*(t - ei_real(z))); + tre = sqrt(RealScalar(0.5)*(t + real(z))); + tim = sqrt(RealScalar(0.5)*(t - real(z))); } else { @@ -245,14 +249,14 @@ std::complex ei_sqrt(const std::complex &z) if (z.real() > RealScalar(0)) { tre = t + z.real(); - tim = ei_abs(ei_imag(z))*ei_sqrt(RealScalar(0.5)/tre); - tre = ei_sqrt(RealScalar(0.5)*tre); + tim = abs(imag(z))*sqrt(RealScalar(0.5)/tre); + tre = sqrt(RealScalar(0.5)*tre); } else { tim = t - z.real(); - tre = ei_abs(ei_imag(z))*ei_sqrt(RealScalar(0.5)/tim); - tim = ei_sqrt(RealScalar(0.5)*tim); + tre = abs(imag(z))*sqrt(RealScalar(0.5)/tim); + tim = sqrt(RealScalar(0.5)*tim); } } if(z.imag() < RealScalar(0)) @@ -260,6 +264,7 @@ std::complex ei_sqrt(const std::complex &z) return (std::complex(tre,tim)); } +} // end namespace internal /** If m_matT(i+1,i) is neglegible in floating point arithmetic @@ -268,9 +273,9 @@ std::complex ei_sqrt(const std::complex &z) template inline bool ComplexSchur::subdiagonalEntryIsNeglegible(Index i) { - RealScalar d = ei_norm1(m_matT.coeff(i,i)) + ei_norm1(m_matT.coeff(i+1,i+1)); - RealScalar sd = ei_norm1(m_matT.coeff(i+1,i)); - if (ei_isMuchSmallerThan(sd, d, NumTraits::epsilon())) + RealScalar d = internal::norm1(m_matT.coeff(i,i)) + internal::norm1(m_matT.coeff(i+1,i+1)); + RealScalar sd = internal::norm1(m_matT.coeff(i+1,i)); + if (internal::isMuchSmallerThan(sd, d, NumTraits::epsilon())) { m_matT.coeffRef(i+1,i) = ComplexScalar(0); return true; @@ -286,7 +291,7 @@ typename 
ComplexSchur::ComplexScalar ComplexSchur::compu if (iter == 10 || iter == 20) { // exceptional shift, taken from http://www.netlib.org/eispack/comqr.f - return ei_abs(ei_real(m_matT.coeff(iu,iu-1))) + ei_abs(ei_real(m_matT.coeff(iu-1,iu-2))); + return internal::abs(internal::real(m_matT.coeff(iu,iu-1))) + internal::abs(internal::real(m_matT.coeff(iu-1,iu-2))); } // compute the shift as one of the eigenvalues of t, the 2x2 @@ -297,19 +302,19 @@ typename ComplexSchur::ComplexScalar ComplexSchur::compu ComplexScalar b = t.coeff(0,1) * t.coeff(1,0); ComplexScalar c = t.coeff(0,0) - t.coeff(1,1); - ComplexScalar disc = ei_sqrt(c*c + RealScalar(4)*b); + ComplexScalar disc = internal::sqrt(c*c + RealScalar(4)*b); ComplexScalar det = t.coeff(0,0) * t.coeff(1,1) - b; ComplexScalar trace = t.coeff(0,0) + t.coeff(1,1); ComplexScalar eival1 = (trace + disc) / RealScalar(2); ComplexScalar eival2 = (trace - disc) / RealScalar(2); - if(ei_norm1(eival1) > ei_norm1(eival2)) + if(internal::norm1(eival1) > internal::norm1(eival2)) eival2 = det / eival1; else eival1 = det / eival2; // choose the eigenvalue closest to the bottom entry of the diagonal - if(ei_norm1(eival1-t.coeff(1,1)) < ei_norm1(eival2-t.coeff(1,1))) + if(internal::norm1(eival1-t.coeff(1,1)) < internal::norm1(eival2-t.coeff(1,1))) return normt * eival1; else return normt * eival2; @@ -320,7 +325,7 @@ template ComplexSchur& ComplexSchur::compute(const MatrixType& matrix, bool computeU) { m_matUisUptodate = false; - ei_assert(matrix.cols() == matrix.rows()); + eigen_assert(matrix.cols() == matrix.rows()); if(matrix.cols() == 1) { @@ -332,14 +337,16 @@ ComplexSchur& ComplexSchur::compute(const MatrixType& ma return *this; } - ei_complex_schur_reduce_to_hessenberg::IsComplex>::run(*this, matrix, computeU); + internal::complex_schur_reduce_to_hessenberg::IsComplex>::run(*this, matrix, computeU); reduceToTriangularForm(computeU); return *this; } +namespace internal { + /* Reduce given matrix to Hessenberg form */ template 
-struct ei_complex_schur_reduce_to_hessenberg +struct complex_schur_reduce_to_hessenberg { // this is the implementation for the case IsComplex = true static void run(ComplexSchur& _this, const MatrixType& matrix, bool computeU) @@ -351,7 +358,7 @@ struct ei_complex_schur_reduce_to_hessenberg }; template -struct ei_complex_schur_reduce_to_hessenberg +struct complex_schur_reduce_to_hessenberg { static void run(ComplexSchur& _this, const MatrixType& matrix, bool computeU) { @@ -370,6 +377,8 @@ struct ei_complex_schur_reduce_to_hessenberg } }; +} // end namespace internal + // Reduce the Hessenberg matrix m_matT to triangular form by QR iteration. template void ComplexSchur::reduceToTriangularForm(bool computeU) @@ -411,7 +420,7 @@ void ComplexSchur::reduceToTriangularForm(bool computeU) bulge is chased down to the bottom of the active submatrix. */ ComplexScalar shift = computeShift(iu, iter); - PlanarRotation rot; + JacobiRotation rot; rot.makeGivens(m_matT.coeff(il,il) - shift, m_matT.coeff(il+1,il)); m_matT.rightCols(m_matT.cols()-il).applyOnTheLeft(il, il+1, rot.adjoint()); m_matT.topRows(std::min(il+2,iu)+1).applyOnTheRight(il, il+1, rot); diff --git a/gtsam/3rdparty/Eigen/src/Eigenvalues/EigenSolver.h b/gtsam/3rdparty/Eigen/src/Eigenvalues/EigenSolver.h index 0a5faec52..b3594655a 100644 --- a/gtsam/3rdparty/Eigen/src/Eigenvalues/EigenSolver.h +++ b/gtsam/3rdparty/Eigen/src/Eigenvalues/EigenSolver.h @@ -211,8 +211,8 @@ template class EigenSolver */ const MatrixType& pseudoEigenvectors() const { - ei_assert(m_isInitialized && "EigenSolver is not initialized."); - ei_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); + eigen_assert(m_isInitialized && "EigenSolver is not initialized."); + eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); return m_eivec; } @@ -228,6 +228,7 @@ template class EigenSolver * block-diagonal. 
The blocks on the diagonal are either 1-by-1 or 2-by-2 * blocks of the form * \f$ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f$. + * These blocks are not sorted in any particular order. * The matrix \f$ D \f$ and the matrix \f$ V \f$ returned by * pseudoEigenvectors() satisfy \f$ AV = VD \f$. * @@ -244,7 +245,8 @@ template class EigenSolver * compute(const MatrixType&, bool) has been called before. * * The eigenvalues are repeated according to their algebraic multiplicity, - * so there are as many eigenvalues as rows in the matrix. + * so there are as many eigenvalues as rows in the matrix. The eigenvalues + * are not sorted in any particular order. * * Example: \include EigenSolver_eigenvalues.cpp * Output: \verbinclude EigenSolver_eigenvalues.out @@ -254,7 +256,7 @@ template class EigenSolver */ const EigenvalueType& eigenvalues() const { - ei_assert(m_isInitialized && "EigenSolver is not initialized."); + eigen_assert(m_isInitialized && "EigenSolver is not initialized."); return m_eivalues; } @@ -289,7 +291,7 @@ template class EigenSolver ComputationInfo info() const { - ei_assert(m_isInitialized && "ComplexEigenSolver is not initialized."); + eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized."); return m_realSchur.info(); } @@ -311,17 +313,17 @@ template class EigenSolver template MatrixType EigenSolver::pseudoEigenvalueMatrix() const { - ei_assert(m_isInitialized && "EigenSolver is not initialized."); + eigen_assert(m_isInitialized && "EigenSolver is not initialized."); Index n = m_eivalues.rows(); MatrixType matD = MatrixType::Zero(n,n); for (Index i=0; i(i,i) << ei_real(m_eivalues.coeff(i)), ei_imag(m_eivalues.coeff(i)), - -ei_imag(m_eivalues.coeff(i)), ei_real(m_eivalues.coeff(i)); + matD.template block<2,2>(i,i) << internal::real(m_eivalues.coeff(i)), internal::imag(m_eivalues.coeff(i)), + -internal::imag(m_eivalues.coeff(i)), internal::real(m_eivalues.coeff(i)); ++i; } } @@ -331,13 +333,13 @@ MatrixType 
EigenSolver::pseudoEigenvalueMatrix() const template typename EigenSolver::EigenvectorsType EigenSolver::eigenvectors() const { - ei_assert(m_isInitialized && "EigenSolver is not initialized."); - ei_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); + eigen_assert(m_isInitialized && "EigenSolver is not initialized."); + eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); Index n = m_eivec.cols(); EigenvectorsType matV(n,n); for (Index j=0; j(); @@ -384,7 +386,7 @@ EigenSolver& EigenSolver::compute(const MatrixType& matr else { Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i+1, i+1)); - Scalar z = ei_sqrt(ei_abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1))); + Scalar z = internal::sqrt(internal::abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1))); m_eivalues.coeffRef(i) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, z); m_eivalues.coeffRef(i+1) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, -z); i += 2; @@ -407,7 +409,7 @@ template std::complex cdiv(Scalar xr, Scalar xi, Scalar yr, Scalar yi) { Scalar r,d; - if (ei_abs(yr) > ei_abs(yi)) + if (internal::abs(yr) > internal::abs(yi)) { r = yi/yr; d = yr + r*yi; @@ -480,14 +482,14 @@ void EigenSolver::doComputeEigenvectors() Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag(); Scalar t = (x * lastr - lastw * r) / denom; m_matT.coeffRef(i,n) = t; - if (ei_abs(x) > ei_abs(lastw)) + if (internal::abs(x) > internal::abs(lastw)) m_matT.coeffRef(i+1,n) = (-r - w * t) / x; else m_matT.coeffRef(i+1,n) = (-lastr - y * t) / lastw; } // Overflow control - Scalar t = ei_abs(m_matT.coeff(i,n)); + Scalar t = internal::abs(m_matT.coeff(i,n)); if ((eps * t) * t > 1) m_matT.col(n).tail(size-i) /= t; } @@ -499,16 +501,16 @@ void EigenSolver::doComputeEigenvectors() Index l = n-1; // Last vector component imaginary so 
matrix is triangular - if (ei_abs(m_matT.coeff(n,n-1)) > ei_abs(m_matT.coeff(n-1,n))) + if (internal::abs(m_matT.coeff(n,n-1)) > internal::abs(m_matT.coeff(n-1,n))) { m_matT.coeffRef(n-1,n-1) = q / m_matT.coeff(n,n-1); m_matT.coeffRef(n-1,n) = -(m_matT.coeff(n,n) - p) / m_matT.coeff(n,n-1); } else { - std::complex cc = cdiv(0.0,-m_matT.coeff(n-1,n),m_matT.coeff(n-1,n-1)-p,q); - m_matT.coeffRef(n-1,n-1) = ei_real(cc); - m_matT.coeffRef(n-1,n) = ei_imag(cc); + std::complex cc = cdiv(0.0,-m_matT.coeff(n-1,n),m_matT.coeff(n-1,n-1)-p,q); + m_matT.coeffRef(n-1,n-1) = internal::real(cc); + m_matT.coeffRef(n-1,n) = internal::imag(cc); } m_matT.coeffRef(n,n-1) = 0.0; m_matT.coeffRef(n,n) = 1.0; @@ -530,8 +532,8 @@ void EigenSolver::doComputeEigenvectors() if (m_eivalues.coeff(i).imag() == 0) { std::complex cc = cdiv(-ra,-sa,w,q); - m_matT.coeffRef(i,n-1) = ei_real(cc); - m_matT.coeffRef(i,n) = ei_imag(cc); + m_matT.coeffRef(i,n-1) = internal::real(cc); + m_matT.coeffRef(i,n) = internal::imag(cc); } else { @@ -541,12 +543,12 @@ void EigenSolver::doComputeEigenvectors() Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q; Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q; if ((vr == 0.0) && (vi == 0.0)) - vr = eps * norm * (ei_abs(w) + ei_abs(q) + ei_abs(x) + ei_abs(y) + ei_abs(lastw)); + vr = eps * norm * (internal::abs(w) + internal::abs(q) + internal::abs(x) + internal::abs(y) + internal::abs(lastw)); std::complex cc = cdiv(x*lastra-lastw*ra+q*sa,x*lastsa-lastw*sa-q*ra,vr,vi); - m_matT.coeffRef(i,n-1) = ei_real(cc); - m_matT.coeffRef(i,n) = ei_imag(cc); - if (ei_abs(x) > (ei_abs(lastw) + ei_abs(q))) + m_matT.coeffRef(i,n-1) = internal::real(cc); + m_matT.coeffRef(i,n) = internal::imag(cc); + if (internal::abs(x) > (internal::abs(lastw) + internal::abs(q))) { m_matT.coeffRef(i+1,n-1) = (-ra - w * m_matT.coeff(i,n-1) + q * m_matT.coeff(i,n)) / x; m_matT.coeffRef(i+1,n) = 
(-sa - w * m_matT.coeff(i,n) - q * m_matT.coeff(i,n-1)) / x; @@ -554,13 +556,13 @@ void EigenSolver::doComputeEigenvectors() else { cc = cdiv(-lastra-y*m_matT.coeff(i,n-1),-lastsa-y*m_matT.coeff(i,n),lastw,q); - m_matT.coeffRef(i+1,n-1) = ei_real(cc); - m_matT.coeffRef(i+1,n) = ei_imag(cc); + m_matT.coeffRef(i+1,n-1) = internal::real(cc); + m_matT.coeffRef(i+1,n) = internal::imag(cc); } } // Overflow control - Scalar t = std::max(ei_abs(m_matT.coeff(i,n-1)),ei_abs(m_matT.coeff(i,n))); + Scalar t = std::max(internal::abs(m_matT.coeff(i,n-1)),internal::abs(m_matT.coeff(i,n))); if ((eps * t) * t > 1) m_matT.block(i, n-1, size-i, 2) /= t; diff --git a/gtsam/3rdparty/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h b/gtsam/3rdparty/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h index a47a3dc2e..a0ece70c4 100644 --- a/gtsam/3rdparty/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +++ b/gtsam/3rdparty/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h @@ -70,13 +70,9 @@ class GeneralizedSelfAdjointEigenSolver : public SelfAdjointEigenSolver<_MatrixT /** \brief Default constructor for fixed-size matrices. * * The default constructor is useful in cases in which the user intends to - * perform decompositions via compute(const MatrixType&, bool) or - * compute(const MatrixType&, const MatrixType&, bool). This constructor + * perform decompositions via compute(). This constructor * can only be used if \p _MatrixType is a fixed-size matrix; use - * SelfAdjointEigenSolver(Index) for dynamic-size matrices. - * - * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp - * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver.out + * GeneralizedSelfAdjointEigenSolver(Index) for dynamic-size matrices. */ GeneralizedSelfAdjointEigenSolver() : Base() {} @@ -86,12 +82,11 @@ class GeneralizedSelfAdjointEigenSolver : public SelfAdjointEigenSolver<_MatrixT * eigenvalues and eigenvectors will be computed. 
* * This constructor is useful for dynamic-size matrices, when the user - * intends to perform decompositions via compute(const MatrixType&, bool) - * or compute(const MatrixType&, const MatrixType&, bool). The \p size + * intends to perform decompositions via compute(). The \p size * parameter is only used as a hint. It is not an error to give a wrong * \p size, but it may impair performance. * - * \sa compute(const MatrixType&, bool) for an example + * \sa compute() for an example */ GeneralizedSelfAdjointEigenSolver(Index size) : Base(size) @@ -182,8 +177,8 @@ template GeneralizedSelfAdjointEigenSolver& GeneralizedSelfAdjointEigenSolver:: compute(const MatrixType& matA, const MatrixType& matB, int options) { - ei_assert(matA.cols()==matA.rows() && matB.rows()==matA.rows() && matB.cols()==matB.rows()); - ei_assert((options&~(EigVecMask|GenEigMask))==0 + eigen_assert(matA.cols()==matA.rows() && matB.rows()==matA.rows() && matB.cols()==matB.rows()); + eigen_assert((options&~(EigVecMask|GenEigMask))==0 && (options&EigVecMask)!=EigVecMask && ((options&GenEigMask)==0 || (options&GenEigMask)==Ax_lBx || (options&GenEigMask)==ABx_lx || (options&GenEigMask)==BAx_lx) diff --git a/gtsam/3rdparty/Eigen/src/Eigenvalues/HessenbergDecomposition.h b/gtsam/3rdparty/Eigen/src/Eigenvalues/HessenbergDecomposition.h index 79554187a..c17f155a5 100644 --- a/gtsam/3rdparty/Eigen/src/Eigenvalues/HessenbergDecomposition.h +++ b/gtsam/3rdparty/Eigen/src/Eigenvalues/HessenbergDecomposition.h @@ -26,14 +26,17 @@ #ifndef EIGEN_HESSENBERGDECOMPOSITION_H #define EIGEN_HESSENBERGDECOMPOSITION_H +namespace internal { + template struct HessenbergDecompositionMatrixHReturnType; - template -struct ei_traits > +struct traits > { typedef MatrixType ReturnType; }; +} + /** \eigenvalues_module \ingroup Eigenvalues_Module * * @@ -93,6 +96,8 @@ template class HessenbergDecomposition /** \brief Return type of matrixQ() */ typedef typename HouseholderSequence::ConjugateReturnType HouseholderSequenceType; + 
+ typedef internal::HessenbergDecompositionMatrixHReturnType MatrixHReturnType; /** \brief Default constructor; the decomposition will be computed later. * @@ -184,7 +189,7 @@ template class HessenbergDecomposition */ const CoeffVectorType& householderCoefficients() const { - ei_assert(m_isInitialized && "HessenbergDecomposition is not initialized."); + eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized."); return m_hCoeffs; } @@ -219,7 +224,7 @@ template class HessenbergDecomposition */ const MatrixType& packedMatrix() const { - ei_assert(m_isInitialized && "HessenbergDecomposition is not initialized."); + eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized."); return m_matrix; } @@ -239,8 +244,10 @@ template class HessenbergDecomposition */ HouseholderSequenceType matrixQ() const { - ei_assert(m_isInitialized && "HessenbergDecomposition is not initialized."); - return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate(), false, m_matrix.rows() - 1, 1); + eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized."); + return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate()) + .setLength(m_matrix.rows() - 1) + .setShift(1); } /** \brief Constructs the Hessenberg matrix H in the decomposition @@ -263,10 +270,10 @@ template class HessenbergDecomposition * * \sa matrixQ(), packedMatrix() */ - HessenbergDecompositionMatrixHReturnType matrixH() const + MatrixHReturnType matrixH() const { - ei_assert(m_isInitialized && "HessenbergDecomposition is not initialized."); - return HessenbergDecompositionMatrixHReturnType(*this); + eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized."); + return MatrixHReturnType(*this); } private: @@ -319,10 +326,12 @@ void HessenbergDecomposition::_compute(MatrixType& matA, CoeffVector // A = A H' matA.rightCols(remainingSize) - .applyHouseholderOnTheRight(matA.col(i).tail(remainingSize-1).conjugate(), ei_conj(h), &temp.coeffRef(0)); + 
.applyHouseholderOnTheRight(matA.col(i).tail(remainingSize-1).conjugate(), internal::conj(h), &temp.coeffRef(0)); } } +namespace internal { + /** \eigenvalues_module \ingroup Eigenvalues_Module * * @@ -370,4 +379,6 @@ template struct HessenbergDecompositionMatrixHReturnType const HessenbergDecomposition& m_hess; }; +} + #endif // EIGEN_HESSENBERGDECOMPOSITION_H diff --git a/gtsam/3rdparty/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h b/gtsam/3rdparty/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h index e517b6e5a..5591519fb 100644 --- a/gtsam/3rdparty/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +++ b/gtsam/3rdparty/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h @@ -26,10 +26,10 @@ #ifndef EIGEN_MATRIXBASEEIGENVALUES_H #define EIGEN_MATRIXBASEEIGENVALUES_H - +namespace internal { template -struct ei_eigenvalues_selector +struct eigenvalues_selector { // this is the implementation for the case IsComplex = true static inline typename MatrixBase::EigenvaluesReturnType const @@ -42,7 +42,7 @@ struct ei_eigenvalues_selector }; template -struct ei_eigenvalues_selector +struct eigenvalues_selector { static inline typename MatrixBase::EigenvaluesReturnType const run(const MatrixBase& m) @@ -53,6 +53,8 @@ struct ei_eigenvalues_selector } }; +} // end namespace internal + /** \brief Computes the eigenvalues of a matrix * \returns Column vector containing the eigenvalues. 
* @@ -77,8 +79,8 @@ template inline typename MatrixBase::EigenvaluesReturnType MatrixBase::eigenvalues() const { - typedef typename ei_traits::Scalar Scalar; - return ei_eigenvalues_selector::IsComplex>::run(derived()); + typedef typename internal::traits::Scalar Scalar; + return internal::eigenvalues_selector::IsComplex>::run(derived()); } /** \brief Computes the eigenvalues of a matrix @@ -135,7 +137,7 @@ MatrixBase::operatorNorm() const typename Derived::PlainObject m_eval(derived()); // FIXME if it is really guaranteed that the eigenvalues are already sorted, // then we don't need to compute a maxCoeff() here, comparing the 1st and last ones is enough. - return ei_sqrt((m_eval*m_eval.adjoint()) + return internal::sqrt((m_eval*m_eval.adjoint()) .eval() .template selfadjointView() .eigenvalues() diff --git a/gtsam/3rdparty/Eigen/src/Eigenvalues/RealSchur.h b/gtsam/3rdparty/Eigen/src/Eigenvalues/RealSchur.h index eb713339b..e8b2f1a99 100644 --- a/gtsam/3rdparty/Eigen/src/Eigenvalues/RealSchur.h +++ b/gtsam/3rdparty/Eigen/src/Eigenvalues/RealSchur.h @@ -137,8 +137,8 @@ template class RealSchur */ const MatrixType& matrixU() const { - ei_assert(m_isInitialized && "RealSchur is not initialized."); - ei_assert(m_matUisUptodate && "The matrix U has not been computed during the RealSchur decomposition."); + eigen_assert(m_isInitialized && "RealSchur is not initialized."); + eigen_assert(m_matUisUptodate && "The matrix U has not been computed during the RealSchur decomposition."); return m_matU; } @@ -154,7 +154,7 @@ template class RealSchur */ const MatrixType& matrixT() const { - ei_assert(m_isInitialized && "RealSchur is not initialized."); + eigen_assert(m_isInitialized && "RealSchur is not initialized."); return m_matT; } @@ -183,7 +183,7 @@ template class RealSchur */ ComputationInfo info() const { - ei_assert(m_isInitialized && "RealSchur is not initialized."); + eigen_assert(m_isInitialized && "RealSchur is not initialized."); return m_info; } @@ -247,7 +247,7 @@ 
RealSchur& RealSchur::compute(const MatrixType& matrix, { m_matT.coeffRef(iu,iu) = m_matT.coeff(iu,iu) + exshift; if (iu > 0) - m_matT.coeffRef(iu, iu-1) = Scalar(0); + m_matT.coeffRef(iu, iu-1) = Scalar(0); iu--; iter = 0; } @@ -259,7 +259,8 @@ RealSchur& RealSchur::compute(const MatrixType& matrix, } else // No convergence yet { - Vector3s firstHouseholderVector, shiftInfo; + // The firstHouseholderVector vector has to be initialized to something to get rid of a silly GCC warning (-O1 -Wall -DNDEBUG ) + Vector3s firstHouseholderVector(0,0,0), shiftInfo; computeShift(iu, iter, exshift, shiftInfo); iter = iter + 1; if (iter > m_maxIterations) break; @@ -300,10 +301,10 @@ inline typename MatrixType::Index RealSchur::findSmallSubdiagEntry(I Index res = iu; while (res > 0) { - Scalar s = ei_abs(m_matT.coeff(res-1,res-1)) + ei_abs(m_matT.coeff(res,res)); + Scalar s = internal::abs(m_matT.coeff(res-1,res-1)) + internal::abs(m_matT.coeff(res,res)); if (s == 0.0) s = norm; - if (ei_abs(m_matT.coeff(res,res-1)) < NumTraits::epsilon() * s) + if (internal::abs(m_matT.coeff(res,res-1)) < NumTraits::epsilon() * s) break; res--; } @@ -325,8 +326,8 @@ inline void RealSchur::splitOffTwoRows(Index iu, bool computeU, Scal if (q >= 0) // Two real eigenvalues { - Scalar z = ei_sqrt(ei_abs(q)); - PlanarRotation rot; + Scalar z = internal::sqrt(internal::abs(q)); + JacobiRotation rot; if (p >= 0) rot.makeGivens(p + z, m_matT.coeff(iu, iu-1)); else @@ -357,7 +358,7 @@ inline void RealSchur::computeShift(Index iu, Index iter, Scalar& ex exshift += shiftInfo.coeff(0); for (Index i = 0; i <= iu; ++i) m_matT.coeffRef(i,i) -= shiftInfo.coeff(0); - Scalar s = ei_abs(m_matT.coeff(iu,iu-1)) + ei_abs(m_matT.coeff(iu-1,iu-2)); + Scalar s = internal::abs(m_matT.coeff(iu,iu-1)) + internal::abs(m_matT.coeff(iu-1,iu-2)); shiftInfo.coeffRef(0) = Scalar(0.75) * s; shiftInfo.coeffRef(1) = Scalar(0.75) * s; shiftInfo.coeffRef(2) = Scalar(-0.4375) * s * s; @@ -370,7 +371,7 @@ inline void 
RealSchur::computeShift(Index iu, Index iter, Scalar& ex s = s * s + shiftInfo.coeff(2); if (s > 0) { - s = ei_sqrt(s); + s = internal::sqrt(s); if (shiftInfo.coeff(1) < shiftInfo.coeff(0)) s = -s; s = s + (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0); @@ -400,9 +401,9 @@ inline void RealSchur::initFrancisQRStep(Index il, Index iu, const V if (im == il) { break; } - const Scalar lhs = m_matT.coeff(im,im-1) * (ei_abs(v.coeff(1)) + ei_abs(v.coeff(2))); - const Scalar rhs = v.coeff(0) * (ei_abs(m_matT.coeff(im-1,im-1)) + ei_abs(Tmm) + ei_abs(m_matT.coeff(im+1,im+1))); - if (ei_abs(lhs) < NumTraits::epsilon() * rhs) + const Scalar lhs = m_matT.coeff(im,im-1) * (internal::abs(v.coeff(1)) + internal::abs(v.coeff(2))); + const Scalar rhs = v.coeff(0) * (internal::abs(m_matT.coeff(im-1,im-1)) + internal::abs(Tmm) + internal::abs(m_matT.coeff(im+1,im+1))); + if (internal::abs(lhs) < NumTraits::epsilon() * rhs) { break; } @@ -443,7 +444,7 @@ inline void RealSchur::performFrancisQRStep(Index il, Index im, Inde m_matT.block(k, k, 3, size-k).applyHouseholderOnTheLeft(ess, tau, workspace); m_matT.block(0, k, std::min(iu,k+3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace); if (computeU) - m_matU.block(0, k, size, 3).applyHouseholderOnTheRight(ess, tau, workspace); + m_matU.block(0, k, size, 3).applyHouseholderOnTheRight(ess, tau, workspace); } } diff --git a/gtsam/3rdparty/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/gtsam/3rdparty/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h index f14baa333..f79492818 100644 --- a/gtsam/3rdparty/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +++ b/gtsam/3rdparty/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h @@ -29,6 +29,9 @@ #include "./EigenvaluesCommon.h" #include "./Tridiagonalization.h" +template +class GeneralizedSelfAdjointEigenSolver; + /** \eigenvalues_module \ingroup Eigenvalues_Module * * @@ -59,12 +62,12 @@ * * Call the function compute() to compute the eigenvalues and eigenvectors of * a given matrix. 
Alternatively, you can use the - * SelfAdjointEigenSolver(const MatrixType&, bool) constructor which computes + * SelfAdjointEigenSolver(const MatrixType&, int) constructor which computes * the eigenvalues and eigenvectors at construction time. Once the eigenvalue * and eigenvectors are computed, they can be retrieved with the eigenvalues() * and eigenvectors() functions. * - * The documentation for SelfAdjointEigenSolver(const MatrixType&, bool) + * The documentation for SelfAdjointEigenSolver(const MatrixType&, int) * contains an example of the typical use of this class. * * To solve the \em generalized eigenvalue problem \f$ Av = \lambda Bv \f$ and @@ -101,14 +104,13 @@ template class SelfAdjointEigenSolver * This is a column vector with entries of type #RealScalar. * The length of the vector is the size of \p _MatrixType. */ - typedef typename ei_plain_col_type::type RealVectorType; + typedef typename internal::plain_col_type::type RealVectorType; typedef Tridiagonalization TridiagonalizationType; /** \brief Default constructor for fixed-size matrices. * * The default constructor is useful in cases in which the user intends to - * perform decompositions via compute(const MatrixType&, bool) or - * compute(const MatrixType&, const MatrixType&, bool). This constructor + * perform decompositions via compute(). This constructor * can only be used if \p _MatrixType is a fixed-size matrix; use * SelfAdjointEigenSolver(Index) for dynamic-size matrices. * @@ -128,12 +130,11 @@ template class SelfAdjointEigenSolver * eigenvalues and eigenvectors will be computed. * * This constructor is useful for dynamic-size matrices, when the user - * intends to perform decompositions via compute(const MatrixType&, bool) - * or compute(const MatrixType&, const MatrixType&, bool). The \p size + * intends to perform decompositions via compute(). The \p size * parameter is only used as a hint. It is not an error to give a wrong * \p size, but it may impair performance. 
* - * \sa compute(const MatrixType&, bool) for an example + * \sa compute() for an example */ SelfAdjointEigenSolver(Index size) : m_eivec(size, size), @@ -148,15 +149,14 @@ template class SelfAdjointEigenSolver * be computed. Only the lower triangular part of the matrix is referenced. * \param[in] options Can be ComputeEigenvectors (default) or EigenvaluesOnly. * - * This constructor calls compute(const MatrixType&, bool) to compute the + * This constructor calls compute(const MatrixType&, int) to compute the * eigenvalues of the matrix \p matrix. The eigenvectors are computed if * \p options equals ComputeEigenvectors. * * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.out * - * \sa compute(const MatrixType&, bool), - * SelfAdjointEigenSolver(const MatrixType&, const MatrixType&, bool) + * \sa compute(const MatrixType&, int) */ SelfAdjointEigenSolver(const MatrixType& matrix, int options = ComputeEigenvectors) : m_eivec(matrix.rows(), matrix.cols()), @@ -195,11 +195,11 @@ template class SelfAdjointEigenSolver * Example: \include SelfAdjointEigenSolver_compute_MatrixType.cpp * Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType.out * - * \sa SelfAdjointEigenSolver(const MatrixType&, bool) + * \sa SelfAdjointEigenSolver(const MatrixType&, int) */ SelfAdjointEigenSolver& compute(const MatrixType& matrix, int options = ComputeEigenvectors); - /** \brief Returns the eigenvectors of given matrix (pencil). + /** \brief Returns the eigenvectors of given matrix. * * \returns A const reference to the matrix whose columns are the eigenvectors. 
* @@ -219,19 +219,20 @@ template class SelfAdjointEigenSolver */ const MatrixType& eigenvectors() const { - ei_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); - ei_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); + eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); + eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); return m_eivec; } - /** \brief Returns the eigenvalues of given matrix (pencil). + /** \brief Returns the eigenvalues of given matrix. * * \returns A const reference to the column vector containing the eigenvalues. * * \pre The eigenvalues have been computed before. * * The eigenvalues are repeated according to their algebraic multiplicity, - * so there are as many eigenvalues as rows in the matrix. + * so there are as many eigenvalues as rows in the matrix. The eigenvalues + * are sorted in increasing order. * * Example: \include SelfAdjointEigenSolver_eigenvalues.cpp * Output: \verbinclude SelfAdjointEigenSolver_eigenvalues.out @@ -240,7 +241,7 @@ template class SelfAdjointEigenSolver */ const RealVectorType& eigenvalues() const { - ei_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); + eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); return m_eivalues; } @@ -264,8 +265,8 @@ template class SelfAdjointEigenSolver */ MatrixType operatorSqrt() const { - ei_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); - ei_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); + eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); + eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint(); } @@ -289,8 +290,8 @@ template class 
SelfAdjointEigenSolver */ MatrixType operatorInverseSqrt() const { - ei_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); - ei_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); + eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); + eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint(); } @@ -300,7 +301,7 @@ template class SelfAdjointEigenSolver */ ComputationInfo info() const { - ei_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); + eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); return m_info; } @@ -310,6 +311,36 @@ template class SelfAdjointEigenSolver */ static const int m_maxIterations = 30; + #ifdef EIGEN2_SUPPORT + SelfAdjointEigenSolver(const MatrixType& matrix, bool computeEigenvectors) + : m_eivec(matrix.rows(), matrix.cols()), + m_eivalues(matrix.cols()), + m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1), + m_isInitialized(false) + { + compute(matrix, computeEigenvectors); + } + + SelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true) + : m_eivec(matA.cols(), matA.cols()), + m_eivalues(matA.cols()), + m_subdiag(matA.cols() > 1 ? matA.cols() - 1 : 1), + m_isInitialized(false) + { + static_cast*>(this)->compute(matA, matB, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly); + } + + void compute(const MatrixType& matrix, bool computeEigenvectors) + { + compute(matrix, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly); + } + + void compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true) + { + compute(matA, matB, computeEigenvectors ? 
ComputeEigenvectors : EigenvaluesOnly); + } + #endif // EIGEN2_SUPPORT + protected: MatrixType m_eivec; RealVectorType m_eivalues; @@ -335,15 +366,17 @@ template class SelfAdjointEigenSolver * Implemented from Golub's "Matrix Computations", algorithm 8.3.2: * "implicit symmetric QR step with Wilkinson shift" */ -template -static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n); +namespace internal { +template +static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n); +} template SelfAdjointEigenSolver& SelfAdjointEigenSolver ::compute(const MatrixType& matrix, int options) { - ei_assert(matrix.cols() == matrix.rows()); - ei_assert((options&~(EigVecMask|GenEigMask))==0 + eigen_assert(matrix.cols() == matrix.rows()); + eigen_assert((options&~(EigVecMask|GenEigMask))==0 && (options&EigVecMask)!=EigVecMask && "invalid option parameter"); bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; @@ -352,7 +385,7 @@ SelfAdjointEigenSolver& SelfAdjointEigenSolver if(n==1) { - m_eivalues.coeffRef(0,0) = ei_real(matrix.coeff(0,0)); + m_eivalues.coeffRef(0,0) = internal::real(matrix.coeff(0,0)); if(computeEigenvectors) m_eivec.setOnes(); m_info = Success; @@ -365,10 +398,13 @@ SelfAdjointEigenSolver& SelfAdjointEigenSolver RealVectorType& diag = m_eivalues; MatrixType& mat = m_eivec; - mat = matrix; + // map the matrix coefficients to [-1:1] to avoid over- and underflow. 
+ RealScalar scale = matrix.cwiseAbs().maxCoeff(); + if(scale==Scalar(0)) scale = 1; + mat = matrix / scale; m_subdiag.resize(n-1); - ei_tridiagonalization_inplace(mat, diag, m_subdiag, computeEigenvectors); - + internal::tridiagonalization_inplace(mat, diag, m_subdiag, computeEigenvectors); + Index end = n-1; Index start = 0; Index iter = 0; // number of iterations we are working on one element @@ -376,7 +412,7 @@ SelfAdjointEigenSolver& SelfAdjointEigenSolver while (end>0) { for (Index i = start; i& SelfAdjointEigenSolver while (start>0 && m_subdiag[start-1]!=0) start--; - ei_tridiagonal_qr_step(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n); + internal::tridiagonal_qr_step(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n); } if (iter <= m_maxIterations) @@ -421,24 +457,33 @@ SelfAdjointEigenSolver& SelfAdjointEigenSolver } } } + + // scale back the eigen values + m_eivalues *= scale; m_isInitialized = true; m_eigenvectorsOk = computeEigenvectors; return *this; } -template -static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n) +namespace internal { +template +static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n) { + // NOTE this version avoids over & underflow, however since the matrix is prescaled, overflow cannot occur, + // and underflows should be meaningless anyway. So I don't any reason to enable this version, but I keep + // it here for reference: +// RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5); +// RealScalar e = subdiag[end-1]; +// RealScalar mu = diag[end] - (e / (td + (td>0 ? 1 : -1))) * (e / hypot(td,e)); RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5); - RealScalar e2 = ei_abs2(subdiag[end-1]); - RealScalar mu = diag[end] - e2 / (td + (td>0 ? 
1 : -1) * ei_sqrt(td*td + e2)); + RealScalar e2 = abs2(subdiag[end-1]); + RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2)); RealScalar x = diag[start] - mu; RealScalar z = subdiag[start]; - for (Index k = start; k < end; ++k) { - PlanarRotation rot; + JacobiRotation rot; rot.makeGivens(x, z); // do T = G' T G @@ -448,6 +493,7 @@ static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index diag[k] = rot.c() * (rot.c() * diag[k] - rot.s() * subdiag[k]) - rot.s() * (rot.c() * subdiag[k] - rot.s() * diag[k+1]); diag[k+1] = rot.s() * sdk + rot.c() * dkp1; subdiag[k] = rot.c() * sdk - rot.s() * dkp1; + if (k > start) subdiag[k - 1] = rot.c() * subdiag[k-1] - rot.s() * z; @@ -459,14 +505,16 @@ static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index z = -rot.s() * subdiag[k+1]; subdiag[k + 1] = rot.c() * subdiag[k+1]; } - + // apply the givens rotation to the unit matrix Q = Q * G if (matrixQ) { - Map > q(matrixQ,n,n); + // FIXME if StorageOrder == RowMajor this operation is not very efficient + Map > q(matrixQ,n,n); q.applyOnTheRight(k,k+1,rot); } } } +} // end namespace internal #endif // EIGEN_SELFADJOINTEIGENSOLVER_H diff --git a/gtsam/3rdparty/Eigen/src/Eigenvalues/Tridiagonalization.h b/gtsam/3rdparty/Eigen/src/Eigenvalues/Tridiagonalization.h index 23ae748d4..ae4cdce7a 100644 --- a/gtsam/3rdparty/Eigen/src/Eigenvalues/Tridiagonalization.h +++ b/gtsam/3rdparty/Eigen/src/Eigenvalues/Tridiagonalization.h @@ -26,6 +26,19 @@ #ifndef EIGEN_TRIDIAGONALIZATION_H #define EIGEN_TRIDIAGONALIZATION_H +namespace internal { + +template struct TridiagonalizationMatrixTReturnType; +template +struct traits > +{ + typedef typename MatrixType::PlainObject ReturnType; +}; + +template +void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs); +} + /** \eigenvalues_module \ingroup Eigenvalues_Module * * @@ -78,20 +91,22 @@ template class Tridiagonalization }; typedef Matrix CoeffVectorType; - typedef 
typename ei_plain_col_type::type DiagonalType; + typedef typename internal::plain_col_type::type DiagonalType; typedef Matrix SubDiagonalType; + typedef typename internal::remove_all::type MatrixTypeRealView; + typedef internal::TridiagonalizationMatrixTReturnType MatrixTReturnType; - typedef typename ei_meta_if::IsComplex, - typename Diagonal::RealReturnType, - Diagonal - >::ret DiagonalReturnType; + typedef typename internal::conditional::IsComplex, + const typename Diagonal::RealReturnType, + const Diagonal + >::type DiagonalReturnType; - typedef typename ei_meta_if::IsComplex, - typename Diagonal< - Block,0 >::RealReturnType, - Diagonal< - Block,0 > - >::ret SubDiagonalReturnType; + typedef typename internal::conditional::IsComplex, + const typename Diagonal< + Block >::RealReturnType, + const Diagonal< + Block > + >::type SubDiagonalReturnType; /** \brief Return type of matrixQ() */ typedef typename HouseholderSequence::ConjugateReturnType HouseholderSequenceType; @@ -129,7 +144,7 @@ template class Tridiagonalization m_hCoeffs(matrix.cols() > 1 ? 
matrix.cols()-1 : 1), m_isInitialized(false) { - ei_tridiagonalization_inplace(m_matrix, m_hCoeffs); + internal::tridiagonalization_inplace(m_matrix, m_hCoeffs); m_isInitialized = true; } @@ -154,7 +169,7 @@ template class Tridiagonalization { m_matrix = matrix; m_hCoeffs.resize(matrix.rows()-1, 1); - ei_tridiagonalization_inplace(m_matrix, m_hCoeffs); + internal::tridiagonalization_inplace(m_matrix, m_hCoeffs); m_isInitialized = true; return *this; } @@ -177,7 +192,7 @@ template class Tridiagonalization */ inline CoeffVectorType householderCoefficients() const { - ei_assert(m_isInitialized && "Tridiagonalization is not initialized."); + eigen_assert(m_isInitialized && "Tridiagonalization is not initialized."); return m_hCoeffs; } @@ -214,7 +229,7 @@ template class Tridiagonalization */ inline const MatrixType& packedMatrix() const { - ei_assert(m_isInitialized && "Tridiagonalization is not initialized."); + eigen_assert(m_isInitialized && "Tridiagonalization is not initialized."); return m_matrix; } @@ -235,28 +250,34 @@ template class Tridiagonalization */ HouseholderSequenceType matrixQ() const { - ei_assert(m_isInitialized && "Tridiagonalization is not initialized."); - return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate(), false, m_matrix.rows() - 1, 1); + eigen_assert(m_isInitialized && "Tridiagonalization is not initialized."); + return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate()) + .setLength(m_matrix.rows() - 1) + .setShift(1); } - /** \brief Constructs the tridiagonal matrix T in the decomposition + /** \brief Returns an expression of the tridiagonal matrix T in the decomposition * - * \returns the matrix T + * \returns expression object representing the matrix T * * \pre Either the constructor Tridiagonalization(const MatrixType&) or * the member function compute(const MatrixType&) has been called before * to compute the tridiagonal decomposition of a matrix. * - * This function copies the matrix T from internal data. 
The diagonal and - * subdiagonal of the packed matrix as returned by packedMatrix() - * represents the matrix T. It may sometimes be sufficient to directly use - * the packed matrix or the vector expressions returned by diagonal() - * and subDiagonal() instead of creating a new matrix with this function. + * Currently, this function can be used to extract the matrix T from internal + * data and copy it to a dense matrix object. In most cases, it may be + * sufficient to directly use the packed matrix or the vector expressions + * returned by diagonal() and subDiagonal() instead of creating a new + * dense copy matrix with this function. * * \sa Tridiagonalization(const MatrixType&) for an example, * matrixQ(), packedMatrix(), diagonal(), subDiagonal() */ - MatrixType matrixT() const; + MatrixTReturnType matrixT() const + { + eigen_assert(m_isInitialized && "Tridiagonalization is not initialized."); + return MatrixTReturnType(m_matrix.real()); + } /** \brief Returns the diagonal of the tridiagonal matrix T in the decomposition. * @@ -271,7 +292,7 @@ template class Tridiagonalization * * \sa matrixT(), subDiagonal() */ - const DiagonalReturnType diagonal() const; + DiagonalReturnType diagonal() const; /** \brief Returns the subdiagonal of the tridiagonal matrix T in the decomposition. 
* @@ -283,7 +304,7 @@ template class Tridiagonalization * * \sa diagonal() for an example, matrixT() */ - const SubDiagonalReturnType subDiagonal() const; + SubDiagonalReturnType subDiagonal() const; protected: @@ -293,39 +314,23 @@ template class Tridiagonalization }; template -const typename Tridiagonalization::DiagonalReturnType +typename Tridiagonalization::DiagonalReturnType Tridiagonalization::diagonal() const { - ei_assert(m_isInitialized && "Tridiagonalization is not initialized."); + eigen_assert(m_isInitialized && "Tridiagonalization is not initialized."); return m_matrix.diagonal(); } template -const typename Tridiagonalization::SubDiagonalReturnType +typename Tridiagonalization::SubDiagonalReturnType Tridiagonalization::subDiagonal() const { - ei_assert(m_isInitialized && "Tridiagonalization is not initialized."); + eigen_assert(m_isInitialized && "Tridiagonalization is not initialized."); Index n = m_matrix.rows(); - return Block(m_matrix, 1, 0, n-1,n-1).diagonal(); + return Block(m_matrix, 1, 0, n-1,n-1).diagonal(); } -template -typename Tridiagonalization::MatrixType -Tridiagonalization::matrixT() const -{ - // FIXME should this function (and other similar ones) rather take a matrix as argument - // and fill it ? (to avoid temporaries) - ei_assert(m_isInitialized && "Tridiagonalization is not initialized."); - Index n = m_matrix.rows(); - MatrixType matT = m_matrix; - matT.topRightCorner(n-1, n-1).diagonal() = subDiagonal().template cast().conjugate(); - if (n>2) - { - matT.topRightCorner(n-2, n-2).template triangularView().setZero(); - matT.bottomLeftCorner(n-2, n-2).template triangularView().setZero(); - } - return matT; -} +namespace internal { /** \internal * Performs a tridiagonal decomposition of the selfadjoint matrix \a matA in-place. 
@@ -351,14 +356,15 @@ Tridiagonalization::matrixT() const * \sa Tridiagonalization::packedMatrix() */ template -void ei_tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs) +void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs) { - ei_assert(matA.rows()==matA.cols()); - ei_assert(matA.rows()==hCoeffs.size()+1); typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; Index n = matA.rows(); + eigen_assert(n==matA.cols()); + eigen_assert(n==hCoeffs.size()+1 || n==1); + for (Index i = 0; i() - * (ei_conj(h) * matA.col(i).tail(remainingSize))); + * (conj(h) * matA.col(i).tail(remainingSize))); - hCoeffs.tail(n-i-1) += (ei_conj(h)*Scalar(-0.5)*(hCoeffs.tail(remainingSize).dot(matA.col(i).tail(remainingSize)))) * matA.col(i).tail(n-i-1); + hCoeffs.tail(n-i-1) += (conj(h)*Scalar(-0.5)*(hCoeffs.tail(remainingSize).dot(matA.col(i).tail(remainingSize)))) * matA.col(i).tail(n-i-1); matA.bottomRightCorner(remainingSize, remainingSize).template selfadjointView() .rankUpdate(matA.col(i).tail(remainingSize), hCoeffs.tail(remainingSize), -1); @@ -387,7 +393,7 @@ void ei_tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs) template::IsComplex> -struct ei_tridiagonalization_inplace_selector; +struct tridiagonalization_inplace_selector; /** \brief Performs a full tridiagonalization in place * @@ -430,19 +436,19 @@ struct ei_tridiagonalization_inplace_selector; * \sa class Tridiagonalization */ template -void ei_tridiagonalization_inplace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ) +void tridiagonalization_inplace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ) { typedef typename MatrixType::Index Index; //Index n = mat.rows(); - ei_assert(mat.cols()==mat.rows() && diag.size()==mat.rows() && subdiag.size()==mat.rows()-1); - ei_tridiagonalization_inplace_selector::run(mat, diag, 
subdiag, extractQ); + eigen_assert(mat.cols()==mat.rows() && diag.size()==mat.rows() && subdiag.size()==mat.rows()-1); + tridiagonalization_inplace_selector::run(mat, diag, subdiag, extractQ); } /** \internal * General full tridiagonalization */ template -struct ei_tridiagonalization_inplace_selector +struct tridiagonalization_inplace_selector { typedef typename Tridiagonalization::CoeffVectorType CoeffVectorType; typedef typename Tridiagonalization::HouseholderSequenceType HouseholderSequenceType; @@ -451,11 +457,13 @@ struct ei_tridiagonalization_inplace_selector static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ) { CoeffVectorType hCoeffs(mat.cols()-1); - ei_tridiagonalization_inplace(mat,hCoeffs); + tridiagonalization_inplace(mat,hCoeffs); diag = mat.diagonal().real(); subdiag = mat.template diagonal<-1>().real(); if(extractQ) - mat = HouseholderSequenceType(mat, hCoeffs.conjugate(), false, mat.rows() - 1, 1); + mat = HouseholderSequenceType(mat, hCoeffs.conjugate()) + .setLength(mat.rows() - 1) + .setShift(1); } }; @@ -464,7 +472,7 @@ struct ei_tridiagonalization_inplace_selector * Especially useful for plane fitting. 
*/ template -struct ei_tridiagonalization_inplace_selector +struct tridiagonalization_inplace_selector { typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; @@ -473,7 +481,7 @@ struct ei_tridiagonalization_inplace_selector static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ) { diag[0] = mat(0,0); - RealScalar v1norm2 = ei_abs2(mat(2,0)); + RealScalar v1norm2 = abs2(mat(2,0)); if(v1norm2 == RealScalar(0)) { diag[1] = mat(1,1); @@ -485,7 +493,7 @@ struct ei_tridiagonalization_inplace_selector } else { - RealScalar beta = ei_sqrt(ei_abs2(mat(1,0)) + v1norm2); + RealScalar beta = sqrt(abs2(mat(1,0)) + v1norm2); RealScalar invBeta = RealScalar(1)/beta; Scalar m01 = mat(1,0) * invBeta; Scalar m02 = mat(2,0) * invBeta; @@ -508,16 +516,53 @@ struct ei_tridiagonalization_inplace_selector * Trivial specialization for 1x1 matrices */ template -struct ei_tridiagonalization_inplace_selector +struct tridiagonalization_inplace_selector { typedef typename MatrixType::Scalar Scalar; template static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType&, bool extractQ) { - diag(0,0) = ei_real(mat(0,0)); + diag(0,0) = real(mat(0,0)); if(extractQ) mat(0,0) = Scalar(1); } }; + +/** \internal + * \eigenvalues_module \ingroup Eigenvalues_Module + * + * \brief Expression type for return value of Tridiagonalization::matrixT() + * + * \tparam MatrixType type of underlying dense matrix + */ +template struct TridiagonalizationMatrixTReturnType +: public ReturnByValue > +{ + typedef typename MatrixType::Index Index; + public: + /** \brief Constructor. 
+ * + * \param[in] mat The underlying dense matrix + */ + TridiagonalizationMatrixTReturnType(const MatrixType& mat) : m_matrix(mat) { } + + template + inline void evalTo(ResultType& result) const + { + result.setZero(); + result.template diagonal<1>() = m_matrix.template diagonal<-1>().conjugate(); + result.diagonal() = m_matrix.diagonal(); + result.template diagonal<-1>() = m_matrix.template diagonal<-1>(); + } + + Index rows() const { return m_matrix.rows(); } + Index cols() const { return m_matrix.cols(); } + + protected: + const typename MatrixType::Nested m_matrix; +}; + +} // end namespace internal + #endif // EIGEN_TRIDIAGONALIZATION_H diff --git a/gtsam/3rdparty/Eigen/src/Geometry/AlignedBox.h b/gtsam/3rdparty/Eigen/src/Geometry/AlignedBox.h index 196a4fc72..d81dcad9e 100644 --- a/gtsam/3rdparty/Eigen/src/Geometry/AlignedBox.h +++ b/gtsam/3rdparty/Eigen/src/Geometry/AlignedBox.h @@ -84,7 +84,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) template inline explicit AlignedBox(const MatrixBase& a_p) { - const typename ei_nested::type p(a_p.derived()); + const typename internal::nested::type p(a_p.derived()); m_min = p; m_max = p; } @@ -120,8 +120,8 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) inline VectorType& max() { return m_max; } /** \returns the center of the box */ - inline const CwiseUnaryOp, - CwiseBinaryOp, VectorType, VectorType> > + inline const CwiseUnaryOp, + const CwiseBinaryOp, const VectorType, const VectorType> > center() const { return (m_min+m_max)/2; } @@ -129,7 +129,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) * Note that this function does not get the same * result for integral or floating scalar types: see */ - inline const CwiseBinaryOp< ei_scalar_difference_op, VectorType, VectorType> sizes() const + inline const CwiseBinaryOp< internal::scalar_difference_op, const VectorType, const VectorType> sizes() const { return 
m_max - m_min; } /** \returns the volume of the bounding box */ @@ -140,7 +140,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) * if the length of the diagonal is needed: diagonal().norm() * will provide it. */ - inline CwiseBinaryOp< ei_scalar_difference_op, VectorType, VectorType> diagonal() const + inline CwiseBinaryOp< internal::scalar_difference_op, const VectorType, const VectorType> diagonal() const { return sizes(); } /** \returns the vertex of the bounding box at the corner defined by @@ -178,10 +178,10 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) if(!ScalarTraits::IsInteger) { r[d] = m_min[d] + (m_max[d]-m_min[d]) - * ei_random(Scalar(0), Scalar(1)); + * internal::random(Scalar(0), Scalar(1)); } else - r[d] = ei_random(m_min[d], m_max[d]); + r[d] = internal::random(m_min[d], m_max[d]); } return r; } @@ -190,7 +190,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) template inline bool contains(const MatrixBase& a_p) const { - const typename ei_nested::type p(a_p.derived()); + const typename internal::nested::type p(a_p.derived()); return (m_min.array()<=p.array()).all() && (p.array()<=m_max.array()).all(); } @@ -202,7 +202,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) template inline AlignedBox& extend(const MatrixBase& a_p) { - const typename ei_nested::type p(a_p.derived()); + const typename internal::nested::type p(a_p.derived()); m_min = m_min.cwiseMin(p); m_max = m_max.cwiseMax(p); return *this; @@ -236,7 +236,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) template inline AlignedBox& translate(const MatrixBase& a_t) { - const typename ei_nested::type t(a_t.derived()); + const typename internal::nested::type t(a_t.derived()); m_min += t; m_max += t; return *this; @@ -261,14 +261,14 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) */ 
template inline NonInteger exteriorDistance(const MatrixBase& p) const - { return ei_sqrt(NonInteger(squaredExteriorDistance(p))); } + { return internal::sqrt(NonInteger(squaredExteriorDistance(p))); } /** \returns the distance between the boxes \a b and \c *this, * and zero if the boxes intersect. * \sa squaredExteriorDistance() */ inline NonInteger exteriorDistance(const AlignedBox& b) const - { return ei_sqrt(NonInteger(squaredExteriorDistance(b))); } + { return internal::sqrt(NonInteger(squaredExteriorDistance(b))); } /** \returns \c *this with scalar type casted to \a NewScalarType * @@ -276,10 +276,10 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) * then this function smartly returns a const reference to \c *this. */ template - inline typename ei_cast_return_type >::type cast() const { - return typename ei_cast_return_type >::type(*this); } @@ -309,7 +309,7 @@ template template inline Scalar AlignedBox::squaredExteriorDistance(const MatrixBase& a_p) const { - const typename ei_nested::type p(a_p.derived()); + const typename internal::nested::type p(a_p.derived()); Scalar dist2 = 0.; Scalar aux; for (Index k=0; k struct ei_traits > +namespace internal { +template struct traits > { typedef _Scalar Scalar; }; +} template class AngleAxis : public RotationBase,3> @@ -131,8 +133,8 @@ public: * then this function smartly returns a const reference to \c *this. 
*/ template - inline typename ei_cast_return_type >::type cast() const - { return typename ei_cast_return_type >::type(*this); } + inline typename internal::cast_return_type >::type cast() const + { return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ template @@ -149,7 +151,7 @@ public: * * \sa MatrixBase::isApprox() */ bool isApprox(const AngleAxis& other, typename NumTraits::Real prec = NumTraits::dummy_precision()) const - { return m_axis.isApprox(other.m_axis, prec) && ei_isApprox(m_angle,other.m_angle, prec); } + { return m_axis.isApprox(other.m_axis, prec) && internal::isApprox(m_angle,other.m_angle, prec); } }; /** \ingroup Geometry_Module @@ -159,8 +161,11 @@ typedef AngleAxis AngleAxisf; * double precision angle-axis type */ typedef AngleAxis AngleAxisd; -/** Set \c *this from a quaternion. +/** Set \c *this from a \b unit quaternion. * The axis is normalized. + * + * \warning As any other method dealing with quaternion, if the input quaternion + * is not normalized then the result is undefined. 
*/ template template @@ -174,8 +179,8 @@ AngleAxis& AngleAxis::operator=(const QuaternionBase::Matrix3 AngleAxis::toRotationMatrix(void) const { Matrix3 res; - Vector3 sin_axis = ei_sin(m_angle) * m_axis; - Scalar c = ei_cos(m_angle); + Vector3 sin_axis = internal::sin(m_angle) * m_axis; + Scalar c = internal::cos(m_angle); Vector3 cos1_axis = (Scalar(1)-c) * m_axis; Scalar tmp; diff --git a/gtsam/3rdparty/Eigen/src/Geometry/EulerAngles.h b/gtsam/3rdparty/Eigen/src/Geometry/EulerAngles.h index f2b3f129e..d246a6ebf 100644 --- a/gtsam/3rdparty/Eigen/src/Geometry/EulerAngles.h +++ b/gtsam/3rdparty/Eigen/src/Geometry/EulerAngles.h @@ -60,31 +60,31 @@ MatrixBase::eulerAngles(Index a0, Index a1, Index a2) const if (a0==a2) { Scalar s = Vector2(coeff(j,i) , coeff(k,i)).norm(); - res[1] = ei_atan2(s, coeff(i,i)); + res[1] = internal::atan2(s, coeff(i,i)); if (s > epsilon) { - res[0] = ei_atan2(coeff(j,i), coeff(k,i)); - res[2] = ei_atan2(coeff(i,j),-coeff(i,k)); + res[0] = internal::atan2(coeff(j,i), coeff(k,i)); + res[2] = internal::atan2(coeff(i,j),-coeff(i,k)); } else { res[0] = Scalar(0); - res[2] = (coeff(i,i)>0?1:-1)*ei_atan2(-coeff(k,j), coeff(j,j)); + res[2] = (coeff(i,i)>0?1:-1)*internal::atan2(-coeff(k,j), coeff(j,j)); } } else { Scalar c = Vector2(coeff(i,i) , coeff(i,j)).norm(); - res[1] = ei_atan2(-coeff(i,k), c); + res[1] = internal::atan2(-coeff(i,k), c); if (c > epsilon) { - res[0] = ei_atan2(coeff(j,k), coeff(k,k)); - res[2] = ei_atan2(coeff(i,j), coeff(i,i)); + res[0] = internal::atan2(coeff(j,k), coeff(k,k)); + res[2] = internal::atan2(coeff(i,j), coeff(i,i)); } else { res[0] = Scalar(0); - res[2] = (coeff(i,k)>0?1:-1)*ei_atan2(-coeff(k,j), coeff(j,j)); + res[2] = (coeff(i,k)>0?1:-1)*internal::atan2(-coeff(k,j), coeff(j,j)); } } if (!odd) diff --git a/gtsam/3rdparty/Eigen/src/Geometry/Homogeneous.h b/gtsam/3rdparty/Eigen/src/Geometry/Homogeneous.h index f05899dc8..2bc4f7e87 100644 --- a/gtsam/3rdparty/Eigen/src/Geometry/Homogeneous.h +++ 
b/gtsam/3rdparty/Eigen/src/Geometry/Homogeneous.h @@ -39,13 +39,16 @@ * * \sa MatrixBase::homogeneous() */ + +namespace internal { + template -struct ei_traits > - : ei_traits +struct traits > + : traits { - typedef typename ei_traits::StorageKind StorageKind; - typedef typename ei_nested::type MatrixTypeNested; - typedef typename ei_unref::type _MatrixTypeNested; + typedef typename traits::StorageKind StorageKind; + typedef typename nested::type MatrixTypeNested; + typedef typename remove_reference::type _MatrixTypeNested; enum { RowsPlusOne = (MatrixType::RowsAtCompileTime != Dynamic) ? int(MatrixType::RowsAtCompileTime) + 1 : Dynamic, @@ -63,8 +66,10 @@ struct ei_traits > }; }; -template struct ei_homogeneous_left_product_impl; -template struct ei_homogeneous_right_product_impl; +template struct homogeneous_left_product_impl; +template struct homogeneous_right_product_impl; + +} // end namespace internal template class Homogeneous : public MatrixBase > @@ -92,39 +97,27 @@ template class Homogeneous } template - inline const ei_homogeneous_right_product_impl + inline const internal::homogeneous_right_product_impl operator* (const MatrixBase& rhs) const { - ei_assert(int(Direction)==Horizontal); - return ei_homogeneous_right_product_impl(m_matrix,rhs.derived()); + eigen_assert(int(Direction)==Horizontal); + return internal::homogeneous_right_product_impl(m_matrix,rhs.derived()); } template friend - inline const ei_homogeneous_left_product_impl + inline const internal::homogeneous_left_product_impl operator* (const MatrixBase& lhs, const Homogeneous& rhs) { - ei_assert(int(Direction)==Vertical); - return ei_homogeneous_left_product_impl(lhs.derived(),rhs.m_matrix); + eigen_assert(int(Direction)==Vertical); + return internal::homogeneous_left_product_impl(lhs.derived(),rhs.m_matrix); } - template friend - inline const ei_homogeneous_left_product_impl::AffinePartNested> - operator* (const Transform& tr, const Homogeneous& rhs) + template friend + inline const 
internal::homogeneous_left_product_impl > + operator* (const Transform& lhs, const Homogeneous& rhs) { - ei_assert(int(Direction)==Vertical); - return ei_homogeneous_left_product_impl::AffinePartNested > - (tr.affine(),rhs.m_matrix); - } - - template friend - inline const ei_homogeneous_left_product_impl::MatrixType> - operator* (const Transform& tr, const Homogeneous& rhs) - { - ei_assert(int(Direction)==Vertical); - return ei_homogeneous_left_product_impl::MatrixType> - (tr.matrix(),rhs.m_matrix); + eigen_assert(int(Direction)==Vertical); + return internal::homogeneous_left_product_impl >(lhs,rhs.m_matrix); } protected: @@ -174,11 +167,11 @@ VectorwiseOp::homogeneous() const * * \sa VectorwiseOp::hnormalized() */ template -inline typename MatrixBase::HNormalizedReturnType +inline const typename MatrixBase::HNormalizedReturnType MatrixBase::hnormalized() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); - return StartMinusOne(derived(),0,0, + return ConstStartMinusOne(derived(),0,0, ColsAtCompileTime==1?size()-1:1, ColsAtCompileTime==1?1:size()-1) / coeff(size()-1); } @@ -192,7 +185,7 @@ MatrixBase::hnormalized() const * * \sa MatrixBase::hnormalized() */ template -inline typename VectorwiseOp::HNormalizedReturnType +inline const typename VectorwiseOp::HNormalizedReturnType VectorwiseOp::hnormalized() const { return HNormalized_Block(_expression(),0,0, @@ -210,26 +203,57 @@ VectorwiseOp::hnormalized() const Direction==Horizontal ? 
_expression().cols()-1 : 1)); } -template -struct ei_traits,Lhs> > +namespace internal { + +template +struct take_matrix_for_product { - typedef typename ei_make_proper_matrix_type< - typename ei_traits::Scalar, - Lhs::RowsAtCompileTime, - MatrixType::ColsAtCompileTime, - MatrixType::PlainObject::Options, - Lhs::MaxRowsAtCompileTime, - MatrixType::MaxColsAtCompileTime>::type ReturnType; + typedef MatrixOrTransformType type; + static const type& run(const type &x) { return x; } +}; + +template +struct take_matrix_for_product > +{ + typedef Transform TransformType; + typedef typename TransformType::ConstAffinePart type; + static const type run (const TransformType& x) { return x.affine(); } +}; + +template +struct take_matrix_for_product > +{ + typedef Transform TransformType; + typedef typename TransformType::MatrixType type; + static const type& run (const TransformType& x) { return x.matrix(); } }; template -struct ei_homogeneous_left_product_impl,Lhs> - : public ReturnByValue,Lhs> > +struct traits,Lhs> > { - typedef typename ei_cleantype::type LhsNested; + typedef typename take_matrix_for_product::type LhsMatrixType; + typedef typename remove_all::type MatrixTypeCleaned; + typedef typename remove_all::type LhsMatrixTypeCleaned; + typedef typename make_proper_matrix_type< + typename traits::Scalar, + LhsMatrixTypeCleaned::RowsAtCompileTime, + MatrixTypeCleaned::ColsAtCompileTime, + MatrixTypeCleaned::PlainObject::Options, + LhsMatrixTypeCleaned::MaxRowsAtCompileTime, + MatrixTypeCleaned::MaxColsAtCompileTime>::type ReturnType; +}; + +template +struct homogeneous_left_product_impl,Lhs> + : public ReturnByValue,Lhs> > +{ + typedef typename traits::LhsMatrixType LhsMatrixType; + typedef typename remove_all::type LhsMatrixTypeCleaned; + typedef typename remove_all::type LhsMatrixTypeNested; typedef typename MatrixType::Index Index; - ei_homogeneous_left_product_impl(const Lhs& lhs, const MatrixType& rhs) - : m_lhs(lhs), m_rhs(rhs) + homogeneous_left_product_impl(const 
Lhs& lhs, const MatrixType& rhs) + : m_lhs(take_matrix_for_product::run(lhs)), + m_rhs(rhs) {} inline Index rows() const { return m_lhs.rows(); } @@ -238,22 +262,22 @@ struct ei_homogeneous_left_product_impl,Lhs> template void evalTo(Dest& dst) const { // FIXME investigate how to allow lazy evaluation of this product when possible - dst = Block + dst = Block (m_lhs,0,0,m_lhs.rows(),m_lhs.cols()-1) * m_rhs; dst += m_lhs.col(m_lhs.cols()-1).rowwise() .template replicate(m_rhs.cols()); } - const typename Lhs::Nested m_lhs; + const typename LhsMatrixTypeCleaned::Nested m_lhs; const typename MatrixType::Nested m_rhs; }; template -struct ei_traits,Rhs> > +struct traits,Rhs> > { - typedef typename ei_make_proper_matrix_type::Scalar, + typedef typename make_proper_matrix_type::Scalar, MatrixType::RowsAtCompileTime, Rhs::ColsAtCompileTime, MatrixType::PlainObject::Options, @@ -262,12 +286,12 @@ struct ei_traits -struct ei_homogeneous_right_product_impl,Rhs> - : public ReturnByValue,Rhs> > +struct homogeneous_right_product_impl,Rhs> + : public ReturnByValue,Rhs> > { - typedef typename ei_cleantype::type RhsNested; + typedef typename remove_all::type RhsNested; typedef typename MatrixType::Index Index; - ei_homogeneous_right_product_impl(const MatrixType& lhs, const Rhs& rhs) + homogeneous_right_product_impl(const MatrixType& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) {} @@ -277,7 +301,7 @@ struct ei_homogeneous_right_product_impl,Rhs> template void evalTo(Dest& dst) const { // FIXME investigate how to allow lazy evaluation of this product when possible - dst = m_lhs * Block (m_rhs,0,0,m_rhs.rows()-1,m_rhs.cols()); @@ -289,4 +313,6 @@ struct ei_homogeneous_right_product_impl,Rhs> const typename Rhs::Nested m_rhs; }; +} // end namespace internal + #endif // EIGEN_HOMOGENEOUS_H diff --git a/gtsam/3rdparty/Eigen/src/Geometry/Hyperplane.h b/gtsam/3rdparty/Eigen/src/Geometry/Hyperplane.h index 7b7d33a92..e43c9d07d 100644 --- a/gtsam/3rdparty/Eigen/src/Geometry/Hyperplane.h +++ 
b/gtsam/3rdparty/Eigen/src/Geometry/Hyperplane.h @@ -43,23 +43,32 @@ * \f$ n \cdot x + d = 0 \f$ where \f$ n \f$ is a unit normal vector of the plane (linear part) * and \f$ d \f$ is the distance (offset) to the origin. */ -template +template class Hyperplane { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1) - enum { AmbientDimAtCompileTime = _AmbientDim }; + enum { + AmbientDimAtCompileTime = _AmbientDim, + Options = _Options + }; typedef _Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef DenseIndex Index; typedef Matrix VectorType; typedef Matrix Coefficients; + : Index(AmbientDimAtCompileTime)+1,1,Options> Coefficients; typedef Block NormalReturnType; + typedef const Block ConstNormalReturnType; /** Default constructor without initialization */ inline explicit Hyperplane() {} + + template + Hyperplane(const Hyperplane& other) + : m_coeffs(other.coeffs()) + {} /** Constructs a dynamic-size hyperplane with \a _dim the dimension * of the ambient space */ @@ -139,7 +148,7 @@ public: /** \returns the absolute distance between the plane \c *this and a point \a p. * \sa signedDistance() */ - inline Scalar absDistance(const VectorType& p) const { return ei_abs(signedDistance(p)); } + inline Scalar absDistance(const VectorType& p) const { return internal::abs(signedDistance(p)); } /** \returns the projection of a point \a p onto the plane \c *this. */ @@ -148,7 +157,7 @@ public: /** \returns a constant reference to the unit normal vector of the plane, which corresponds * to the linear part of the implicit equation. */ - inline const NormalReturnType normal() const { return NormalReturnType(m_coeffs,0,0,dim(),1); } + inline ConstNormalReturnType normal() const { return ConstNormalReturnType(m_coeffs,0,0,dim(),1); } /** \returns a non-constant reference to the unit normal vector of the plane, which corresponds * to the linear part of the implicit equation. 
@@ -186,9 +195,9 @@ public: Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0); // since the line equations ax+by=c are normalized with a^2+b^2=1, the following tests // whether the two lines are approximately parallel. - if(ei_isMuchSmallerThan(det, Scalar(1))) + if(internal::isMuchSmallerThan(det, Scalar(1))) { // special case where the two lines are approximately parallel. Pick any point on the first line. - if(ei_abs(coeffs().coeff(1))>ei_abs(coeffs().coeff(0))) + if(internal::abs(coeffs().coeff(1))>internal::abs(coeffs().coeff(0))) return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0)); else return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0)); @@ -216,7 +225,7 @@ public: normal() = mat * normal(); else { - ei_assert("invalid traits value in Hyperplane::transform()"); + eigen_assert("invalid traits value in Hyperplane::transform()"); } return *this; } @@ -228,7 +237,8 @@ public: * or a more generic Affine transformation. The default is Affine. * Other kind of transformations are not supported. */ - inline Hyperplane& transform(const Transform& t, + template + inline Hyperplane& transform(const Transform& t, TransformTraits traits = Affine) { transform(t.linear(), traits); @@ -242,23 +252,24 @@ public: * then this function smartly returns a const reference to \c *this. 
*/ template - inline typename ei_cast_return_type >::type cast() const + inline typename internal::cast_return_type >::type cast() const { - return typename ei_cast_return_type >::type(*this); + return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ - template - inline explicit Hyperplane(const Hyperplane& other) + template + inline explicit Hyperplane(const Hyperplane& other) { m_coeffs = other.coeffs().template cast(); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \sa MatrixBase::isApprox() */ - bool isApprox(const Hyperplane& other, typename NumTraits::Real prec = NumTraits::dummy_precision()) const + template + bool isApprox(const Hyperplane& other, typename NumTraits::Real prec = NumTraits::dummy_precision()) const { return m_coeffs.isApprox(other.m_coeffs, prec); } protected: diff --git a/gtsam/3rdparty/Eigen/src/Geometry/OrthoMethods.h b/gtsam/3rdparty/Eigen/src/Geometry/OrthoMethods.h index d03d85beb..52b469881 100644 --- a/gtsam/3rdparty/Eigen/src/Geometry/OrthoMethods.h +++ b/gtsam/3rdparty/Eigen/src/Geometry/OrthoMethods.h @@ -35,7 +35,7 @@ */ template template -inline typename MatrixBase::PlainObject +inline typename MatrixBase::template cross_product_return_type::type MatrixBase::cross(const MatrixBase& other) const { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,3) @@ -43,31 +43,35 @@ MatrixBase::cross(const MatrixBase& other) const // Note that there is no need for an expression here since the compiler // optimize such a small temporary very well (even within a complex expression) - const typename ei_nested::type lhs(derived()); - const typename ei_nested::type rhs(other.derived()); - return typename ei_plain_matrix_type::type( - lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1), - lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2), - lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0) + const 
typename internal::nested::type lhs(derived()); + const typename internal::nested::type rhs(other.derived()); + return typename cross_product_return_type::type( + internal::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)), + internal::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)), + internal::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)) ); } +namespace internal { + template< int Arch,typename VectorLhs,typename VectorRhs, typename Scalar = typename VectorLhs::Scalar, bool Vectorizable = (VectorLhs::Flags&VectorRhs::Flags)&PacketAccessBit> -struct ei_cross3_impl { - inline static typename ei_plain_matrix_type::type +struct cross3_impl { + inline static typename internal::plain_matrix_type::type run(const VectorLhs& lhs, const VectorRhs& rhs) { - return typename ei_plain_matrix_type::type( - lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1), - lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2), - lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0), + return typename internal::plain_matrix_type::type( + internal::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)), + internal::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)), + internal::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)), 0 ); } }; +} + /** \geometry_module * * \returns the cross product of \c *this and \a other using only the x, y, and z coefficients @@ -85,14 +89,14 @@ MatrixBase::cross3(const MatrixBase& other) const EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,4) EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,4) - typedef typename ei_nested::type DerivedNested; - typedef typename ei_nested::type OtherDerivedNested; + typedef typename internal::nested::type DerivedNested; + typedef typename internal::nested::type OtherDerivedNested; const DerivedNested lhs(derived()); const OtherDerivedNested rhs(other.derived()); - return ei_cross3_impl::type, - typename 
ei_cleantype::type>::run(lhs,rhs); + return internal::cross3_impl::type, + typename internal::remove_all::type>::run(lhs,rhs); } /** \returns a matrix expression of the cross product of each column or row @@ -110,32 +114,34 @@ const typename VectorwiseOp::CrossReturnType VectorwiseOp::cross(const MatrixBase& other) const { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3) - EIGEN_STATIC_ASSERT((ei_is_same_type::ret), + EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) CrossReturnType res(_expression().rows(),_expression().cols()); if(Direction==Vertical) { - ei_assert(CrossReturnType::RowsAtCompileTime==3 && "the matrix must have exactly 3 rows"); - res.row(0) = _expression().row(1) * other.coeff(2) - _expression().row(2) * other.coeff(1); - res.row(1) = _expression().row(2) * other.coeff(0) - _expression().row(0) * other.coeff(2); - res.row(2) = _expression().row(0) * other.coeff(1) - _expression().row(1) * other.coeff(0); + eigen_assert(CrossReturnType::RowsAtCompileTime==3 && "the matrix must have exactly 3 rows"); + res.row(0) = (_expression().row(1) * other.coeff(2) - _expression().row(2) * other.coeff(1)).conjugate(); + res.row(1) = (_expression().row(2) * other.coeff(0) - _expression().row(0) * other.coeff(2)).conjugate(); + res.row(2) = (_expression().row(0) * other.coeff(1) - _expression().row(1) * other.coeff(0)).conjugate(); } else { - ei_assert(CrossReturnType::ColsAtCompileTime==3 && "the matrix must have exactly 3 columns"); - res.col(0) = _expression().col(1) * other.coeff(2) - _expression().col(2) * other.coeff(1); - res.col(1) = _expression().col(2) * other.coeff(0) - _expression().col(0) * other.coeff(2); - res.col(2) = _expression().col(0) * other.coeff(1) - _expression().col(1) * other.coeff(0); + eigen_assert(CrossReturnType::ColsAtCompileTime==3 && "the matrix must have exactly 3 columns"); + res.col(0) = 
(_expression().col(1) * other.coeff(2) - _expression().col(2) * other.coeff(1)).conjugate(); + res.col(1) = (_expression().col(2) * other.coeff(0) - _expression().col(0) * other.coeff(2)).conjugate(); + res.col(2) = (_expression().col(0) * other.coeff(1) - _expression().col(1) * other.coeff(0)).conjugate(); } return res; } +namespace internal { + template -struct ei_unitOrthogonal_selector +struct unitOrthogonal_selector { - typedef typename ei_plain_matrix_type::type VectorType; - typedef typename ei_traits::Scalar Scalar; + typedef typename plain_matrix_type::type VectorType; + typedef typename traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename Derived::Index Index; typedef Matrix Vector2; @@ -148,18 +154,18 @@ struct ei_unitOrthogonal_selector if (maxi==0) sndi = 1; RealScalar invnm = RealScalar(1)/(Vector2() << src.coeff(sndi),src.coeff(maxi)).finished().norm(); - perp.coeffRef(maxi) = -ei_conj(src.coeff(sndi)) * invnm; - perp.coeffRef(sndi) = ei_conj(src.coeff(maxi)) * invnm; + perp.coeffRef(maxi) = -conj(src.coeff(sndi)) * invnm; + perp.coeffRef(sndi) = conj(src.coeff(maxi)) * invnm; return perp; } }; template -struct ei_unitOrthogonal_selector +struct unitOrthogonal_selector { - typedef typename ei_plain_matrix_type::type VectorType; - typedef typename ei_traits::Scalar Scalar; + typedef typename plain_matrix_type::type VectorType; + typedef typename traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; inline static VectorType run(const Derived& src) { @@ -171,12 +177,12 @@ struct ei_unitOrthogonal_selector /* unless the x and y coords are both close to zero, we can * simply take ( -y, x, 0 ) and normalize it. 
*/ - if((!ei_isMuchSmallerThan(src.x(), src.z())) - || (!ei_isMuchSmallerThan(src.y(), src.z()))) + if((!isMuchSmallerThan(src.x(), src.z())) + || (!isMuchSmallerThan(src.y(), src.z()))) { RealScalar invnm = RealScalar(1)/src.template head<2>().norm(); - perp.coeffRef(0) = -ei_conj(src.y())*invnm; - perp.coeffRef(1) = ei_conj(src.x())*invnm; + perp.coeffRef(0) = -conj(src.y())*invnm; + perp.coeffRef(1) = conj(src.x())*invnm; perp.coeffRef(2) = 0; } /* if both x and y are close to zero, then the vector is close @@ -187,8 +193,8 @@ struct ei_unitOrthogonal_selector { RealScalar invnm = RealScalar(1)/src.template tail<2>().norm(); perp.coeffRef(0) = 0; - perp.coeffRef(1) = -ei_conj(src.z())*invnm; - perp.coeffRef(2) = ei_conj(src.y())*invnm; + perp.coeffRef(1) = -conj(src.z())*invnm; + perp.coeffRef(2) = conj(src.y())*invnm; } return perp; @@ -196,13 +202,15 @@ struct ei_unitOrthogonal_selector }; template -struct ei_unitOrthogonal_selector +struct unitOrthogonal_selector { - typedef typename ei_plain_matrix_type::type VectorType; + typedef typename plain_matrix_type::type VectorType; inline static VectorType run(const Derived& src) - { return VectorType(-ei_conj(src.y()), ei_conj(src.x())).normalized(); } + { return VectorType(-conj(src.y()), conj(src.x())).normalized(); } }; +} // end namespace internal + /** \returns a unit vector which is orthogonal to \c *this * * The size of \c *this must be at least 2. 
If the size is exactly 2, @@ -215,7 +223,7 @@ typename MatrixBase::PlainObject MatrixBase::unitOrthogonal() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return ei_unitOrthogonal_selector::run(derived()); + return internal::unitOrthogonal_selector::run(derived()); } #endif // EIGEN_ORTHOMETHODS_H diff --git a/gtsam/3rdparty/Eigen/src/Geometry/ParametrizedLine.h b/gtsam/3rdparty/Eigen/src/Geometry/ParametrizedLine.h index 3de23f53b..edad5f8ee 100644 --- a/gtsam/3rdparty/Eigen/src/Geometry/ParametrizedLine.h +++ b/gtsam/3rdparty/Eigen/src/Geometry/ParametrizedLine.h @@ -39,19 +39,27 @@ * \param _Scalar the scalar type, i.e., the type of the coefficients * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. */ -template +template class ParametrizedLine { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) - enum { AmbientDimAtCompileTime = _AmbientDim }; + enum { + AmbientDimAtCompileTime = _AmbientDim, + Options = _Options + }; typedef _Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef DenseIndex Index; - typedef Matrix VectorType; + typedef Matrix VectorType; /** Default constructor without initialization */ inline explicit ParametrizedLine() {} + + template + ParametrizedLine(const ParametrizedLine& other) + : m_origin(other.origin()), m_direction(other.direction()) + {} /** Constructs a dynamic-size line with \a _dim the dimension * of the ambient space */ @@ -63,7 +71,8 @@ public: ParametrizedLine(const VectorType& origin, const VectorType& direction) : m_origin(origin), m_direction(direction) {} - explicit ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim>& hyperplane); + template + explicit ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane); /** Constructs a parametrized line going from \a p0 to \a p1. 
*/ static inline ParametrizedLine Through(const VectorType& p0, const VectorType& p1) @@ -91,13 +100,14 @@ public: /** \returns the distance of a point \a p to its projection onto the line \c *this. * \sa squaredDistance() */ - RealScalar distance(const VectorType& p) const { return ei_sqrt(squaredDistance(p)); } + RealScalar distance(const VectorType& p) const { return internal::sqrt(squaredDistance(p)); } /** \returns the projection of a point \a p onto the line \c *this. */ VectorType projection(const VectorType& p) const { return origin() + direction().dot(p-origin()) * direction(); } - Scalar intersection(const Hyperplane<_Scalar, _AmbientDim>& hyperplane); + template + Scalar intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane); /** \returns \c *this with scalar type casted to \a NewScalarType * @@ -105,16 +115,16 @@ public: * then this function smartly returns a const reference to \c *this. */ template - inline typename ei_cast_return_type >::type cast() const + inline typename internal::cast_return_type >::type cast() const { - return typename ei_cast_return_type >::type(*this); + return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ - template - inline explicit ParametrizedLine(const ParametrizedLine& other) + template + inline explicit ParametrizedLine(const ParametrizedLine& other) { m_origin = other.origin().template cast(); m_direction = other.direction().template cast(); @@ -136,8 +146,9 @@ protected: * * \warning the ambient space must have dimension 2 such that the hyperplane actually describes a line */ -template -inline ParametrizedLine<_Scalar, _AmbientDim>::ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim>& hyperplane) +template +template +inline ParametrizedLine<_Scalar, _AmbientDim,_Options>::ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim,OtherOptions>& hyperplane) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2) direction() = 
hyperplane.normal().unitOrthogonal(); @@ -146,8 +157,9 @@ inline ParametrizedLine<_Scalar, _AmbientDim>::ParametrizedLine(const Hyperplane /** \returns the parameter value of the intersection between \c *this and the given hyperplane */ -template -inline _Scalar ParametrizedLine<_Scalar, _AmbientDim>::intersection(const Hyperplane<_Scalar, _AmbientDim>& hyperplane) +template +template +inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) { return -(hyperplane.offset()+hyperplane.normal().dot(origin())) / hyperplane.normal().dot(direction()); diff --git a/gtsam/3rdparty/Eigen/src/Geometry/Quaternion.h b/gtsam/3rdparty/Eigen/src/Geometry/Quaternion.h index c0845653d..ecf839335 100644 --- a/gtsam/3rdparty/Eigen/src/Geometry/Quaternion.h +++ b/gtsam/3rdparty/Eigen/src/Geometry/Quaternion.h @@ -31,10 +31,12 @@ * The implementation is at the end of the file ***************************************************************************/ +namespace internal { template -struct ei_quaternionbase_assign_impl; +struct quaternionbase_assign_impl; +} template class QuaternionBase : public RotationBase @@ -44,9 +46,9 @@ public: using Base::operator*; using Base::derived; - typedef typename ei_traits::Scalar Scalar; + typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef typename ei_traits::Coefficients Coefficients; + typedef typename internal::traits::Coefficients Coefficients; // typedef typename Matrix Coefficients; /** the type of a 3D vector */ @@ -77,16 +79,16 @@ public: inline Scalar& w() { return this->derived().coeffs().coeffRef(3); } /** \returns a read-only vector expression of the imaginary part (x,y,z) */ - inline const VectorBlock vec() const { return coeffs().template head<3>(); } + inline const VectorBlock vec() const { return coeffs().template head<3>(); } /** \returns a vector expression of the imaginary part (x,y,z) */ inline 
VectorBlock vec() { return coeffs().template head<3>(); } /** \returns a read-only vector expression of the coefficients (x,y,z,w) */ - inline const typename ei_traits::Coefficients& coeffs() const { return derived().coeffs(); } + inline const typename internal::traits::Coefficients& coeffs() const { return derived().coeffs(); } /** \returns a vector expression of the coefficients (x,y,z,w) */ - inline typename ei_traits::Coefficients& coeffs() { return derived().coeffs(); } + inline typename internal::traits::Coefficients& coeffs() { return derived().coeffs(); } EIGEN_STRONG_INLINE QuaternionBase& operator=(const QuaternionBase& other); template EIGEN_STRONG_INLINE Derived& operator=(const QuaternionBase& other); @@ -175,9 +177,9 @@ public: * then this function smartly returns a const reference to \c *this. */ template - inline typename ei_cast_return_type >::type cast() const + inline typename internal::cast_return_type >::type cast() const { - return typename ei_cast_return_type >::type( + return typename internal::cast_return_type >::type( coeffs().template cast()); } @@ -212,27 +214,29 @@ public: * \sa class AngleAxis, class Transform */ -template -struct ei_traits > +namespace internal { +template +struct traits > { - typedef Quaternion<_Scalar> PlainObject; + typedef Quaternion<_Scalar,_Options> PlainObject; typedef _Scalar Scalar; - typedef Matrix<_Scalar,4,1> Coefficients; + typedef Matrix<_Scalar,4,1,_Options> Coefficients; enum{ - PacketAccess = Aligned + PacketAccess = _Options & DontAlign ? 
Unaligned : Aligned }; }; +} -template -class Quaternion : public QuaternionBase >{ - typedef QuaternionBase > Base; +template +class Quaternion : public QuaternionBase >{ + typedef QuaternionBase > Base; public: typedef _Scalar Scalar; - EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Quaternion) + EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Quaternion) using Base::operator*=; - typedef typename ei_traits >::Coefficients Coefficients; + typedef typename internal::traits >::Coefficients Coefficients; typedef typename Base::AngleAxisType AngleAxisType; /** Default constructor leaving the quaternion uninitialized. */ @@ -268,6 +272,14 @@ public: protected: Coefficients m_coeffs; + +#ifndef EIGEN_PARSED_BY_DOXYGEN + EIGEN_STRONG_INLINE static void _check_template_params() + { + EIGEN_STATIC_ASSERT( (_Options & DontAlign) == _Options, + INVALID_MATRIX_TEMPLATE_PARAMETERS) + } +#endif }; /** \ingroup Geometry_Module @@ -281,9 +293,10 @@ typedef Quaternion Quaterniond; * Specialization of Map> ***************************************************************************/ +namespace internal { template -struct ei_traits, _PacketAccess> >: -ei_traits > +struct traits, _PacketAccess> >: +traits > { typedef _Scalar Scalar; typedef Map, _PacketAccess> Coefficients; @@ -291,6 +304,43 @@ ei_traits > PacketAccess = _PacketAccess }; }; +} + +/** \brief Quaternion expression mapping a constant memory buffer + * + * \param _Scalar the type of the Quaternion coefficients + * \param PacketAccess see class Map + * + * This is a specialization of class Map for Quaternion. This class allows to view + * a 4 scalar memory buffer as an Eigen's Quaternion object. 
+ * + * \sa class Map, class Quaternion, class QuaternionBase + */ +template +class Map, PacketAccess > + : public QuaternionBase, PacketAccess> > +{ + typedef QuaternionBase, PacketAccess> > Base; + + public: + typedef _Scalar Scalar; + typedef typename internal::traits::Coefficients Coefficients; + EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map) + using Base::operator*=; + + /** Constructs a Mapped Quaternion object from the pointer \a coeffs + * + * The pointer \a coeffs must reference the four coeffecients of Quaternion in the following order: + * \code *coeffs == {x, y, z, w} \endcode + * + * If the template parameter PacketAccess is set to Aligned, then the pointer coeffs must be aligned. */ + EIGEN_STRONG_INLINE Map(const Scalar* coeffs) : m_coeffs(coeffs) {} + + inline const Coefficients& coeffs() const { return m_coeffs;} + + protected: + const Coefficients m_coeffs; +}; /** \brief Expression of a quaternion from a memory buffer * @@ -310,7 +360,7 @@ class Map, PacketAccess > public: typedef _Scalar Scalar; - typedef typename ei_traits::Coefficients Coefficients; + typedef typename internal::traits::Coefficients Coefficients; EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map) using Base::operator*=; @@ -320,10 +370,10 @@ class Map, PacketAccess > * \code *coeffs == {x, y, z, w} \endcode * * If the template parameter PacketAccess is set to Aligned, then the pointer coeffs must be aligned. 
*/ - EIGEN_STRONG_INLINE Map(const Scalar* coeffs) : m_coeffs(coeffs) {} + EIGEN_STRONG_INLINE Map(Scalar* coeffs) : m_coeffs(coeffs) {} - inline Coefficients& coeffs() { return m_coeffs;} - inline const Coefficients& coeffs() const { return m_coeffs;} + inline Coefficients& coeffs() { return m_coeffs; } + inline const Coefficients& coeffs() const { return m_coeffs; } protected: Coefficients m_coeffs; @@ -348,7 +398,8 @@ typedef Map, Aligned> QuaternionMapAlignedd; // Generic Quaternion * Quaternion product // This product can be specialized for a given architecture via the Arch template argument. -template struct ei_quat_product +namespace internal { +template struct quat_product { EIGEN_STRONG_INLINE static Quaternion run(const QuaternionBase& a, const QuaternionBase& b){ return Quaternion @@ -360,18 +411,19 @@ template template -EIGEN_STRONG_INLINE Quaternion::Scalar> +EIGEN_STRONG_INLINE Quaternion::Scalar> QuaternionBase::operator* (const QuaternionBase& other) const { - EIGEN_STATIC_ASSERT((ei_is_same_type::ret), + EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) - return ei_quat_product::Scalar, - ei_traits::PacketAccess && ei_traits::PacketAccess>::run(*this, other); + return internal::quat_product::Scalar, + internal::traits::PacketAccess && internal::traits::PacketAccess>::run(*this, other); } /** \sa operator*(Quaternion) */ @@ -425,8 +477,8 @@ template EIGEN_STRONG_INLINE Derived& QuaternionBase::operator=(const AngleAxisType& aa) { Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings - this->w() = ei_cos(ha); - this->vec() = ei_sin(ha) * aa.axis(); + this->w() = internal::cos(ha); + this->vec() = internal::sin(ha) * aa.axis(); return derived(); } @@ -440,9 +492,9 @@ template template inline Derived& QuaternionBase::operator=(const MatrixBase& xpr) { - EIGEN_STATIC_ASSERT((ei_is_same_type::ret), + 
EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) - ei_quaternionbase_assign_impl::run(*this, xpr.derived()); + internal::quaternionbase_assign_impl::run(*this, xpr.derived()); return derived(); } @@ -519,12 +571,12 @@ inline Derived& QuaternionBase::setFromTwoVectors(const MatrixBasew() = ei_sqrt(w2); - this->vec() = axis * ei_sqrt(Scalar(1) - w2); + this->w() = internal::sqrt(w2); + this->vec() = axis * internal::sqrt(Scalar(1) - w2); return derived(); } Vector3 axis = v0.cross(v1); - Scalar s = ei_sqrt((Scalar(1)+c)*Scalar(2)); + Scalar s = internal::sqrt((Scalar(1)+c)*Scalar(2)); Scalar invs = Scalar(1)/s; this->vec() = axis * invs; this->w() = s * Scalar(0.5); @@ -539,7 +591,7 @@ inline Derived& QuaternionBase::setFromTwoVectors(const MatrixBase -inline Quaternion::Scalar> QuaternionBase::inverse() const +inline Quaternion::Scalar> QuaternionBase::inverse() const { // FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite() ?? 
Scalar n2 = this->squaredNorm(); @@ -559,7 +611,7 @@ inline Quaternion::Scalar> QuaternionBase:: * \sa Quaternion2::inverse() */ template -inline Quaternion::Scalar> +inline Quaternion::Scalar> QuaternionBase::conjugate() const { return Quaternion(this->w(),-this->x(),-this->y(),-this->z()); @@ -570,10 +622,10 @@ QuaternionBase::conjugate() const */ template template -inline typename ei_traits::Scalar +inline typename internal::traits::Scalar QuaternionBase::angularDistance(const QuaternionBase& other) const { - double d = ei_abs(this->dot(other)); + double d = internal::abs(this->dot(other)); if (d>=1.0) return Scalar(0); return static_cast(2 * std::acos(d)); @@ -584,12 +636,12 @@ QuaternionBase::angularDistance(const QuaternionBase& oth */ template template -Quaternion::Scalar> +Quaternion::Scalar> QuaternionBase::slerp(Scalar t, const QuaternionBase& other) const { static const Scalar one = Scalar(1) - NumTraits::epsilon(); Scalar d = this->dot(other); - Scalar absD = ei_abs(d); + Scalar absD = internal::abs(d); Scalar scale0; Scalar scale1; @@ -603,10 +655,10 @@ QuaternionBase::slerp(Scalar t, const QuaternionBase& oth { // theta is the angle between the 2 quaternions Scalar theta = std::acos(absD); - Scalar sinTheta = ei_sin(theta); + Scalar sinTheta = internal::sin(theta); - scale0 = ei_sin( ( Scalar(1) - t ) * theta) / sinTheta; - scale1 = ei_sin( ( t * theta) ) / sinTheta; + scale0 = internal::sin( ( Scalar(1) - t ) * theta) / sinTheta; + scale1 = internal::sin( ( t * theta) ) / sinTheta; if (d<0) scale1 = -scale1; } @@ -614,9 +666,11 @@ QuaternionBase::slerp(Scalar t, const QuaternionBase& oth return Quaternion(scale0 * coeffs() + scale1 * other.coeffs()); } +namespace internal { + // set from a rotation matrix template -struct ei_quaternionbase_assign_impl +struct quaternionbase_assign_impl { typedef typename Other::Scalar Scalar; typedef DenseIndex Index; @@ -627,7 +681,7 @@ struct ei_quaternionbase_assign_impl Scalar t = mat.trace(); if (t > Scalar(0)) 
{ - t = ei_sqrt(t + Scalar(1.0)); + t = sqrt(t + Scalar(1.0)); q.w() = Scalar(0.5)*t; t = Scalar(0.5)/t; q.x() = (mat.coeff(2,1) - mat.coeff(1,2)) * t; @@ -644,7 +698,7 @@ struct ei_quaternionbase_assign_impl DenseIndex j = (i+1)%3; DenseIndex k = (j+1)%3; - t = ei_sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0)); + t = sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0)); q.coeffs().coeffRef(i) = Scalar(0.5) * t; t = Scalar(0.5)/t; q.w() = (mat.coeff(k,j)-mat.coeff(j,k))*t; @@ -656,7 +710,7 @@ struct ei_quaternionbase_assign_impl // set from a vector of coefficients assumed to be a quaternion template -struct ei_quaternionbase_assign_impl +struct quaternionbase_assign_impl { typedef typename Other::Scalar Scalar; template inline static void run(QuaternionBase& q, const Other& vec) @@ -665,5 +719,6 @@ struct ei_quaternionbase_assign_impl } }; +} // end namespace internal #endif // EIGEN_QUATERNION_H diff --git a/gtsam/3rdparty/Eigen/src/Geometry/Rotation2D.h b/gtsam/3rdparty/Eigen/src/Geometry/Rotation2D.h index c65b4b6e0..e1214bd3e 100644 --- a/gtsam/3rdparty/Eigen/src/Geometry/Rotation2D.h +++ b/gtsam/3rdparty/Eigen/src/Geometry/Rotation2D.h @@ -41,10 +41,14 @@ * * \sa class Quaternion, class Transform */ -template struct ei_traits > + +namespace internal { + +template struct traits > { typedef _Scalar Scalar; }; +} // end namespace internal template class Rotation2D : public RotationBase,2> @@ -107,8 +111,8 @@ public: * then this function smartly returns a const reference to \c *this. 
*/ template - inline typename ei_cast_return_type >::type cast() const - { return typename ei_cast_return_type >::type(*this); } + inline typename internal::cast_return_type >::type cast() const + { return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ template @@ -124,7 +128,7 @@ public: * * \sa MatrixBase::isApprox() */ bool isApprox(const Rotation2D& other, typename NumTraits::Real prec = NumTraits::dummy_precision()) const - { return ei_isApprox(m_angle,other.m_angle, prec); } + { return internal::isApprox(m_angle,other.m_angle, prec); } }; /** \ingroup Geometry_Module @@ -143,7 +147,7 @@ template Rotation2D& Rotation2D::fromRotationMatrix(const MatrixBase& mat) { EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE) - m_angle = ei_atan2(mat.coeff(1,0), mat.coeff(0,0)); + m_angle = internal::atan2(mat.coeff(1,0), mat.coeff(0,0)); return *this; } @@ -153,8 +157,8 @@ template typename Rotation2D::Matrix2 Rotation2D::toRotationMatrix(void) const { - Scalar sinA = ei_sin(m_angle); - Scalar cosA = ei_cos(m_angle); + Scalar sinA = internal::sin(m_angle); + Scalar cosA = internal::cos(m_angle); return (Matrix2() << cosA, -sinA, sinA, cosA).finished(); } diff --git a/gtsam/3rdparty/Eigen/src/Geometry/RotationBase.h b/gtsam/3rdparty/Eigen/src/Geometry/RotationBase.h index 181e65be9..1abf06bb6 100644 --- a/gtsam/3rdparty/Eigen/src/Geometry/RotationBase.h +++ b/gtsam/3rdparty/Eigen/src/Geometry/RotationBase.h @@ -26,8 +26,10 @@ #define EIGEN_ROTATIONBASE_H // forward declaration +namespace internal { template -struct ei_rotation_base_generic_product_selector; +struct rotation_base_generic_product_selector; +} /** \class RotationBase * @@ -42,7 +44,7 @@ class RotationBase public: enum { Dim = _Dim }; /** the scalar type of the coefficients */ - typedef typename ei_traits::Scalar Scalar; + typedef typename internal::traits::Scalar Scalar; /** corresponding 
linear transformation matrix type */ typedef Matrix RotationMatrixType; @@ -78,9 +80,9 @@ class RotationBase * - a vector of size Dim */ template - EIGEN_STRONG_INLINE typename ei_rotation_base_generic_product_selector::ReturnType + EIGEN_STRONG_INLINE typename internal::rotation_base_generic_product_selector::ReturnType operator*(const EigenBase& e) const - { return ei_rotation_base_generic_product_selector::run(derived(), e.derived()); } + { return internal::rotation_base_generic_product_selector::run(derived(), e.derived()); } /** \returns the concatenation of a linear transformation \a l with the rotation \a r */ template friend @@ -96,8 +98,8 @@ class RotationBase } /** \returns the concatenation of the rotation \c *this with a transformation \a t */ - template - inline Transform operator*(const Transform& t) const + template + inline Transform operator*(const Transform& t) const { return toRotationMatrix() * t; } template @@ -105,9 +107,11 @@ class RotationBase { return toRotationMatrix() * v; } }; +namespace internal { + // implementation of the generic product rotation * matrix template -struct ei_rotation_base_generic_product_selector +struct rotation_base_generic_product_selector { enum { Dim = RotationDerived::Dim }; typedef Matrix ReturnType; @@ -116,7 +120,7 @@ struct ei_rotation_base_generic_product_selector -struct ei_rotation_base_generic_product_selector< RotationDerived, DiagonalMatrix, false > +struct rotation_base_generic_product_selector< RotationDerived, DiagonalMatrix, false > { typedef Transform ReturnType; inline static ReturnType run(const RotationDerived& r, const DiagonalMatrix& m) @@ -128,7 +132,7 @@ struct ei_rotation_base_generic_product_selector< RotationDerived, DiagonalMatri }; template -struct ei_rotation_base_generic_product_selector +struct rotation_base_generic_product_selector { enum { Dim = RotationDerived::Dim }; typedef Matrix ReturnType; @@ -138,6 +142,8 @@ struct ei_rotation_base_generic_product_selector return *this = 
r.toRotationMatrix(); } +namespace internal { + /** \internal * * Helper function to return an arbitrary rotation object to a rotation matrix. @@ -179,29 +187,31 @@ Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols> * - any matrix expression, * - any type based on RotationBase (e.g., Quaternion, AngleAxis, Rotation2D) * - * Currently ei_toRotationMatrix is only used by Transform. + * Currently toRotationMatrix is only used by Transform. * * \sa class Transform, class Rotation2D, class Quaternion, class AngleAxis */ template -inline static Matrix ei_toRotationMatrix(const Scalar& s) +inline static Matrix toRotationMatrix(const Scalar& s) { EIGEN_STATIC_ASSERT(Dim==2,YOU_MADE_A_PROGRAMMING_MISTAKE) return Rotation2D(s).toRotationMatrix(); } template -inline static Matrix ei_toRotationMatrix(const RotationBase& r) +inline static Matrix toRotationMatrix(const RotationBase& r) { return r.toRotationMatrix(); } template -inline static const MatrixBase& ei_toRotationMatrix(const MatrixBase& mat) +inline static const MatrixBase& toRotationMatrix(const MatrixBase& mat) { EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim, YOU_MADE_A_PROGRAMMING_MISTAKE) return mat; } +} // end namespace internal + #endif // EIGEN_ROTATIONBASE_H diff --git a/gtsam/3rdparty/Eigen/src/Geometry/Scaling.h b/gtsam/3rdparty/Eigen/src/Geometry/Scaling.h index 8fdbdb102..c911d13e1 100644 --- a/gtsam/3rdparty/Eigen/src/Geometry/Scaling.h +++ b/gtsam/3rdparty/Eigen/src/Geometry/Scaling.h @@ -72,13 +72,13 @@ public: inline Transform operator* (const Translation& t) const; /** Concatenates a uniform scaling and an affine transformation */ - template - inline Transform operator* (const Transform& t) const; + template + inline Transform operator* (const Transform& t) const; /** Concatenates a uniform scaling and a linear transformation matrix */ // TODO returns an expression template - inline typename ei_plain_matrix_type::type operator* (const 
MatrixBase& other) const + inline typename internal::plain_matrix_type::type operator* (const MatrixBase& other) const { return other * m_factor; } template @@ -108,7 +108,7 @@ public: * * \sa MatrixBase::isApprox() */ bool isApprox(const UniformScaling& other, typename NumTraits::Real prec = NumTraits::dummy_precision()) const - { return ei_isApprox(m_factor, other.factor(), prec); } + { return internal::isApprox(m_factor, other.factor(), prec); } }; @@ -141,7 +141,7 @@ static inline DiagonalMatrix Scaling(Scalar sx, Scalar sy, Scalar sz) * This is an alias for coeffs.asDiagonal() */ template -static inline const DiagonalWrapper Scaling(const MatrixBase& coeffs) +static inline const DiagonalWrapper Scaling(const MatrixBase& coeffs) { return coeffs.asDiagonal(); } /** \addtogroup Geometry_Module */ @@ -170,9 +170,9 @@ UniformScaling::operator* (const Translation& t) const } template -template +template inline Transform -UniformScaling::operator* (const Transform& t) const +UniformScaling::operator* (const Transform& t) const { Transform res = t; res.prescale(factor()); diff --git a/gtsam/3rdparty/Eigen/src/Geometry/Transform.h b/gtsam/3rdparty/Eigen/src/Geometry/Transform.h index e8099495d..289077c1b 100644 --- a/gtsam/3rdparty/Eigen/src/Geometry/Transform.h +++ b/gtsam/3rdparty/Eigen/src/Geometry/Transform.h @@ -27,8 +27,10 @@ #ifndef EIGEN_TRANSFORM_H #define EIGEN_TRANSFORM_H +namespace internal { + template -struct ei_transform_traits +struct transform_traits { enum { @@ -41,33 +43,39 @@ struct ei_transform_traits template< typename TransformType, typename MatrixType, - bool IsProjective = ei_transform_traits::IsProjective> -struct ei_transform_right_product_impl; + int Case = transform_traits::IsProjective ? 0 + : int(MatrixType::RowsAtCompileTime) == int(transform_traits::HDim) ? 
1 + : 2> +struct transform_right_product_impl; template< typename Other, int Mode, + int Options, int Dim, int HDim, int OtherRows=Other::RowsAtCompileTime, int OtherCols=Other::ColsAtCompileTime> -struct ei_transform_left_product_impl; +struct transform_left_product_impl; template< typename Lhs, typename Rhs, bool AnyProjective = - ei_transform_traits::IsProjective || - ei_transform_traits::IsProjective> -struct ei_transform_transform_product_impl; + transform_traits::IsProjective || + transform_traits::IsProjective> +struct transform_transform_product_impl; template< typename Other, int Mode, + int Options, int Dim, int HDim, int OtherRows=Other::RowsAtCompileTime, int OtherCols=Other::ColsAtCompileTime> -struct ei_transform_construct_from_matrix; +struct transform_construct_from_matrix; -template struct ei_transform_take_affine_part; +template struct transform_take_affine_part; + +} // end namespace internal /** \geometry_module \ingroup Geometry_Module * @@ -75,15 +83,16 @@ template struct ei_transform_take_affine_part; * * \brief Represents an homogeneous transformation in a N dimensional space * - * \param _Scalar the scalar type, i.e., the type of the coefficients - * \param _Dim the dimension of the space - * \param _Mode the type of the transformation. Can be: + * \tparam _Scalar the scalar type, i.e., the type of the coefficients + * \tparam _Dim the dimension of the space + * \tparam _Mode the type of the transformation. Can be: * - Affine: the transformation is stored as a (Dim+1)^2 matrix, * where the last row is assumed to be [0 ... 0 1]. - * This is the default. * - AffineCompact: the transformation is stored as a (Dim)x(Dim+1) matrix. * - Projective: the transformation is stored as a (Dim+1)^2 matrix * without any assumption. + * \tparam _Options has the same meaning as in class Matrix. It allows to specify DontAlign and/or RowMajor. + * These Options are passed directly to the underlying matrix type. 
* * The homography is internally represented and stored by a matrix which * is available through the matrix() method. To understand the behavior of @@ -171,15 +180,19 @@ template struct ei_transform_take_affine_part; * Conversion methods from/to Qt's QMatrix and QTransform are available if the * preprocessor token EIGEN_QT_SUPPORT is defined. * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_TRANSFORM_PLUGIN. + * * \sa class Matrix, class Quaternion */ -template +template class Transform { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim==Dynamic ? Dynamic : (_Dim+1)*(_Dim+1)) enum { Mode = _Mode, + Options = _Options, Dim = _Dim, ///< space dimension in which the transformation holds HDim = _Dim+1, ///< size of a respective homogeneous vector Rows = int(Mode)==(AffineCompact) ? Dim : HDim @@ -188,25 +201,36 @@ public: typedef _Scalar Scalar; typedef DenseIndex Index; /** type of the matrix used to represent the transformation */ - typedef Matrix MatrixType; + typedef typename internal::make_proper_matrix_type::type MatrixType; + /** constified MatrixType */ + typedef const MatrixType ConstMatrixType; /** type of the matrix used to represent the linear part of the transformation */ - typedef Matrix LinearMatrixType; + typedef Matrix LinearMatrixType; /** type of read/write reference to the linear part of the transformation */ typedef Block LinearPart; + /** type of read reference to the linear part of the transformation */ + typedef const Block ConstLinearPart; /** type of read/write reference to the affine part of the transformation */ - typedef typename ei_meta_if >::ret AffinePart; - /** type of read/write reference to the affine part of the transformation */ - typedef typename ei_meta_if >::ret AffinePartNested; + Block >::type AffinePart; + /** type of read reference to the affine part of the transformation */ + 
typedef typename internal::conditional >::type ConstAffinePart; /** type of a vector */ typedef Matrix VectorType; /** type of a read/write reference to the translation part of the rotation */ typedef Block TranslationPart; + /** type of a read reference to the translation part of the rotation */ + typedef const Block ConstTranslationPart; /** corresponding translation type */ typedef Translation TranslationType; + + // this intermediate enum is needed to avoid an ICE with gcc 3.4 and 4.0 + enum { TransformTimeDiagonalMode = ((Mode==int(Isometry))?Affine:int(Mode)) }; + /** The return type of the product between a diagonal matrix and a transform */ + typedef Transform TransformTimeDiagonalReturnType; protected: @@ -218,43 +242,67 @@ public: * If Mode==Affine, then the last row is set to [0 ... 0 1] */ inline Transform() { + check_template_params(); if (int(Mode)==Affine) makeAffine(); } inline Transform(const Transform& other) { + check_template_params(); m_matrix = other.m_matrix; } - inline explicit Transform(const TranslationType& t) { *this = t; } - inline explicit Transform(const UniformScaling& s) { *this = s; } + inline explicit Transform(const TranslationType& t) + { + check_template_params(); + *this = t; + } + inline explicit Transform(const UniformScaling& s) + { + check_template_params(); + *this = s; + } template - inline explicit Transform(const RotationBase& r) { *this = r; } + inline explicit Transform(const RotationBase& r) + { + check_template_params(); + *this = r; + } inline Transform& operator=(const Transform& other) { m_matrix = other.m_matrix; return *this; } - typedef ei_transform_take_affine_part take_affine_part; + typedef internal::transform_take_affine_part take_affine_part; /** Constructs and initializes a transformation from a Dim^2 or a (Dim+1)^2 matrix. 
*/ template inline explicit Transform(const EigenBase& other) { - ei_transform_construct_from_matrix::run(this, other.derived()); + check_template_params(); + internal::transform_construct_from_matrix::run(this, other.derived()); } /** Set \c *this from a Dim^2 or (Dim+1)^2 matrix. */ template inline Transform& operator=(const EigenBase& other) { - ei_transform_construct_from_matrix::run(this, other.derived()); + internal::transform_construct_from_matrix::run(this, other.derived()); return *this; } - - template - inline Transform(const Transform& other) + + template + inline Transform(const Transform& other) { + check_template_params(); + // only the options change, we can directly copy the matrices + m_matrix = other.matrix(); + } + + template + inline Transform(const Transform& other) + { + check_template_params(); // prevent conversions as: // Affine | AffineCompact | Isometry = Projective EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Projective), Mode==int(Projective)), @@ -279,8 +327,8 @@ public: } else if(OtherModeIsAffineCompact) { - typedef typename Transform::MatrixType OtherMatrixType; - ei_transform_construct_from_matrix::run(this, other.matrix()); + typedef typename Transform::MatrixType OtherMatrixType; + internal::transform_construct_from_matrix::run(this, other.matrix()); } else { @@ -295,6 +343,7 @@ public: template Transform(const ReturnByValue& other) { + check_template_params(); other.evalTo(*this); } @@ -327,17 +376,17 @@ public: inline MatrixType& matrix() { return m_matrix; } /** \returns a read-only expression of the linear part of the transformation */ - inline const LinearPart linear() const { return m_matrix.template block(0,0); } + inline ConstLinearPart linear() const { return m_matrix.template block(0,0); } /** \returns a writable expression of the linear part of the transformation */ inline LinearPart linear() { return m_matrix.template block(0,0); } /** \returns a read-only expression of the Dim x HDim affine part of the 
transformation */ - inline const AffinePart affine() const { return take_affine_part::run(m_matrix); } + inline ConstAffinePart affine() const { return take_affine_part::run(m_matrix); } /** \returns a writable expression of the Dim x HDim affine part of the transformation */ inline AffinePart affine() { return take_affine_part::run(m_matrix); } /** \returns a read-only expression of the translation vector of the transformation */ - inline const TranslationPart translation() const { return m_matrix.template block(0,Dim); } + inline ConstTranslationPart translation() const { return m_matrix.template block(0,Dim); } /** \returns a writable expression of the translation vector of the transformation */ inline TranslationPart translation() { return m_matrix.template block(0,Dim); } @@ -354,9 +403,9 @@ public: */ // note: this function is defined here because some compilers cannot find the respective declaration template - EIGEN_STRONG_INLINE const typename ei_transform_right_product_impl::ResultType + EIGEN_STRONG_INLINE const typename internal::transform_right_product_impl::ResultType operator * (const EigenBase &other) const - { return ei_transform_right_product_impl::run(*this,other.derived()); } + { return internal::transform_right_product_impl::run(*this,other.derived()); } /** \returns the product expression of a transformation matrix \a a times a transform \a b * @@ -366,9 +415,9 @@ public: * \li a general transformation matrix of size Dim+1 x Dim+1. 
*/ template friend - inline const typename ei_transform_left_product_impl::ResultType + inline const typename internal::transform_left_product_impl::ResultType operator * (const EigenBase &a, const Transform &b) - { return ei_transform_left_product_impl::run(a.derived(),b); } + { return internal::transform_left_product_impl::run(a.derived(),b); } /** \returns The product expression of a transform \a a times a diagonal matrix \a b * @@ -376,10 +425,11 @@ public: * product results in a Transform of the same type (mode) as the lhs only if the lhs * mode is no isometry. In that case, the returned transform is an affinity. */ - friend inline const Transform - operator * (const Transform &a, const DiagonalMatrix &b) + template + inline const TransformTimeDiagonalReturnType + operator * (const DiagonalBase &b) const { - Transform res(a); + TransformTimeDiagonalReturnType res(*this); res.linear() *= b; return res; } @@ -390,10 +440,11 @@ public: * product results in a Transform of the same type (mode) as the lhs only if the lhs * mode is no isometry. In that case, the returned transform is an affinity. 
*/ - friend inline const Transform - operator * (const DiagonalMatrix &a, const Transform &b) + template + friend inline TransformTimeDiagonalReturnType + operator * (const DiagonalBase &a, const Transform &b) { - Transform res; + TransformTimeDiagonalReturnType res; res.linear().noalias() = a*b.linear(); res.translation().noalias() = a*b.translation(); if (Mode!=int(AffineCompact)) @@ -407,16 +458,16 @@ public: /** Concatenates two transformations */ inline const Transform operator * (const Transform& other) const { - return ei_transform_transform_product_impl::run(*this,other); + return internal::transform_transform_product_impl::run(*this,other); } /** Concatenates two different transformations */ - template - inline const typename ei_transform_transform_product_impl< - Transform,Transform >::ResultType - operator * (const Transform& other) const + template + inline const typename internal::transform_transform_product_impl< + Transform,Transform >::ResultType + operator * (const Transform& other) const { - return ei_transform_transform_product_impl >::run(*this,other); + return internal::transform_transform_product_impl >::run(*this,other); } /** \sa MatrixBase::setIdentity() */ @@ -472,7 +523,7 @@ public: template inline Transform operator*(const RotationBase& r) const; - LinearMatrixType rotation() const; + const LinearMatrixType rotation() const; template void computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const; template @@ -495,13 +546,16 @@ public: * then this function smartly returns a const reference to \c *this. 
*/ template - inline typename ei_cast_return_type >::type cast() const - { return typename ei_cast_return_type >::type(*this); } + inline typename internal::cast_return_type >::type cast() const + { return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ template - inline explicit Transform(const Transform& other) - { m_matrix = other.matrix().template cast(); } + inline explicit Transform(const Transform& other) + { + check_template_params(); + m_matrix = other.matrix().template cast(); + } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. @@ -551,6 +605,14 @@ public: #ifdef EIGEN_TRANSFORM_PLUGIN #include EIGEN_TRANSFORM_PLUGIN #endif + +protected: + #ifndef EIGEN_PARSED_BY_DOXYGEN + EIGEN_STRONG_INLINE static void check_template_params() + { + EIGEN_STATIC_ASSERT((Options & (DontAlign|RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS) + } + #endif }; @@ -599,9 +661,10 @@ typedef Transform Projective3d; * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ -template -Transform::Transform(const QMatrix& other) +template +Transform::Transform(const QMatrix& other) { + check_template_params(); *this = other; } @@ -609,8 +672,8 @@ Transform::Transform(const QMatrix& other) * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ -template -Transform& Transform::operator=(const QMatrix& other) +template +Transform& Transform::operator=(const QMatrix& other) { EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) m_matrix << other.m11(), other.m21(), other.dx(), @@ -625,9 +688,10 @@ Transform& Transform::operator=(const QMatrix& * * This function is available only if the token EIGEN_QT_SUPPORT is defined. 
*/ -template -QMatrix Transform::toQMatrix(void) const +template +QMatrix Transform::toQMatrix(void) const { + check_template_params(); EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) return QMatrix(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(0,1), m_matrix.coeff(1,1), @@ -638,9 +702,10 @@ QMatrix Transform::toQMatrix(void) const * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ -template -Transform::Transform(const QTransform& other) +template +Transform::Transform(const QTransform& other) { + check_template_params(); *this = other; } @@ -648,9 +713,10 @@ Transform::Transform(const QTransform& other) * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ -template -Transform& Transform::operator=(const QTransform& other) +template +Transform& Transform::operator=(const QTransform& other) { + check_template_params(); EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) m_matrix << other.m11(), other.m21(), other.dx(), other.m12(), other.m22(), other.dy(), @@ -662,8 +728,8 @@ Transform& Transform::operator=(const QTransfo * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ -template -QTransform Transform::toQTransform(void) const +template +QTransform Transform::toQTransform(void) const { EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) return QTransform(matrix.coeff(0,0), matrix.coeff(1,0), matrix.coeff(2,0) @@ -680,10 +746,10 @@ QTransform Transform::toQTransform(void) const * by the vector \a other to \c *this and returns a reference to \c *this. 
* \sa prescale() */ -template +template template -Transform& -Transform::scale(const MatrixBase &other) +Transform& +Transform::scale(const MatrixBase &other) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) @@ -695,8 +761,8 @@ Transform::scale(const MatrixBase &other) * and returns a reference to \c *this. * \sa prescale(Scalar) */ -template -inline Transform& Transform::scale(Scalar s) +template +inline Transform& Transform::scale(Scalar s) { EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) linearExt() *= s; @@ -707,10 +773,10 @@ inline Transform& Transform::scale(Scalar s) * by the vector \a other to \c *this and returns a reference to \c *this. * \sa scale() */ -template +template template -Transform& -Transform::prescale(const MatrixBase &other) +Transform& +Transform::prescale(const MatrixBase &other) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) @@ -722,8 +788,8 @@ Transform::prescale(const MatrixBase &other) * and returns a reference to \c *this. * \sa scale(Scalar) */ -template -inline Transform& Transform::prescale(Scalar s) +template +inline Transform& Transform::prescale(Scalar s) { EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) m_matrix.template topRows() *= s; @@ -734,10 +800,10 @@ inline Transform& Transform::prescale(Scalar s * to \c *this and returns a reference to \c *this. 
* \sa pretranslate() */ -template +template template -Transform& -Transform::translate(const MatrixBase &other) +Transform& +Transform::translate(const MatrixBase &other) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) translationExt() += linearExt() * other; @@ -748,10 +814,10 @@ Transform::translate(const MatrixBase &other) * to \c *this and returns a reference to \c *this. * \sa translate() */ -template +template template -Transform& -Transform::pretranslate(const MatrixBase &other) +Transform& +Transform::pretranslate(const MatrixBase &other) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) if(int(Mode)==int(Projective)) @@ -765,7 +831,7 @@ Transform::pretranslate(const MatrixBase &other) * to \c *this and returns a reference to \c *this. * * The template parameter \a RotationType is the type of the rotation which - * must be known by ei_toRotationMatrix<>. + * must be known by internal::toRotationMatrix<>. * * Natively supported types includes: * - any scalar (2D), @@ -778,12 +844,12 @@ Transform::pretranslate(const MatrixBase &other) * * \sa rotate(Scalar), class Quaternion, class AngleAxis, prerotate(RotationType) */ -template +template template -Transform& -Transform::rotate(const RotationType& rotation) +Transform& +Transform::rotate(const RotationType& rotation) { - linearExt() *= ei_toRotationMatrix(rotation); + linearExt() *= internal::toRotationMatrix(rotation); return *this; } @@ -794,12 +860,12 @@ Transform::rotate(const RotationType& rotation) * * \sa rotate() */ -template +template template -Transform& -Transform::prerotate(const RotationType& rotation) +Transform& +Transform::prerotate(const RotationType& rotation) { - m_matrix.template block(0,0) = ei_toRotationMatrix(rotation) + m_matrix.template block(0,0) = internal::toRotationMatrix(rotation) * m_matrix.template block(0,0); return *this; } @@ -809,9 +875,9 @@ Transform::prerotate(const RotationType& rotation) * \warning 2D only. 
* \sa preshear() */ -template -Transform& -Transform::shear(Scalar sx, Scalar sy) +template +Transform& +Transform::shear(Scalar sx, Scalar sy) { EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE) EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) @@ -825,9 +891,9 @@ Transform::shear(Scalar sx, Scalar sy) * \warning 2D only. * \sa shear() */ -template -Transform& -Transform::preshear(Scalar sx, Scalar sy) +template +Transform& +Transform::preshear(Scalar sx, Scalar sy) { EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE) EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) @@ -839,8 +905,8 @@ Transform::preshear(Scalar sx, Scalar sy) *** Scaling, Translation and Rotation compatibility *** ******************************************************/ -template -inline Transform& Transform::operator=(const TranslationType& t) +template +inline Transform& Transform::operator=(const TranslationType& t) { linear().setIdentity(); translation() = t.vector(); @@ -848,16 +914,16 @@ inline Transform& Transform::operator=(const T return *this; } -template -inline Transform Transform::operator*(const TranslationType& t) const +template +inline Transform Transform::operator*(const TranslationType& t) const { Transform res = *this; res.translate(t.vector()); return res; } -template -inline Transform& Transform::operator=(const UniformScaling& s) +template +inline Transform& Transform::operator=(const UniformScaling& s) { m_matrix.setZero(); linear().diagonal().fill(s.factor()); @@ -865,27 +931,27 @@ inline Transform& Transform::operator=(const U return *this; } -template -inline Transform Transform::operator*(const UniformScaling& s) const +template +inline Transform Transform::operator*(const UniformScaling& s) const { Transform res = *this; res.scale(s.factor()); return res; } -template +template template -inline Transform& Transform::operator=(const RotationBase& r) +inline 
Transform& Transform::operator=(const RotationBase& r) { - linear() = ei_toRotationMatrix(r); + linear() = internal::toRotationMatrix(r); translation().setZero(); makeAffine(); return *this; } -template +template template -inline Transform Transform::operator*(const RotationBase& r) const +inline Transform Transform::operator*(const RotationBase& r) const { Transform res = *this; res.rotate(r.derived()); @@ -903,9 +969,9 @@ inline Transform Transform::operator*(const Ro * * \sa computeRotationScaling(), computeScalingRotation(), class SVD */ -template -typename Transform::LinearMatrixType -Transform::rotation() const +template +const typename Transform::LinearMatrixType +Transform::rotation() const { LinearMatrixType result; computeRotationScaling(&result, (LinearMatrixType*)0); @@ -924,9 +990,9 @@ Transform::rotation() const * * \sa computeScalingRotation(), rotation(), class SVD */ -template +template template -void Transform::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const +void Transform::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const { JacobiSVD svd(linear(), ComputeFullU | ComputeFullV); @@ -953,9 +1019,9 @@ void Transform::computeRotationScaling(RotationMatrixType *rota * * \sa computeRotationScaling(), rotation(), class SVD */ -template +template template -void Transform::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const +void Transform::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const { JacobiSVD svd(linear(), ComputeFullU | ComputeFullV); @@ -974,29 +1040,31 @@ void Transform::computeScalingRotation(ScalingMatrixType *scali /** Convenient method to set \c *this from a position, orientation and scale * of a 3D object. 
*/ -template +template template -Transform& -Transform::fromPositionOrientationScale(const MatrixBase &position, +Transform& +Transform::fromPositionOrientationScale(const MatrixBase &position, const OrientationType& orientation, const MatrixBase &scale) { - linear() = ei_toRotationMatrix(orientation); + linear() = internal::toRotationMatrix(orientation); linear() *= scale.asDiagonal(); translation() = position; makeAffine(); return *this; } +namespace internal { + // selector needed to avoid taking the inverse of a 3x4 matrix template -struct ei_projective_transform_inverse +struct projective_transform_inverse { static inline void run(const TransformType&, TransformType&) {} }; template -struct ei_projective_transform_inverse +struct projective_transform_inverse { static inline void run(const TransformType& m, TransformType& res) { @@ -1004,6 +1072,8 @@ struct ei_projective_transform_inverse } }; +} // end namespace internal + /** * @@ -1011,12 +1081,13 @@ struct ei_projective_transform_inverse * on \c *this. * * \param hint allows to optimize the inversion process when the transformation - * is known to be not a general transformation. The possible values are: + * is known to be not a general transformation (optional). The possible values are: * - Projective if the transformation is not necessarily affine, i.e., if the * last row is not guaranteed to be [0 ... 0 1] - * - Affine is the default, the last row is assumed to be [0 ... 0 1] + * - Affine if the last row can be assumed to be [0 ... 0 1] * - Isometry if the transformation is only a concatenations of translations * and rotations. + * The default is the template class parameter \c Mode. * * \warning unless \a traits is always set to NoShear or NoScaling, this function * requires the generic inverse method of MatrixBase defined in the LU module. 
If @@ -1024,14 +1095,14 @@ struct ei_projective_transform_inverse * * \sa MatrixBase::inverse() */ -template -Transform -Transform::inverse(TransformTraits hint) const +template +Transform +Transform::inverse(TransformTraits hint) const { Transform res; if (hint == Projective) { - ei_projective_transform_inverse::run(*this, res); + internal::projective_transform_inverse::run(*this, res); } else { @@ -1045,7 +1116,7 @@ Transform::inverse(TransformTraits hint) const } else { - ei_assert(false && "Invalid transform traits in Transform::Inverse"); + eigen_assert(false && "Invalid transform traits in Transform::Inverse"); } // translation and remaining parts res.matrix().template topRightCorner() @@ -1055,22 +1126,25 @@ Transform::inverse(TransformTraits hint) const return res; } +namespace internal { + /***************************************************** *** Specializations of take affine part *** *****************************************************/ -template struct ei_transform_take_affine_part { +template struct transform_take_affine_part { typedef typename TransformType::MatrixType MatrixType; typedef typename TransformType::AffinePart AffinePart; + typedef typename TransformType::ConstAffinePart ConstAffinePart; static inline AffinePart run(MatrixType& m) { return m.template block(0,0); } - static inline const AffinePart run(const MatrixType& m) + static inline ConstAffinePart run(const MatrixType& m) { return m.template block(0,0); } }; -template -struct ei_transform_take_affine_part > { - typedef typename Transform::MatrixType MatrixType; +template +struct transform_take_affine_part > { + typedef typename Transform::MatrixType MatrixType; static inline MatrixType& run(MatrixType& m) { return m; } static inline const MatrixType& run(const MatrixType& m) { return m; } }; @@ -1079,10 +1153,10 @@ struct ei_transform_take_affine_part > { *** Specializations of construct from matrix *** *****************************************************/ -template -struct 
ei_transform_construct_from_matrix +template +struct transform_construct_from_matrix { - static inline void run(Transform *transform, const Other& other) + static inline void run(Transform *transform, const Other& other) { transform->linear() = other; transform->translation().setZero(); @@ -1090,27 +1164,27 @@ struct ei_transform_construct_from_matrix } }; -template -struct ei_transform_construct_from_matrix +template +struct transform_construct_from_matrix { - static inline void run(Transform *transform, const Other& other) + static inline void run(Transform *transform, const Other& other) { transform->affine() = other; transform->makeAffine(); } }; -template -struct ei_transform_construct_from_matrix +template +struct transform_construct_from_matrix { - static inline void run(Transform *transform, const Other& other) + static inline void run(Transform *transform, const Other& other) { transform->matrix() = other; } }; -template -struct ei_transform_construct_from_matrix +template +struct transform_construct_from_matrix { - static inline void run(Transform *transform, const Other& other) + static inline void run(Transform *transform, const Other& other) { transform->matrix() = other.template block(0,0); } }; @@ -1119,7 +1193,7 @@ struct ei_transform_construct_from_matrix -struct ei_transform_product_result +struct transform_product_result { enum { @@ -1132,7 +1206,7 @@ struct ei_transform_product_result }; template< typename TransformType, typename MatrixType > -struct ei_transform_right_product_impl< TransformType, MatrixType, true > +struct transform_right_product_impl< TransformType, MatrixType, 0 > { typedef typename MatrixType::PlainObject ResultType; @@ -1143,7 +1217,7 @@ struct ei_transform_right_product_impl< TransformType, MatrixType, true > }; template< typename TransformType, typename MatrixType > -struct ei_transform_right_product_impl< TransformType, MatrixType, false > +struct transform_right_product_impl< TransformType, MatrixType, 1 > { enum { Dim 
= TransformType::Dim, @@ -1156,20 +1230,39 @@ struct ei_transform_right_product_impl< TransformType, MatrixType, false > EIGEN_STRONG_INLINE static ResultType run(const TransformType& T, const MatrixType& other) { - EIGEN_STATIC_ASSERT(OtherRows==Dim || OtherRows==HDim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES); + EIGEN_STATIC_ASSERT(OtherRows==HDim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES); typedef Block TopLeftLhs; - typedef Block TopLeftRhs; ResultType res(other.rows(),other.cols()); + TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() = T.affine() * other; + res.row(OtherRows-1) = other.row(OtherRows-1); + + return res; + } +}; - TopLeftLhs(res, 0, 0, Dim, other.cols()) = - ( T.linear() * TopLeftRhs(other, 0, 0, Dim, other.cols()) ).colwise() + - T.translation(); +template< typename TransformType, typename MatrixType > +struct transform_right_product_impl< TransformType, MatrixType, 2 > +{ + enum { + Dim = TransformType::Dim, + HDim = TransformType::HDim, + OtherRows = MatrixType::RowsAtCompileTime, + OtherCols = MatrixType::ColsAtCompileTime + }; - // we need to take .rows() because OtherRows might be Dim or HDim - if (OtherRows==HDim) - res.row(other.rows()) = other.row(other.rows()); + typedef typename MatrixType::PlainObject ResultType; + + EIGEN_STRONG_INLINE static ResultType run(const TransformType& T, const MatrixType& other) + { + EIGEN_STATIC_ASSERT(OtherRows==Dim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES); + + typedef Block TopLeftLhs; + + ResultType res(other.rows(),other.cols()); + TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() = T.linear() * other; + TopLeftLhs(res, 0, 0, Dim, other.cols()).colwise() += T.translation(); return res; } @@ -1180,23 +1273,23 @@ struct ei_transform_right_product_impl< TransformType, MatrixType, false > **********************************************************/ // generic HDim x HDim matrix * T => Projective -template -struct ei_transform_left_product_impl +template +struct transform_left_product_impl { - typedef Transform 
TransformType; + typedef Transform TransformType; typedef typename TransformType::MatrixType MatrixType; - typedef Transform ResultType; + typedef Transform ResultType; static ResultType run(const Other& other,const TransformType& tr) { return ResultType(other * tr.matrix()); } }; // generic HDim x HDim matrix * AffineCompact => Projective -template -struct ei_transform_left_product_impl +template +struct transform_left_product_impl { - typedef Transform TransformType; + typedef Transform TransformType; typedef typename TransformType::MatrixType MatrixType; - typedef Transform ResultType; + typedef Transform ResultType; static ResultType run(const Other& other,const TransformType& tr) { ResultType res; @@ -1207,10 +1300,10 @@ struct ei_transform_left_product_impl }; // affine matrix * T -template -struct ei_transform_left_product_impl +template +struct transform_left_product_impl { - typedef Transform TransformType; + typedef Transform TransformType; typedef typename TransformType::MatrixType MatrixType; typedef TransformType ResultType; static ResultType run(const Other& other,const TransformType& tr) @@ -1223,10 +1316,10 @@ struct ei_transform_left_product_impl }; // affine matrix * AffineCompact -template -struct ei_transform_left_product_impl +template +struct transform_left_product_impl { - typedef Transform TransformType; + typedef Transform TransformType; typedef typename TransformType::MatrixType MatrixType; typedef TransformType ResultType; static ResultType run(const Other& other,const TransformType& tr) @@ -1239,10 +1332,10 @@ struct ei_transform_left_product_impl }; // linear matrix * T -template -struct ei_transform_left_product_impl +template +struct transform_left_product_impl { - typedef Transform TransformType; + typedef Transform TransformType; typedef typename TransformType::MatrixType MatrixType; typedef TransformType ResultType; static ResultType run(const Other& other, const TransformType& tr) @@ -1260,13 +1353,13 @@ struct 
ei_transform_left_product_impl *** Specializations of operator* with another Transform *** **********************************************************/ -template -struct ei_transform_transform_product_impl,Transform,false > +template +struct transform_transform_product_impl,Transform,false > { - enum { ResultMode = ei_transform_product_result::Mode }; - typedef Transform Lhs; - typedef Transform Rhs; - typedef Transform ResultType; + enum { ResultMode = transform_product_result::Mode }; + typedef Transform Lhs; + typedef Transform Rhs; + typedef Transform ResultType; static ResultType run(const Lhs& lhs, const Rhs& rhs) { ResultType res; @@ -1277,11 +1370,11 @@ struct ei_transform_transform_product_impl,Transfo } }; -template -struct ei_transform_transform_product_impl,Transform,true > +template +struct transform_transform_product_impl,Transform,true > { - typedef Transform Lhs; - typedef Transform Rhs; + typedef Transform Lhs; + typedef Transform Rhs; typedef Transform ResultType; static ResultType run(const Lhs& lhs, const Rhs& rhs) { @@ -1289,4 +1382,6 @@ struct ei_transform_transform_product_impl,Transfo } }; +} // end namespace internal + #endif // EIGEN_TRANSFORM_H diff --git a/gtsam/3rdparty/Eigen/src/Geometry/Translation.h b/gtsam/3rdparty/Eigen/src/Geometry/Translation.h index 59d3e4a41..d8fe50f98 100644 --- a/gtsam/3rdparty/Eigen/src/Geometry/Translation.h +++ b/gtsam/3rdparty/Eigen/src/Geometry/Translation.h @@ -66,14 +66,14 @@ public: /** */ inline Translation(const Scalar& sx, const Scalar& sy) { - ei_assert(Dim==2); + eigen_assert(Dim==2); m_coeffs.x() = sx; m_coeffs.y() = sy; } /** */ inline Translation(const Scalar& sx, const Scalar& sy, const Scalar& sz) { - ei_assert(Dim==3); + eigen_assert(Dim==3); m_coeffs.x() = sx; m_coeffs.y() = sy; m_coeffs.z() = sz; @@ -132,8 +132,8 @@ public: } /** Concatenates a translation and a transformation */ - template - inline Transform operator* (const Transform& t) const + template + inline Transform operator* 
(const Transform& t) const { Transform res = t; res.pretranslate(m_coeffs); @@ -161,8 +161,8 @@ public: * then this function smartly returns a const reference to \c *this. */ template - inline typename ei_cast_return_type >::type cast() const - { return typename ei_cast_return_type >::type(*this); } + inline typename internal::cast_return_type >::type cast() const + { return typename internal::cast_return_type >::type(*this); } /** Copy constructor with scalar type conversion */ template diff --git a/gtsam/3rdparty/Eigen/src/Geometry/Umeyama.h b/gtsam/3rdparty/Eigen/src/Geometry/Umeyama.h index 04449f623..b50f46173 100644 --- a/gtsam/3rdparty/Eigen/src/Geometry/Umeyama.h +++ b/gtsam/3rdparty/Eigen/src/Geometry/Umeyama.h @@ -36,30 +36,31 @@ // These helpers are required since it allows to use mixed types as parameters // for the Umeyama. The problem with mixed parameters is that the return type // cannot trivially be deduced when float and double types are mixed. -namespace +namespace internal { + +// Compile time return type deduction for different MatrixBase types. +// Different means here different alignment and parameters but the same underlying +// real scalar type. +template +struct umeyama_transform_matrix_type { - // Compile time return type deduction for different MatrixBase types. - // Different means here different alignment and parameters but the same underlying - // real scalar type. - template - struct ei_umeyama_transform_matrix_type - { - enum { - MinRowsAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, OtherMatrixType::RowsAtCompileTime), + enum { + MinRowsAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, OtherMatrixType::RowsAtCompileTime), - // When possible we want to choose some small fixed size value since the result - // is likely to fit on the stack. So here, EIGEN_SIZE_MIN_PREFER_DYNAMIC is not what we want. - HomogeneousDimension = int(MinRowsAtCompileTime) == Dynamic ? 
Dynamic : int(MinRowsAtCompileTime)+1 - }; - - typedef Matrix::Scalar, - HomogeneousDimension, - HomogeneousDimension, - AutoAlign | (ei_traits::Flags & RowMajorBit ? RowMajor : ColMajor), - HomogeneousDimension, - HomogeneousDimension - > type; + // When possible we want to choose some small fixed size value since the result + // is likely to fit on the stack. So here, EIGEN_SIZE_MIN_PREFER_DYNAMIC is not what we want. + HomogeneousDimension = int(MinRowsAtCompileTime) == Dynamic ? Dynamic : int(MinRowsAtCompileTime)+1 }; + + typedef Matrix::Scalar, + HomogeneousDimension, + HomogeneousDimension, + AutoAlign | (traits::Flags & RowMajorBit ? RowMajor : ColMajor), + HomogeneousDimension, + HomogeneousDimension + > type; +}; + } #endif @@ -103,23 +104,23 @@ namespace * Eigen::Matrix. */ template -typename ei_umeyama_transform_matrix_type::type +typename internal::umeyama_transform_matrix_type::type umeyama(const MatrixBase& src, const MatrixBase& dst, bool with_scaling = true) { - typedef typename ei_umeyama_transform_matrix_type::type TransformationMatrixType; - typedef typename ei_traits::Scalar Scalar; + typedef typename internal::umeyama_transform_matrix_type::type TransformationMatrixType; + typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename Derived::Index Index; EIGEN_STATIC_ASSERT(!NumTraits::IsComplex, NUMERIC_TYPE_MUST_BE_REAL) - EIGEN_STATIC_ASSERT((ei_is_same_type::Scalar>::ret), + EIGEN_STATIC_ASSERT((internal::is_same::Scalar>::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) enum { Dimension = EIGEN_SIZE_MIN_PREFER_DYNAMIC(Derived::RowsAtCompileTime, OtherDerived::RowsAtCompileTime) }; typedef Matrix VectorType; typedef Matrix MatrixType; - typedef typename ei_plain_matrix_type_row_major::type RowMajorMatrixType; + typedef typename internal::plain_matrix_type_row_major::type RowMajorMatrixType; const Index m = 
src.rows(); // dimension const Index n = src.cols(); // number of measurements @@ -152,7 +153,7 @@ umeyama(const MatrixBase& src, const MatrixBase& dst, boo // Eq. (40) and (43) const VectorType& d = svd.singularValues(); - Index rank = 0; for (Index i=0; i 0 ) { Rt.block(0,0,m,m).noalias() = svd.matrixU()*svd.matrixV().transpose(); diff --git a/gtsam/3rdparty/Eigen/src/Geometry/arch/Geometry_SSE.h b/gtsam/3rdparty/Eigen/src/Geometry/arch/Geometry_SSE.h index 7d82be694..cbe695c72 100644 --- a/gtsam/3rdparty/Eigen/src/Geometry/arch/Geometry_SSE.h +++ b/gtsam/3rdparty/Eigen/src/Geometry/arch/Geometry_SSE.h @@ -26,8 +26,10 @@ #ifndef EIGEN_GEOMETRY_SSE_H #define EIGEN_GEOMETRY_SSE_H +namespace internal { + template -struct ei_quat_product +struct quat_product { inline static Quaternion run(const QuaternionBase& _a, const QuaternionBase& _b) { @@ -35,31 +37,31 @@ struct ei_quat_product Quaternion res; __m128 a = _a.coeffs().template packet(0); __m128 b = _b.coeffs().template packet(0); - __m128 flip1 = _mm_xor_ps(_mm_mul_ps(ei_vec4f_swizzle1(a,1,2,0,2), - ei_vec4f_swizzle1(b,2,0,1,2)),mask); - __m128 flip2 = _mm_xor_ps(_mm_mul_ps(ei_vec4f_swizzle1(a,3,3,3,1), - ei_vec4f_swizzle1(b,0,1,2,1)),mask); - ei_pstore(&res.x(), - _mm_add_ps(_mm_sub_ps(_mm_mul_ps(a,ei_vec4f_swizzle1(b,3,3,3,3)), - _mm_mul_ps(ei_vec4f_swizzle1(a,2,0,1,0), - ei_vec4f_swizzle1(b,1,2,0,0))), + __m128 flip1 = _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a,1,2,0,2), + vec4f_swizzle1(b,2,0,1,2)),mask); + __m128 flip2 = _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a,3,3,3,1), + vec4f_swizzle1(b,0,1,2,1)),mask); + pstore(&res.x(), + _mm_add_ps(_mm_sub_ps(_mm_mul_ps(a,vec4f_swizzle1(b,3,3,3,3)), + _mm_mul_ps(vec4f_swizzle1(a,2,0,1,0), + vec4f_swizzle1(b,1,2,0,0))), _mm_add_ps(flip1,flip2))); return res; } }; template -struct ei_cross3_impl +struct cross3_impl { - inline static typename ei_plain_matrix_type::type + inline static typename plain_matrix_type::type run(const VectorLhs& lhs, const VectorRhs& rhs) { __m128 a = 
lhs.template packet(0); __m128 b = rhs.template packet(0); - __m128 mul1=_mm_mul_ps(ei_vec4f_swizzle1(a,1,2,0,3),ei_vec4f_swizzle1(b,2,0,1,3)); - __m128 mul2=_mm_mul_ps(ei_vec4f_swizzle1(a,2,0,1,3),ei_vec4f_swizzle1(b,1,2,0,3)); - typename ei_plain_matrix_type::type res; - ei_pstore(&res.x(),_mm_sub_ps(mul1,mul2)); + __m128 mul1=_mm_mul_ps(vec4f_swizzle1(a,1,2,0,3),vec4f_swizzle1(b,2,0,1,3)); + __m128 mul2=_mm_mul_ps(vec4f_swizzle1(a,2,0,1,3),vec4f_swizzle1(b,1,2,0,3)); + typename plain_matrix_type::type res; + pstore(&res.x(),_mm_sub_ps(mul1,mul2)); return res; } }; @@ -68,7 +70,7 @@ struct ei_cross3_impl template -struct ei_quat_product +struct quat_product { inline static Quaternion run(const QuaternionBase& _a, const QuaternionBase& _b) { @@ -79,10 +81,10 @@ struct ei_quat_product(0); Packet2d b_zw = _b.coeffs().template packet(2); - Packet2d a_xx = ei_pset1(a[0]); - Packet2d a_yy = ei_pset1(a[1]); - Packet2d a_zz = ei_pset1(a[2]); - Packet2d a_ww = ei_pset1(a[3]); + Packet2d a_xx = pset1(a[0]); + Packet2d a_yy = pset1(a[1]); + Packet2d a_zz = pset1(a[2]); + Packet2d a_ww = pset1(a[3]); // two temporaries: Packet2d t1, t2; @@ -92,13 +94,13 @@ struct ei_quat_product -void ei_make_block_householder_triangular_factor(TriangularFactorType& triFactor, const VectorsType& vectors, const CoeffsType& hCoeffs) +void make_block_householder_triangular_factor(TriangularFactorType& triFactor, const VectorsType& vectors, const CoeffsType& hCoeffs) { typedef typename TriangularFactorType::Index Index; typedef typename VectorsType::Scalar Scalar; const Index nbVecs = vectors.cols(); - ei_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs); + eigen_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs); for(Index i = 0; i < nbVecs; i++) { @@ -54,13 +56,13 @@ void ei_make_block_householder_triangular_factor(TriangularFactorType& triFactor /** \internal */ template -void 
ei_apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vectors, const CoeffsType& hCoeffs) +void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vectors, const CoeffsType& hCoeffs) { typedef typename MatrixType::Index Index; enum { TFactorSize = MatrixType::ColsAtCompileTime }; Index nbVecs = vectors.cols(); Matrix T(nbVecs,nbVecs); - ei_make_block_householder_triangular_factor(T, vectors, hCoeffs); + make_block_householder_triangular_factor(T, vectors, hCoeffs); const TriangularView& V(vectors); @@ -72,5 +74,6 @@ void ei_apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& mat.noalias() -= V * tmp; } +} // end namespace internal #endif // EIGEN_BLOCK_HOUSEHOLDER_H diff --git a/gtsam/3rdparty/Eigen/src/Householder/Householder.h b/gtsam/3rdparty/Eigen/src/Householder/Householder.h index c45e6469d..9ade04c19 100644 --- a/gtsam/3rdparty/Eigen/src/Householder/Householder.h +++ b/gtsam/3rdparty/Eigen/src/Householder/Householder.h @@ -26,17 +26,19 @@ #ifndef EIGEN_HOUSEHOLDER_H #define EIGEN_HOUSEHOLDER_H -template struct ei_decrement_size +namespace internal { +template struct decrement_size { enum { ret = n==Dynamic ? n : n-1 }; }; +} template void MatrixBase::makeHouseholderInPlace(Scalar& tau, RealScalar& beta) { - VectorBlock::ret> essentialPart(derived(), 1, size()-1); + VectorBlock::ret> essentialPart(derived(), 1, size()-1); makeHouseholder(essentialPart, tau, beta); } @@ -63,23 +65,23 @@ void MatrixBase::makeHouseholder( RealScalar& beta) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(EssentialPart) - VectorBlock tail(derived(), 1, size()-1); + VectorBlock tail(derived(), 1, size()-1); RealScalar tailSqNorm = size()==1 ? 
RealScalar(0) : tail.squaredNorm(); Scalar c0 = coeff(0); - if(tailSqNorm == RealScalar(0) && ei_imag(c0)==RealScalar(0)) + if(tailSqNorm == RealScalar(0) && internal::imag(c0)==RealScalar(0)) { tau = 0; - beta = ei_real(c0); + beta = internal::real(c0); } else { - beta = ei_sqrt(ei_abs2(c0) + tailSqNorm); - if (ei_real(c0)>=RealScalar(0)) + beta = internal::sqrt(internal::abs2(c0) + tailSqNorm); + if (internal::real(c0)>=RealScalar(0)) beta = -beta; essential = tail / (c0 - beta); - tau = ei_conj((beta - c0) / beta); + tau = internal::conj((beta - c0) / beta); } } @@ -96,7 +98,7 @@ void MatrixBase::applyHouseholderOnTheLeft( } else { - Map::type> tmp(workspace,cols()); + Map::type> tmp(workspace,cols()); Block bottom(derived(), 1, 0, rows()-1, cols()); tmp.noalias() = essential.adjoint() * bottom; tmp += this->row(0); @@ -118,7 +120,7 @@ void MatrixBase::applyHouseholderOnTheRight( } else { - Map::type> tmp(workspace,rows()); + Map::type> tmp(workspace,rows()); Block right(derived(), 0, 1, rows(), cols()-1); tmp.noalias() = right * essential.conjugate(); tmp += this->col(0); diff --git a/gtsam/3rdparty/Eigen/src/Householder/HouseholderSequence.h b/gtsam/3rdparty/Eigen/src/Householder/HouseholderSequence.h index 0e9e85553..717f29c99 100644 --- a/gtsam/3rdparty/Eigen/src/Householder/HouseholderSequence.h +++ b/gtsam/3rdparty/Eigen/src/Householder/HouseholderSequence.h @@ -29,12 +29,28 @@ /** \ingroup Householder_Module * \householder_module * \class HouseholderSequence - * \brief Represents a sequence of householder reflections with decreasing size + * \brief Sequence of Householder reflections acting on subspaces with decreasing size + * \tparam VectorsType type of matrix containing the Householder vectors + * \tparam CoeffsType type of vector containing the Householder coefficients + * \tparam Side either OnTheLeft (the default) or OnTheRight * - * This class represents a product sequence of householder reflections \f$ H = \Pi_0^{n-1} H_i \f$ - * where \f$ H_i \f$ 
is the i-th householder transformation \f$ I - h_i v_i v_i^* \f$, - * \f$ v_i \f$ is the i-th householder vector \f$ [ 1, m_vectors(i+1,i), m_vectors(i+2,i), ...] \f$ - * and \f$ h_i \f$ is the i-th householder coefficient \c m_coeffs[i]. + * This class represents a product sequence of Householder reflections where the first Householder reflection + * acts on the whole space, the second Householder reflection leaves the one-dimensional subspace spanned by + * the first unit vector invariant, the third Householder reflection leaves the two-dimensional subspace + * spanned by the first two unit vectors invariant, and so on up to the last reflection which leaves all but + * one dimensions invariant and acts only on the last dimension. Such sequences of Householder reflections + * are used in several algorithms to zero out certain parts of a matrix. Indeed, the methods + * HessenbergDecomposition::matrixQ(), Tridiagonalization::matrixQ(), HouseholderQR::householderQ(), + * and ColPivHouseholderQR::householderQ() all return a %HouseholderSequence. + * + * More precisely, the class %HouseholderSequence represents an \f$ n \times n \f$ matrix \f$ H \f$ of the + * form \f$ H = \prod_{i=0}^{n-1} H_i \f$ where the i-th Householder reflection is \f$ H_i = I - h_i v_i + * v_i^* \f$. The i-th Householder coefficient \f$ h_i \f$ is a scalar and the i-th Householder vector \f$ + * v_i \f$ is a vector of the form + * \f[ + * v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ]. + * \f] + * The last \f$ n-i \f$ entries of \f$ v_i \f$ are called the essential part of the Householder vector. * * Typical usages are listed below, where H is a HouseholderSequence: * \code @@ -44,127 +60,191 @@ * A.applyOnTheLeft(H.adjoint()); // A = H^* * A * MatrixXd Q = H; // conversion to a dense matrix * \endcode - * In addition to the adjoint, you can also apply the inverse (=adjoint), the transpose, and the conjugate. 
+ * In addition to the adjoint, you can also apply the inverse (=adjoint), the transpose, and the conjugate operators. + * + * See the documentation for HouseholderSequence(const VectorsType&, const CoeffsType&) for an example. * * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ +namespace internal { + template -struct ei_traits > +struct traits > { typedef typename VectorsType::Scalar Scalar; typedef typename VectorsType::Index Index; typedef typename VectorsType::StorageKind StorageKind; enum { - RowsAtCompileTime = Side==OnTheLeft ? ei_traits::RowsAtCompileTime - : ei_traits::ColsAtCompileTime, + RowsAtCompileTime = Side==OnTheLeft ? traits::RowsAtCompileTime + : traits::ColsAtCompileTime, ColsAtCompileTime = RowsAtCompileTime, - MaxRowsAtCompileTime = Side==OnTheLeft ? ei_traits::MaxRowsAtCompileTime - : ei_traits::MaxColsAtCompileTime, + MaxRowsAtCompileTime = Side==OnTheLeft ? traits::MaxRowsAtCompileTime + : traits::MaxColsAtCompileTime, MaxColsAtCompileTime = MaxRowsAtCompileTime, Flags = 0 }; }; template -struct ei_hseq_side_dependent_impl +struct hseq_side_dependent_impl { - typedef Block EssentialVectorType; + typedef Block EssentialVectorType; typedef HouseholderSequence HouseholderSequenceType; typedef typename VectorsType::Index Index; static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k) { Index start = k+1+h.m_shift; - return Block(h.m_vectors, start, k, h.rows()-start, 1); + return Block(h.m_vectors, start, k, h.rows()-start, 1); } }; template -struct ei_hseq_side_dependent_impl +struct hseq_side_dependent_impl { - typedef Transpose > EssentialVectorType; + typedef Transpose > EssentialVectorType; typedef HouseholderSequence HouseholderSequenceType; typedef typename VectorsType::Index Index; static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k) { Index start = k+1+h.m_shift; - return Block(h.m_vectors, k, start, 1, 
h.rows()-start).transpose(); + return Block(h.m_vectors, k, start, 1, h.rows()-start).transpose(); } }; -template struct ei_matrix_type_times_scalar_type +template struct matrix_type_times_scalar_type { - typedef typename ei_scalar_product_traits::ReturnType + typedef typename scalar_product_traits::ReturnType ResultScalar; typedef Matrix Type; }; +} // end namespace internal + template class HouseholderSequence : public EigenBase > { enum { - RowsAtCompileTime = ei_traits::RowsAtCompileTime, - ColsAtCompileTime = ei_traits::ColsAtCompileTime, - MaxRowsAtCompileTime = ei_traits::MaxRowsAtCompileTime, - MaxColsAtCompileTime = ei_traits::MaxColsAtCompileTime + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + ColsAtCompileTime = internal::traits::ColsAtCompileTime, + MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime }; - typedef typename ei_traits::Scalar Scalar; + typedef typename internal::traits::Scalar Scalar; typedef typename VectorsType::Index Index; - typedef typename ei_hseq_side_dependent_impl::EssentialVectorType + typedef typename internal::hseq_side_dependent_impl::EssentialVectorType EssentialVectorType; public: typedef HouseholderSequence< VectorsType, - typename ei_meta_if::IsComplex, - typename ei_cleantype::type, - CoeffsType>::ret, + typename internal::conditional::IsComplex, + typename internal::remove_all::type, + CoeffsType>::type, Side > ConjugateReturnType; - HouseholderSequence(const VectorsType& v, const CoeffsType& h, bool trans = false) - : m_vectors(v), m_coeffs(h), m_trans(trans), m_actualVectors(v.diagonalSize()), + /** \brief Constructor. + * \param[in] v %Matrix containing the essential parts of the Householder vectors + * \param[in] h Vector containing the Householder coefficients + * + * Constructs the Householder sequence with coefficients given by \p h and vectors given by \p v. 
The + * i-th Householder coefficient \f$ h_i \f$ is given by \p h(i) and the essential part of the i-th + * Householder vector \f$ v_i \f$ is given by \p v(k,i) with \p k > \p i (the subdiagonal part of the + * i-th column). If \p v has fewer columns than rows, then the Householder sequence contains as many + * Householder reflections as there are columns. + * + * \note The %HouseholderSequence object stores \p v and \p h by reference. + * + * Example: \include HouseholderSequence_HouseholderSequence.cpp + * Output: \verbinclude HouseholderSequence_HouseholderSequence.out + * + * \sa setLength(), setShift() + */ + HouseholderSequence(const VectorsType& v, const CoeffsType& h) + : m_vectors(v), m_coeffs(h), m_trans(false), m_length(v.diagonalSize()), m_shift(0) { } - HouseholderSequence(const VectorsType& v, const CoeffsType& h, bool trans, Index actualVectors, Index shift) - : m_vectors(v), m_coeffs(h), m_trans(trans), m_actualVectors(actualVectors), m_shift(shift) + /** \brief Copy constructor. */ + HouseholderSequence(const HouseholderSequence& other) + : m_vectors(other.m_vectors), + m_coeffs(other.m_coeffs), + m_trans(other.m_trans), + m_length(other.m_length), + m_shift(other.m_shift) { } + /** \brief Number of rows of transformation viewed as a matrix. + * \returns Number of rows + * \details This equals the dimension of the space that the transformation acts on. + */ Index rows() const { return Side==OnTheLeft ? m_vectors.rows() : m_vectors.cols(); } + + /** \brief Number of columns of transformation viewed as a matrix. + * \returns Number of columns + * \details This equals the dimension of the space that the transformation acts on. + */ Index cols() const { return rows(); } + /** \brief Essential part of a Householder vector. + * \param[in] k Index of Householder reflection + * \returns Vector containing non-trivial entries of k-th Householder vector + * + * This function returns the essential part of the Householder vector \f$ v_i \f$. 
This is a vector of + * length \f$ n-i \f$ containing the last \f$ n-i \f$ entries of the vector + * \f[ + * v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ]. + * \f] + * The index \f$ i \f$ equals \p k + shift(), corresponding to the k-th column of the matrix \p v + * passed to the constructor. + * + * \sa setShift(), shift() + */ const EssentialVectorType essentialVector(Index k) const { - ei_assert(k >= 0 && k < m_actualVectors); - return ei_hseq_side_dependent_impl::essentialVector(*this, k); + eigen_assert(k >= 0 && k < m_length); + return internal::hseq_side_dependent_impl::essentialVector(*this, k); } + /** \brief %Transpose of the Householder sequence. */ HouseholderSequence transpose() const - { return HouseholderSequence(m_vectors, m_coeffs, !m_trans, m_actualVectors, m_shift); } + { + return HouseholderSequence(*this).setTrans(!m_trans); + } + /** \brief Complex conjugate of the Householder sequence. */ ConjugateReturnType conjugate() const - { return ConjugateReturnType(m_vectors, m_coeffs.conjugate(), m_trans, m_actualVectors, m_shift); } + { + return ConjugateReturnType(m_vectors, m_coeffs.conjugate()) + .setTrans(m_trans) + .setLength(m_length) + .setShift(m_shift); + } + /** \brief Adjoint (conjugate transpose) of the Householder sequence. */ ConjugateReturnType adjoint() const - { return ConjugateReturnType(m_vectors, m_coeffs.conjugate(), !m_trans, m_actualVectors, m_shift); } + { + return conjugate().setTrans(!m_trans); + } + /** \brief Inverse of the Householder sequence (equals the adjoint). 
*/ ConjugateReturnType inverse() const { return adjoint(); } /** \internal */ template void evalTo(DestType& dst) const { - Index vecs = m_actualVectors; - // FIXME find a way to pass this temporary if the user want to + Index vecs = m_length; + // FIXME find a way to pass this temporary if the user wants to Matrix temp(rows()); - if( ei_is_same_type::type,DestType>::ret - && ei_extract_data(dst) == ei_extract_data(m_vectors)) + if( internal::is_same::type,DestType>::value + && internal::extract_data(dst) == internal::extract_data(m_vectors)) { // in-place dst.diagonal().setOnes(); @@ -206,9 +286,9 @@ template class HouseholderS template inline void applyThisOnTheRight(Dest& dst) const { Matrix temp(dst.rows()); - for(Index k = 0; k < m_actualVectors; ++k) + for(Index k = 0; k < m_length; ++k) { - Index actual_k = m_trans ? m_actualVectors-k-1 : k; + Index actual_k = m_trans ? m_length-k-1 : k; dst.rightCols(rows()-m_shift-actual_k) .applyHouseholderOnTheRight(essentialVector(actual_k), m_coeffs.coeff(actual_k), &temp.coeffRef(0)); } @@ -218,68 +298,132 @@ template class HouseholderS template inline void applyThisOnTheLeft(Dest& dst) const { Matrix temp(dst.cols()); - for(Index k = 0; k < m_actualVectors; ++k) + for(Index k = 0; k < m_length; ++k) { - Index actual_k = m_trans ? k : m_actualVectors-k-1; + Index actual_k = m_trans ? k : m_length-k-1; dst.bottomRows(rows()-m_shift-actual_k) .applyHouseholderOnTheLeft(essentialVector(actual_k), m_coeffs.coeff(actual_k), &temp.coeffRef(0)); } } + /** \brief Computes the product of a Householder sequence with a matrix. + * \param[in] other %Matrix being multiplied. + * \returns Expression object representing the product. + * + * This function computes \f$ HM \f$ where \f$ H \f$ is the Householder sequence represented by \p *this + * and \f$ M \f$ is the matrix \p other. 
+ */ template - typename ei_matrix_type_times_scalar_type::Type operator*(const MatrixBase& other) const + typename internal::matrix_type_times_scalar_type::Type operator*(const MatrixBase& other) const { - typename ei_matrix_type_times_scalar_type::Type - res(other.template cast::ResultScalar>()); + typename internal::matrix_type_times_scalar_type::Type + res(other.template cast::ResultScalar>()); applyThisOnTheLeft(res); return res; } - template friend - typename ei_matrix_type_times_scalar_type::Type operator*(const MatrixBase& other, const HouseholderSequence& h) + template friend struct internal::hseq_side_dependent_impl; + + /** \brief Sets the length of the Householder sequence. + * \param [in] length New value for the length. + * + * By default, the length \f$ n \f$ of the Householder sequence \f$ H = H_0 H_1 \ldots H_{n-1} \f$ is set + * to the number of columns of the matrix \p v passed to the constructor, or the number of rows if that + * is smaller. After this function is called, the length equals \p length. + * + * \sa length() + */ + HouseholderSequence& setLength(Index length) { - typename ei_matrix_type_times_scalar_type::Type - res(other.template cast::ResultScalar>()); - h.applyThisOnTheRight(res); - return res; + m_length = length; + return *this; } - template friend struct ei_hseq_side_dependent_impl; + /** \brief Sets the shift of the Householder sequence. + * \param [in] shift New value for the shift. + * + * By default, a %HouseholderSequence object represents \f$ H = H_0 H_1 \ldots H_{n-1} \f$ and the i-th + * column of the matrix \p v passed to the constructor corresponds to the i-th Householder + * reflection. After this function is called, the object represents \f$ H = H_{\mathrm{shift}} + * H_{\mathrm{shift}+1} \ldots H_{n-1} \f$ and the i-th column of \p v corresponds to the (shift+i)-th + * Householder reflection. 
+ * + * \sa shift() + */ + HouseholderSequence& setShift(Index shift) + { + m_shift = shift; + return *this; + } + + Index length() const { return m_length; } /**< \brief Returns the length of the Householder sequence. */ + Index shift() const { return m_shift; } /**< \brief Returns the shift of the Householder sequence. */ + + /* Necessary for .adjoint() and .conjugate() */ + template friend class HouseholderSequence; protected: + + /** \brief Sets the transpose flag. + * \param [in] trans New value of the transpose flag. + * + * By default, the transpose flag is not set. If the transpose flag is set, then this object represents + * \f$ H^T = H_{n-1}^T \ldots H_1^T H_0^T \f$ instead of \f$ H = H_0 H_1 \ldots H_{n-1} \f$. + * + * \sa trans() + */ + HouseholderSequence& setTrans(bool trans) + { + m_trans = trans; + return *this; + } + + bool trans() const { return m_trans; } /**< \brief Returns the transpose flag. */ + typename VectorsType::Nested m_vectors; typename CoeffsType::Nested m_coeffs; bool m_trans; - Index m_actualVectors; + Index m_length; Index m_shift; }; -template -HouseholderSequence householderSequence(const VectorsType& v, const CoeffsType& h, bool trans=false) +/** \brief Computes the product of a matrix with a Householder sequence. + * \param[in] other %Matrix being multiplied. + * \param[in] h %HouseholderSequence being multiplied. + * \returns Expression object representing the product. + * + * This function computes \f$ MH \f$ where \f$ M \f$ is the matrix \p other and \f$ H \f$ is the + * Householder sequence represented by \p h. 
+ */ +template +typename internal::matrix_type_times_scalar_type::Type operator*(const MatrixBase& other, const HouseholderSequence& h) { - return HouseholderSequence(v, h, trans); + typename internal::matrix_type_times_scalar_type::Type + res(other.template cast::ResultScalar>()); + h.applyThisOnTheRight(res); + return res; } +/** \ingroup Householder_Module \householder_module + * \brief Convenience function for constructing a Householder sequence. + * \returns A HouseholderSequence constructed from the specified arguments. + */ template -HouseholderSequence householderSequence - (const VectorsType& v, const CoeffsType& h, - bool trans, typename VectorsType::Index actualVectors, typename VectorsType::Index shift) +HouseholderSequence householderSequence(const VectorsType& v, const CoeffsType& h) { - return HouseholderSequence(v, h, trans, actualVectors, shift); + return HouseholderSequence(v, h); } +/** \ingroup Householder_Module \householder_module + * \brief Convenience function for constructing a Householder sequence. + * \returns A HouseholderSequence constructed from the specified arguments. + * \details This function differs from householderSequence() in that the template argument \p OnTheSide of + * the constructed HouseholderSequence is set to OnTheRight, instead of the default OnTheLeft. 
+ */ template -HouseholderSequence rightHouseholderSequence(const VectorsType& v, const CoeffsType& h, bool trans=false) +HouseholderSequence rightHouseholderSequence(const VectorsType& v, const CoeffsType& h) { - return HouseholderSequence(v, h, trans); -} - -template -HouseholderSequence rightHouseholderSequence - (const VectorsType& v, const CoeffsType& h, bool trans, - typename VectorsType::Index actualVectors, typename VectorsType::Index shift) -{ - return HouseholderSequence(v, h, trans, actualVectors, shift); + return HouseholderSequence(v, h); } #endif // EIGEN_HOUSEHOLDER_SEQUENCE_H diff --git a/gtsam/3rdparty/Eigen/src/Jacobi/Jacobi.h b/gtsam/3rdparty/Eigen/src/Jacobi/Jacobi.h index 6fd1ed389..fb5a2c717 100644 --- a/gtsam/3rdparty/Eigen/src/Jacobi/Jacobi.h +++ b/gtsam/3rdparty/Eigen/src/Jacobi/Jacobi.h @@ -28,8 +28,8 @@ /** \ingroup Jacobi_Module * \jacobi_module - * \class PlanarRotation - * \brief Represents a rotation in the plane from a cosine-sine pair. + * \class JacobiRotation + * \brief Rotation given by a cosine-sine pair. * * This class represents a Jacobi or Givens rotation. * This is a 2D rotation in the plane \c J of angle \f$ \theta \f$ defined by @@ -44,16 +44,16 @@ * * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ -template class PlanarRotation +template class JacobiRotation { public: typedef typename NumTraits::Real RealScalar; /** Default constructor without any initialization. */ - PlanarRotation() {} + JacobiRotation() {} /** Construct a planar rotation from a cosine-sine pair (\a c, \c s). 
*/ - PlanarRotation(const Scalar& c, const Scalar& s) : m_c(c), m_s(s) {} + JacobiRotation(const Scalar& c, const Scalar& s) : m_c(c), m_s(s) {} Scalar& c() { return m_c; } Scalar c() const { return m_c; } @@ -61,17 +61,17 @@ template class PlanarRotation Scalar s() const { return m_s; } /** Concatenates two planar rotation */ - PlanarRotation operator*(const PlanarRotation& other) + JacobiRotation operator*(const JacobiRotation& other) { - return PlanarRotation(m_c * other.m_c - ei_conj(m_s) * other.m_s, - ei_conj(m_c * ei_conj(other.m_s) + ei_conj(m_s) * ei_conj(other.m_c))); + return JacobiRotation(m_c * other.m_c - internal::conj(m_s) * other.m_s, + internal::conj(m_c * internal::conj(other.m_s) + internal::conj(m_s) * internal::conj(other.m_c))); } /** Returns the transposed transformation */ - PlanarRotation transpose() const { return PlanarRotation(m_c, -ei_conj(m_s)); } + JacobiRotation transpose() const { return JacobiRotation(m_c, -internal::conj(m_s)); } /** Returns the adjoint transformation */ - PlanarRotation adjoint() const { return PlanarRotation(ei_conj(m_c), -m_s); } + JacobiRotation adjoint() const { return JacobiRotation(internal::conj(m_c), -m_s); } template bool makeJacobi(const MatrixBase&, typename Derived::Index p, typename Derived::Index q); @@ -80,8 +80,8 @@ template class PlanarRotation void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0); protected: - void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, ei_meta_true); - void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, ei_meta_false); + void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::true_type); + void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::false_type); Scalar m_c, m_s; }; @@ -92,7 +92,7 @@ template class PlanarRotation * \sa MatrixBase::makeJacobi(const MatrixBase&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ template -bool PlanarRotation::makeJacobi(RealScalar x, Scalar y, 
RealScalar z) +bool JacobiRotation::makeJacobi(RealScalar x, Scalar y, RealScalar z) { typedef typename NumTraits::Real RealScalar; if(y == Scalar(0)) @@ -103,8 +103,8 @@ bool PlanarRotation::makeJacobi(RealScalar x, Scalar y, RealScalar z) } else { - RealScalar tau = (x-z)/(RealScalar(2)*ei_abs(y)); - RealScalar w = ei_sqrt(ei_abs2(tau) + 1); + RealScalar tau = (x-z)/(RealScalar(2)*internal::abs(y)); + RealScalar w = internal::sqrt(internal::abs2(tau) + 1); RealScalar t; if(tau>0) { @@ -115,8 +115,8 @@ bool PlanarRotation::makeJacobi(RealScalar x, Scalar y, RealScalar z) t = RealScalar(1) / (tau - w); } RealScalar sign_t = t > 0 ? 1 : -1; - RealScalar n = RealScalar(1) / ei_sqrt(ei_abs2(t)+1); - m_s = - sign_t * (ei_conj(y) / ei_abs(y)) * ei_abs(t) * n; + RealScalar n = RealScalar(1) / internal::sqrt(internal::abs2(t)+1); + m_s = - sign_t * (internal::conj(y) / internal::abs(y)) * internal::abs(t) * n; m_c = n; return true; } @@ -129,13 +129,13 @@ bool PlanarRotation::makeJacobi(RealScalar x, Scalar y, RealScalar z) * Example: \include Jacobi_makeJacobi.cpp * Output: \verbinclude Jacobi_makeJacobi.out * - * \sa PlanarRotation::makeJacobi(RealScalar, Scalar, RealScalar), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() + * \sa JacobiRotation::makeJacobi(RealScalar, Scalar, RealScalar), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ template template -inline bool PlanarRotation::makeJacobi(const MatrixBase& m, typename Derived::Index p, typename Derived::Index q) +inline bool JacobiRotation::makeJacobi(const MatrixBase& m, typename Derived::Index p, typename Derived::Index q) { - return makeJacobi(ei_real(m.coeff(p,p)), m.coeff(p,q), ei_real(m.coeff(q,q))); + return makeJacobi(internal::real(m.coeff(p,p)), m.coeff(p,q), internal::real(m.coeff(q,q))); } /** Makes \c *this as a Givens rotation \c G such that applying \f$ G^* \f$ to the left of the vector @@ -155,62 +155,62 @@ inline bool PlanarRotation::makeJacobi(const MatrixBase& m, typ * 
\sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ template -void PlanarRotation::makeGivens(const Scalar& p, const Scalar& q, Scalar* z) +void JacobiRotation::makeGivens(const Scalar& p, const Scalar& q, Scalar* z) { - makeGivens(p, q, z, typename ei_meta_if::IsComplex, ei_meta_true, ei_meta_false>::ret()); + makeGivens(p, q, z, typename internal::conditional::IsComplex, internal::true_type, internal::false_type>::type()); } // specialization for complexes template -void PlanarRotation::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, ei_meta_true) +void JacobiRotation::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type) { if(q==Scalar(0)) { - m_c = ei_real(p)<0 ? Scalar(-1) : Scalar(1); + m_c = internal::real(p)<0 ? Scalar(-1) : Scalar(1); m_s = 0; if(r) *r = m_c * p; } else if(p==Scalar(0)) { m_c = 0; - m_s = -q/ei_abs(q); - if(r) *r = ei_abs(q); + m_s = -q/internal::abs(q); + if(r) *r = internal::abs(q); } else { - RealScalar p1 = ei_norm1(p); - RealScalar q1 = ei_norm1(q); + RealScalar p1 = internal::norm1(p); + RealScalar q1 = internal::norm1(q); if(p1>=q1) { Scalar ps = p / p1; - RealScalar p2 = ei_abs2(ps); + RealScalar p2 = internal::abs2(ps); Scalar qs = q / p1; - RealScalar q2 = ei_abs2(qs); + RealScalar q2 = internal::abs2(qs); - RealScalar u = ei_sqrt(RealScalar(1) + q2/p2); - if(ei_real(p)::makeGivens(const Scalar& p, const Scalar& q, Scalar // specialization for reals template -void PlanarRotation::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, ei_meta_false) +void JacobiRotation::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type) { if(q==0) { m_c = p ei_abs(q)) + else if(internal::abs(p) > internal::abs(q)) { Scalar t = q/p; - Scalar u = ei_sqrt(Scalar(1) + ei_abs2(t)); + Scalar u = internal::sqrt(Scalar(1) + internal::abs2(t)); if(p::makeGivens(const Scalar& p, const Scalar& q, Scalar else { Scalar t = p/q; - Scalar u = ei_sqrt(Scalar(1) + ei_abs2(t)); + Scalar u = 
internal::sqrt(Scalar(1) + internal::abs2(t)); if(q::makeGivens(const Scalar& p, const Scalar& q, Scalar * * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ +namespace internal { template -void ei_apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const PlanarRotation& j); +void apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const JacobiRotation& j); +} /** \jacobi_module * Applies the rotation in the plane \a j to the rows \a p and \a q of \c *this, i.e., it computes B = J * B, * with \f$ B = \left ( \begin{array}{cc} \text{*this.row}(p) \\ \text{*this.row}(q) \end{array} \right ) \f$. * - * \sa class PlanarRotation, MatrixBase::applyOnTheRight(), ei_apply_rotation_in_the_plane() + * \sa class JacobiRotation, MatrixBase::applyOnTheRight(), internal::apply_rotation_in_the_plane() */ template template -inline void MatrixBase::applyOnTheLeft(Index p, Index q, const PlanarRotation& j) +inline void MatrixBase::applyOnTheLeft(Index p, Index q, const JacobiRotation& j) { RowXpr x(this->row(p)); RowXpr y(this->row(q)); - ei_apply_rotation_in_the_plane(x, y, j); + internal::apply_rotation_in_the_plane(x, y, j); } /** \ingroup Jacobi_Module * Applies the rotation in the plane \a j to the columns \a p and \a q of \c *this, i.e., it computes B = B * J * with \f$ B = \left ( \begin{array}{cc} \text{*this.col}(p) & \text{*this.col}(q) \end{array} \right ) \f$. 
* - * \sa class PlanarRotation, MatrixBase::applyOnTheLeft(), ei_apply_rotation_in_the_plane() + * \sa class JacobiRotation, MatrixBase::applyOnTheLeft(), internal::apply_rotation_in_the_plane() */ template template -inline void MatrixBase::applyOnTheRight(Index p, Index q, const PlanarRotation& j) +inline void MatrixBase::applyOnTheRight(Index p, Index q, const JacobiRotation& j) { ColXpr x(this->col(p)); ColXpr y(this->col(q)); - ei_apply_rotation_in_the_plane(x, y, j.transpose()); + internal::apply_rotation_in_the_plane(x, y, j.transpose()); } - +namespace internal { template -void /*EIGEN_DONT_INLINE*/ ei_apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const PlanarRotation& j) +void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const JacobiRotation& j) { typedef typename VectorX::Index Index; typedef typename VectorX::Scalar Scalar; - enum { PacketSize = ei_packet_traits::size }; - typedef typename ei_packet_traits::type Packet; - ei_assert(_x.size() == _y.size()); + enum { PacketSize = packet_traits::size }; + typedef typename packet_traits::type Packet; + eigen_assert(_x.size() == _y.size()); Index size = _x.size(); Index incrx = _x.innerStride(); Index incry = _y.innerStride(); @@ -324,32 +326,32 @@ void /*EIGEN_DONT_INLINE*/ ei_apply_rotation_in_the_plane(VectorX& _x, VectorY& // both vectors are sequentially stored in memory => vectorization enum { Peeling = 2 }; - Index alignedStart = ei_first_aligned(y, size); + Index alignedStart = first_aligned(y, size); Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize; - const Packet pc = ei_pset1(j.c()); - const Packet ps = ei_pset1(j.s()); - ei_conj_helper::IsComplex,false> pcj; + const Packet pc = pset1(j.c()); + const Packet ps = pset1(j.s()); + conj_helper::IsComplex,false> pcj; for(Index i=0; i(px); - Packet yi = ei_pload(py); - ei_pstore(px, ei_padd(ei_pmul(pc,xi),pcj.pmul(ps,yi))); - ei_pstore(py, ei_psub(pcj.pmul(pc,yi),ei_pmul(ps,xi))); + 
Packet xi = pload(px); + Packet yi = pload(py); + pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi))); + pstore(py, psub(pcj.pmul(pc,yi),pmul(ps,xi))); px += PacketSize; py += PacketSize; } @@ -359,23 +361,23 @@ void /*EIGEN_DONT_INLINE*/ ei_apply_rotation_in_the_plane(VectorX& _x, VectorY& Index peelingEnd = alignedStart + ((size-alignedStart)/(Peeling*PacketSize))*(Peeling*PacketSize); for(Index i=alignedStart; i(px); - Packet xi1 = ei_ploadu(px+PacketSize); - Packet yi = ei_pload (py); - Packet yi1 = ei_pload (py+PacketSize); - ei_pstoreu(px, ei_padd(ei_pmul(pc,xi),pcj.pmul(ps,yi))); - ei_pstoreu(px+PacketSize, ei_padd(ei_pmul(pc,xi1),pcj.pmul(ps,yi1))); - ei_pstore (py, ei_psub(pcj.pmul(pc,yi),ei_pmul(ps,xi))); - ei_pstore (py+PacketSize, ei_psub(pcj.pmul(pc,yi1),ei_pmul(ps,xi1))); + Packet xi = ploadu(px); + Packet xi1 = ploadu(px+PacketSize); + Packet yi = pload (py); + Packet yi1 = pload (py+PacketSize); + pstoreu(px, padd(pmul(pc,xi),pcj.pmul(ps,yi))); + pstoreu(px+PacketSize, padd(pmul(pc,xi1),pcj.pmul(ps,yi1))); + pstore (py, psub(pcj.pmul(pc,yi),pmul(ps,xi))); + pstore (py+PacketSize, psub(pcj.pmul(pc,yi1),pmul(ps,xi1))); px += Peeling*PacketSize; py += Peeling*PacketSize; } if(alignedEnd!=peelingEnd) { - Packet xi = ei_ploadu(x+peelingEnd); - Packet yi = ei_pload (y+peelingEnd); - ei_pstoreu(x+peelingEnd, ei_padd(ei_pmul(pc,xi),pcj.pmul(ps,yi))); - ei_pstore (y+peelingEnd, ei_psub(pcj.pmul(pc,yi),ei_pmul(ps,xi))); + Packet xi = ploadu(x+peelingEnd); + Packet yi = pload (y+peelingEnd); + pstoreu(x+peelingEnd, padd(pmul(pc,xi),pcj.pmul(ps,yi))); + pstore (y+peelingEnd, psub(pcj.pmul(pc,yi),pmul(ps,xi))); } } @@ -383,8 +385,8 @@ void /*EIGEN_DONT_INLINE*/ ei_apply_rotation_in_the_plane(VectorX& _x, VectorY& { Scalar xi = x[i]; Scalar yi = y[i]; - x[i] = j.c() * xi + ei_conj(j.s()) * yi; - y[i] = -j.s() * xi + ei_conj(j.c()) * yi; + x[i] = j.c() * xi + conj(j.s()) * yi; + y[i] = -j.s() * xi + conj(j.c()) * yi; } } @@ -393,17 +395,17 @@ void /*EIGEN_DONT_INLINE*/ 
ei_apply_rotation_in_the_plane(VectorX& _x, VectorY& (VectorX::Flags & VectorY::Flags & PacketAccessBit) && (VectorX::Flags & VectorY::Flags & AlignedBit)) { - const Packet pc = ei_pset1(j.c()); - const Packet ps = ei_pset1(j.s()); - ei_conj_helper::IsComplex,false> pcj; + const Packet pc = pset1(j.c()); + const Packet ps = pset1(j.s()); + conj_helper::IsComplex,false> pcj; Scalar* EIGEN_RESTRICT px = x; Scalar* EIGEN_RESTRICT py = y; for(Index i=0; i(px); - Packet yi = ei_pload(py); - ei_pstore(px, ei_padd(ei_pmul(pc,xi),pcj.pmul(ps,yi))); - ei_pstore(py, ei_psub(pcj.pmul(pc,yi),ei_pmul(ps,xi))); + Packet xi = pload(px); + Packet yi = pload(py); + pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi))); + pstore(py, psub(pcj.pmul(pc,yi),pmul(ps,xi))); px += PacketSize; py += PacketSize; } @@ -416,12 +418,13 @@ void /*EIGEN_DONT_INLINE*/ ei_apply_rotation_in_the_plane(VectorX& _x, VectorY& { Scalar xi = *x; Scalar yi = *y; - *x = j.c() * xi + ei_conj(j.s()) * yi; - *y = -j.s() * xi + ei_conj(j.c()) * yi; + *x = j.c() * xi + conj(j.s()) * yi; + *y = -j.s() * xi + conj(j.c()) * yi; x += incrx; y += incry; } } } +} #endif // EIGEN_JACOBI_H diff --git a/gtsam/3rdparty/Eigen/src/LU/Determinant.h b/gtsam/3rdparty/Eigen/src/LU/Determinant.h index ea7db9c0f..b4fe36eb0 100644 --- a/gtsam/3rdparty/Eigen/src/LU/Determinant.h +++ b/gtsam/3rdparty/Eigen/src/LU/Determinant.h @@ -25,8 +25,10 @@ #ifndef EIGEN_DETERMINANT_H #define EIGEN_DETERMINANT_H +namespace internal { + template -inline const typename Derived::Scalar ei_bruteforce_det3_helper +inline const typename Derived::Scalar bruteforce_det3_helper (const MatrixBase& matrix, int a, int b, int c) { return matrix.coeff(0,a) @@ -34,7 +36,7 @@ inline const typename Derived::Scalar ei_bruteforce_det3_helper } template -const typename Derived::Scalar ei_bruteforce_det4_helper +const typename Derived::Scalar bruteforce_det4_helper (const MatrixBase& matrix, int j, int k, int m, int n) { return (matrix.coeff(j,0) * matrix.coeff(k,1) - 
matrix.coeff(k,0) * matrix.coeff(j,1)) @@ -43,66 +45,68 @@ const typename Derived::Scalar ei_bruteforce_det4_helper template struct ei_determinant_impl +> struct determinant_impl { - static inline typename ei_traits::Scalar run(const Derived& m) + static inline typename traits::Scalar run(const Derived& m) { if(Derived::ColsAtCompileTime==Dynamic && m.rows()==0) - return typename ei_traits::Scalar(1); + return typename traits::Scalar(1); return m.partialPivLu().determinant(); } }; -template struct ei_determinant_impl +template struct determinant_impl { - static inline typename ei_traits::Scalar run(const Derived& m) + static inline typename traits::Scalar run(const Derived& m) { return m.coeff(0,0); } }; -template struct ei_determinant_impl +template struct determinant_impl { - static inline typename ei_traits::Scalar run(const Derived& m) + static inline typename traits::Scalar run(const Derived& m) { return m.coeff(0,0) * m.coeff(1,1) - m.coeff(1,0) * m.coeff(0,1); } }; -template struct ei_determinant_impl +template struct determinant_impl { - static inline typename ei_traits::Scalar run(const Derived& m) + static inline typename traits::Scalar run(const Derived& m) { - return ei_bruteforce_det3_helper(m,0,1,2) - - ei_bruteforce_det3_helper(m,1,0,2) - + ei_bruteforce_det3_helper(m,2,0,1); + return bruteforce_det3_helper(m,0,1,2) + - bruteforce_det3_helper(m,1,0,2) + + bruteforce_det3_helper(m,2,0,1); } }; -template struct ei_determinant_impl +template struct determinant_impl { - static typename ei_traits::Scalar run(const Derived& m) + static typename traits::Scalar run(const Derived& m) { // trick by Martin Costabel to compute 4x4 det with only 30 muls - return ei_bruteforce_det4_helper(m,0,1,2,3) - - ei_bruteforce_det4_helper(m,0,2,1,3) - + ei_bruteforce_det4_helper(m,0,3,1,2) - + ei_bruteforce_det4_helper(m,1,2,0,3) - - ei_bruteforce_det4_helper(m,1,3,0,2) - + ei_bruteforce_det4_helper(m,2,3,0,1); + return bruteforce_det4_helper(m,0,1,2,3) + - 
bruteforce_det4_helper(m,0,2,1,3) + + bruteforce_det4_helper(m,0,3,1,2) + + bruteforce_det4_helper(m,1,2,0,3) + - bruteforce_det4_helper(m,1,3,0,2) + + bruteforce_det4_helper(m,2,3,0,1); } }; +} // end namespace internal + /** \lu_module * * \returns the determinant of this matrix */ template -inline typename ei_traits::Scalar MatrixBase::determinant() const +inline typename internal::traits::Scalar MatrixBase::determinant() const { assert(rows() == cols()); - typedef typename ei_nested::type Nested; - return ei_determinant_impl::type>::run(derived()); + typedef typename internal::nested::type Nested; + return internal::determinant_impl::type>::run(derived()); } #endif // EIGEN_DETERMINANT_H diff --git a/gtsam/3rdparty/Eigen/src/LU/FullPivLU.h b/gtsam/3rdparty/Eigen/src/LU/FullPivLU.h index 558b1bd90..339d7845c 100644 --- a/gtsam/3rdparty/Eigen/src/LU/FullPivLU.h +++ b/gtsam/3rdparty/Eigen/src/LU/FullPivLU.h @@ -68,10 +68,10 @@ template class FullPivLU }; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef typename ei_traits::StorageKind StorageKind; + typedef typename internal::traits::StorageKind StorageKind; typedef typename MatrixType::Index Index; - typedef typename ei_plain_row_type::type IntRowVectorType; - typedef typename ei_plain_col_type::type IntColVectorType; + typedef typename internal::plain_row_type::type IntRowVectorType; + typedef typename internal::plain_col_type::type IntColVectorType; typedef PermutationMatrix PermutationQType; typedef PermutationMatrix PermutationPType; @@ -115,7 +115,7 @@ template class FullPivLU */ inline const MatrixType& matrixLU() const { - ei_assert(m_isInitialized && "LU is not initialized."); + eigen_assert(m_isInitialized && "LU is not initialized."); return m_lu; } @@ -128,7 +128,7 @@ template class FullPivLU */ inline Index nonzeroPivots() const { - ei_assert(m_isInitialized && "LU is not initialized."); + eigen_assert(m_isInitialized && "LU is not initialized."); 
return m_nonzero_pivots; } @@ -143,7 +143,7 @@ template class FullPivLU */ inline const PermutationPType& permutationP() const { - ei_assert(m_isInitialized && "LU is not initialized."); + eigen_assert(m_isInitialized && "LU is not initialized."); return m_p; } @@ -153,7 +153,7 @@ template class FullPivLU */ inline const PermutationQType& permutationQ() const { - ei_assert(m_isInitialized && "LU is not initialized."); + eigen_assert(m_isInitialized && "LU is not initialized."); return m_q; } @@ -171,10 +171,10 @@ template class FullPivLU * * \sa image() */ - inline const ei_kernel_retval kernel() const + inline const internal::kernel_retval kernel() const { - ei_assert(m_isInitialized && "LU is not initialized."); - return ei_kernel_retval(*this); + eigen_assert(m_isInitialized && "LU is not initialized."); + return internal::kernel_retval(*this); } /** \returns the image of the matrix, also called its column-space. The columns of the returned matrix @@ -196,11 +196,11 @@ template class FullPivLU * * \sa kernel() */ - inline const ei_image_retval + inline const internal::image_retval image(const MatrixType& originalMatrix) const { - ei_assert(m_isInitialized && "LU is not initialized."); - return ei_image_retval(*this, originalMatrix); + eigen_assert(m_isInitialized && "LU is not initialized."); + return internal::image_retval(*this, originalMatrix); } /** \return a solution x to the equation Ax=b, where A is the matrix of which @@ -223,11 +223,11 @@ template class FullPivLU * \sa TriangularView::solve(), kernel(), inverse() */ template - inline const ei_solve_retval + inline const internal::solve_retval solve(const MatrixBase& b) const { - ei_assert(m_isInitialized && "LU is not initialized."); - return ei_solve_retval(*this, b.derived()); + eigen_assert(m_isInitialized && "LU is not initialized."); + return internal::solve_retval(*this, b.derived()); } /** \returns the determinant of the matrix of which @@ -245,7 +245,7 @@ template class FullPivLU * * \sa 
MatrixBase::determinant() */ - typename ei_traits::Scalar determinant() const; + typename internal::traits::Scalar determinant() const; /** Allows to prescribe a threshold to be used by certain methods, such as rank(), * who need to determine when pivots are to be considered nonzero. This is not used for the @@ -290,7 +290,7 @@ template class FullPivLU */ RealScalar threshold() const { - ei_assert(m_isInitialized || m_usePrescribedThreshold); + eigen_assert(m_isInitialized || m_usePrescribedThreshold); return m_usePrescribedThreshold ? m_prescribedThreshold // this formula comes from experimenting (see "LU precision tuning" thread on the list) // and turns out to be identical to Higham's formula used already in LDLt. @@ -305,11 +305,11 @@ template class FullPivLU */ inline Index rank() const { - ei_assert(m_isInitialized && "LU is not initialized."); - RealScalar premultiplied_threshold = ei_abs(m_maxpivot) * threshold(); + eigen_assert(m_isInitialized && "LU is not initialized."); + RealScalar premultiplied_threshold = internal::abs(m_maxpivot) * threshold(); Index result = 0; for(Index i = 0; i < m_nonzero_pivots; ++i) - result += (ei_abs(m_lu.coeff(i,i)) > premultiplied_threshold); + result += (internal::abs(m_lu.coeff(i,i)) > premultiplied_threshold); return result; } @@ -321,7 +321,7 @@ template class FullPivLU */ inline Index dimensionOfKernel() const { - ei_assert(m_isInitialized && "LU is not initialized."); + eigen_assert(m_isInitialized && "LU is not initialized."); return cols() - rank(); } @@ -334,7 +334,7 @@ template class FullPivLU */ inline bool isInjective() const { - ei_assert(m_isInitialized && "LU is not initialized."); + eigen_assert(m_isInitialized && "LU is not initialized."); return rank() == cols(); } @@ -347,7 +347,7 @@ template class FullPivLU */ inline bool isSurjective() const { - ei_assert(m_isInitialized && "LU is not initialized."); + eigen_assert(m_isInitialized && "LU is not initialized."); return rank() == rows(); } @@ -359,7 
+359,7 @@ template class FullPivLU */ inline bool isInvertible() const { - ei_assert(m_isInitialized && "LU is not initialized."); + eigen_assert(m_isInitialized && "LU is not initialized."); return isInjective() && (m_lu.rows() == m_lu.cols()); } @@ -370,11 +370,11 @@ template class FullPivLU * * \sa MatrixBase::inverse() */ - inline const ei_solve_retval inverse() const + inline const internal::solve_retval inverse() const { - ei_assert(m_isInitialized && "LU is not initialized."); - ei_assert(m_lu.rows() == m_lu.cols() && "You can't take the inverse of a non-square matrix!"); - return ei_solve_retval + eigen_assert(m_isInitialized && "LU is not initialized."); + eigen_assert(m_lu.rows() == m_lu.cols() && "You can't take the inverse of a non-square matrix!"); + return internal::solve_retval (*this, MatrixType::Identity(m_lu.rows(), m_lu.cols())); } @@ -519,10 +519,10 @@ FullPivLU& FullPivLU::compute(const MatrixType& matrix) } template -typename ei_traits::Scalar FullPivLU::determinant() const +typename internal::traits::Scalar FullPivLU::determinant() const { - ei_assert(m_isInitialized && "LU is not initialized."); - ei_assert(m_lu.rows() == m_lu.cols() && "You can't take the determinant of a non-square matrix!"); + eigen_assert(m_isInitialized && "LU is not initialized."); + eigen_assert(m_lu.rows() == m_lu.cols() && "You can't take the determinant of a non-square matrix!"); return Scalar(m_det_pq) * Scalar(m_lu.diagonal().prod()); } @@ -532,7 +532,7 @@ typename ei_traits::Scalar FullPivLU::determinant() cons template MatrixType FullPivLU::reconstructedMatrix() const { - ei_assert(m_isInitialized && "LU is not initialized."); + eigen_assert(m_isInitialized && "LU is not initialized."); const Index smalldim = std::min(m_lu.rows(), m_lu.cols()); // LU MatrixType res(m_lu.rows(),m_lu.cols()); @@ -553,9 +553,10 @@ MatrixType FullPivLU::reconstructedMatrix() const /********* Implementation of kernel() **************************************************/ +namespace 
internal { template -struct ei_kernel_retval > - : ei_kernel_retval_base > +struct kernel_retval > + : kernel_retval_base > { EIGEN_MAKE_KERNEL_HELPERS(FullPivLU<_MatrixType>) @@ -596,9 +597,9 @@ struct ei_kernel_retval > RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold(); Index p = 0; for(Index i = 0; i < dec().nonzeroPivots(); ++i) - if(ei_abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold) + if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold) pivots.coeffRef(p++) = i; - ei_internal_assert(p == rank()); + eigen_internal_assert(p == rank()); // we construct a temporaty trapezoid matrix m, by taking the U matrix and // permuting the rows and cols to bring the nonnegligible pivots to the top of @@ -639,8 +640,8 @@ struct ei_kernel_retval > /***** Implementation of image() *****************************************************/ template -struct ei_image_retval > - : ei_image_retval_base > +struct image_retval > + : image_retval_base > { EIGEN_MAKE_IMAGE_HELPERS(FullPivLU<_MatrixType>) @@ -664,9 +665,9 @@ struct ei_image_retval > RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold(); Index p = 0; for(Index i = 0; i < dec().nonzeroPivots(); ++i) - if(ei_abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold) + if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold) pivots.coeffRef(p++) = i; - ei_internal_assert(p == rank()); + eigen_internal_assert(p == rank()); for(Index i = 0; i < rank(); ++i) dst.col(i) = originalMatrix().col(dec().permutationQ().indices().coeff(pivots.coeff(i))); @@ -676,8 +677,8 @@ struct ei_image_retval > /***** Implementation of solve() *****************************************************/ template -struct ei_solve_retval, Rhs> - : ei_solve_retval_base, Rhs> +struct solve_retval, Rhs> + : solve_retval_base, Rhs> { EIGEN_MAKE_SOLVE_HELPERS(FullPivLU<_MatrixType>,Rhs) @@ -693,7 +694,7 @@ struct ei_solve_retval, Rhs> const Index rows = dec().rows(), cols = dec().cols(), 
nonzero_pivots = dec().nonzeroPivots(); - ei_assert(rhs().rows() == rows); + eigen_assert(rhs().rows() == rows); const Index smalldim = std::min(rows, cols); if(nonzero_pivots == 0) @@ -733,6 +734,8 @@ struct ei_solve_retval, Rhs> } }; +} // end namespace internal + /******* MatrixBase methods *****************************************************************/ /** \lu_module diff --git a/gtsam/3rdparty/Eigen/src/LU/Inverse.h b/gtsam/3rdparty/Eigen/src/LU/Inverse.h index b587e3309..2d3e6d105 100644 --- a/gtsam/3rdparty/Eigen/src/LU/Inverse.h +++ b/gtsam/3rdparty/Eigen/src/LU/Inverse.h @@ -25,12 +25,14 @@ #ifndef EIGEN_INVERSE_H #define EIGEN_INVERSE_H +namespace internal { + /********************************** *** General case implementation *** **********************************/ template -struct ei_compute_inverse +struct compute_inverse { static inline void run(const MatrixType& matrix, ResultType& result) { @@ -39,14 +41,14 @@ struct ei_compute_inverse }; template -struct ei_compute_inverse_and_det_with_check { /* nothing! general case not supported. */ }; +struct compute_inverse_and_det_with_check { /* nothing! general case not supported. 
*/ }; /**************************** *** Size 1 implementation *** ****************************/ template -struct ei_compute_inverse +struct compute_inverse { static inline void run(const MatrixType& matrix, ResultType& result) { @@ -56,7 +58,7 @@ struct ei_compute_inverse }; template -struct ei_compute_inverse_and_det_with_check +struct compute_inverse_and_det_with_check { static inline void run( const MatrixType& matrix, @@ -67,7 +69,7 @@ struct ei_compute_inverse_and_det_with_check ) { determinant = matrix.coeff(0,0); - invertible = ei_abs(determinant) > absDeterminantThreshold; + invertible = abs(determinant) > absDeterminantThreshold; if(invertible) result.coeffRef(0,0) = typename ResultType::Scalar(1) / determinant; } }; @@ -77,7 +79,7 @@ struct ei_compute_inverse_and_det_with_check ****************************/ template -inline void ei_compute_inverse_size2_helper( +inline void compute_inverse_size2_helper( const MatrixType& matrix, const typename ResultType::Scalar& invdet, ResultType& result) { @@ -88,18 +90,18 @@ inline void ei_compute_inverse_size2_helper( } template -struct ei_compute_inverse +struct compute_inverse { static inline void run(const MatrixType& matrix, ResultType& result) { typedef typename ResultType::Scalar Scalar; const Scalar invdet = typename MatrixType::Scalar(1) / matrix.determinant(); - ei_compute_inverse_size2_helper(matrix, invdet, result); + compute_inverse_size2_helper(matrix, invdet, result); } }; template -struct ei_compute_inverse_and_det_with_check +struct compute_inverse_and_det_with_check { static inline void run( const MatrixType& matrix, @@ -111,10 +113,10 @@ struct ei_compute_inverse_and_det_with_check { typedef typename ResultType::Scalar Scalar; determinant = matrix.determinant(); - invertible = ei_abs(determinant) > absDeterminantThreshold; + invertible = abs(determinant) > absDeterminantThreshold; if(!invertible) return; const Scalar invdet = Scalar(1) / determinant; - ei_compute_inverse_size2_helper(matrix, invdet, 
inverse); + compute_inverse_size2_helper(matrix, invdet, inverse); } }; @@ -123,7 +125,7 @@ struct ei_compute_inverse_and_det_with_check ****************************/ template -inline typename MatrixType::Scalar ei_3x3_cofactor(const MatrixType& m) +inline typename MatrixType::Scalar cofactor_3x3(const MatrixType& m) { enum { i1 = (i+1) % 3, @@ -136,39 +138,39 @@ inline typename MatrixType::Scalar ei_3x3_cofactor(const MatrixType& m) } template -inline void ei_compute_inverse_size3_helper( +inline void compute_inverse_size3_helper( const MatrixType& matrix, const typename ResultType::Scalar& invdet, const Matrix& cofactors_col0, ResultType& result) { result.row(0) = cofactors_col0 * invdet; - result.coeffRef(1,0) = ei_3x3_cofactor(matrix) * invdet; - result.coeffRef(1,1) = ei_3x3_cofactor(matrix) * invdet; - result.coeffRef(1,2) = ei_3x3_cofactor(matrix) * invdet; - result.coeffRef(2,0) = ei_3x3_cofactor(matrix) * invdet; - result.coeffRef(2,1) = ei_3x3_cofactor(matrix) * invdet; - result.coeffRef(2,2) = ei_3x3_cofactor(matrix) * invdet; + result.coeffRef(1,0) = cofactor_3x3(matrix) * invdet; + result.coeffRef(1,1) = cofactor_3x3(matrix) * invdet; + result.coeffRef(1,2) = cofactor_3x3(matrix) * invdet; + result.coeffRef(2,0) = cofactor_3x3(matrix) * invdet; + result.coeffRef(2,1) = cofactor_3x3(matrix) * invdet; + result.coeffRef(2,2) = cofactor_3x3(matrix) * invdet; } template -struct ei_compute_inverse +struct compute_inverse { static inline void run(const MatrixType& matrix, ResultType& result) { typedef typename ResultType::Scalar Scalar; - Matrix cofactors_col0; - cofactors_col0.coeffRef(0) = ei_3x3_cofactor(matrix); - cofactors_col0.coeffRef(1) = ei_3x3_cofactor(matrix); - cofactors_col0.coeffRef(2) = ei_3x3_cofactor(matrix); + Matrix cofactors_col0; + cofactors_col0.coeffRef(0) = cofactor_3x3(matrix); + cofactors_col0.coeffRef(1) = cofactor_3x3(matrix); + cofactors_col0.coeffRef(2) = cofactor_3x3(matrix); const Scalar det = 
(cofactors_col0.cwiseProduct(matrix.col(0))).sum(); const Scalar invdet = Scalar(1) / det; - ei_compute_inverse_size3_helper(matrix, invdet, cofactors_col0, result); + compute_inverse_size3_helper(matrix, invdet, cofactors_col0, result); } }; template -struct ei_compute_inverse_and_det_with_check +struct compute_inverse_and_det_with_check { static inline void run( const MatrixType& matrix, @@ -180,14 +182,14 @@ struct ei_compute_inverse_and_det_with_check { typedef typename ResultType::Scalar Scalar; Matrix cofactors_col0; - cofactors_col0.coeffRef(0) = ei_3x3_cofactor(matrix); - cofactors_col0.coeffRef(1) = ei_3x3_cofactor(matrix); - cofactors_col0.coeffRef(2) = ei_3x3_cofactor(matrix); + cofactors_col0.coeffRef(0) = cofactor_3x3(matrix); + cofactors_col0.coeffRef(1) = cofactor_3x3(matrix); + cofactors_col0.coeffRef(2) = cofactor_3x3(matrix); determinant = (cofactors_col0.cwiseProduct(matrix.col(0))).sum(); - invertible = ei_abs(determinant) > absDeterminantThreshold; + invertible = abs(determinant) > absDeterminantThreshold; if(!invertible) return; const Scalar invdet = Scalar(1) / determinant; - ei_compute_inverse_size3_helper(matrix, invdet, cofactors_col0, inverse); + compute_inverse_size3_helper(matrix, invdet, cofactors_col0, inverse); } }; @@ -196,7 +198,7 @@ struct ei_compute_inverse_and_det_with_check ****************************/ template -inline const typename Derived::Scalar ei_general_det3_helper +inline const typename Derived::Scalar general_det3_helper (const MatrixBase& matrix, int i1, int i2, int i3, int j1, int j2, int j3) { return matrix.coeff(i1,j1) @@ -204,7 +206,7 @@ inline const typename Derived::Scalar ei_general_det3_helper } template -inline typename MatrixType::Scalar ei_4x4_cofactor(const MatrixType& matrix) +inline typename MatrixType::Scalar cofactor_4x4(const MatrixType& matrix) { enum { i1 = (i+1) % 4, @@ -214,45 +216,45 @@ inline typename MatrixType::Scalar ei_4x4_cofactor(const MatrixType& matrix) j2 = (j+2) % 4, j3 = (j+3) % 4 }; 
- return ei_general_det3_helper(matrix, i1, i2, i3, j1, j2, j3) - + ei_general_det3_helper(matrix, i2, i3, i1, j1, j2, j3) - + ei_general_det3_helper(matrix, i3, i1, i2, j1, j2, j3); + return general_det3_helper(matrix, i1, i2, i3, j1, j2, j3) + + general_det3_helper(matrix, i2, i3, i1, j1, j2, j3) + + general_det3_helper(matrix, i3, i1, i2, j1, j2, j3); } template -struct ei_compute_inverse_size4 +struct compute_inverse_size4 { static void run(const MatrixType& matrix, ResultType& result) { - result.coeffRef(0,0) = ei_4x4_cofactor(matrix); - result.coeffRef(1,0) = -ei_4x4_cofactor(matrix); - result.coeffRef(2,0) = ei_4x4_cofactor(matrix); - result.coeffRef(3,0) = -ei_4x4_cofactor(matrix); - result.coeffRef(0,2) = ei_4x4_cofactor(matrix); - result.coeffRef(1,2) = -ei_4x4_cofactor(matrix); - result.coeffRef(2,2) = ei_4x4_cofactor(matrix); - result.coeffRef(3,2) = -ei_4x4_cofactor(matrix); - result.coeffRef(0,1) = -ei_4x4_cofactor(matrix); - result.coeffRef(1,1) = ei_4x4_cofactor(matrix); - result.coeffRef(2,1) = -ei_4x4_cofactor(matrix); - result.coeffRef(3,1) = ei_4x4_cofactor(matrix); - result.coeffRef(0,3) = -ei_4x4_cofactor(matrix); - result.coeffRef(1,3) = ei_4x4_cofactor(matrix); - result.coeffRef(2,3) = -ei_4x4_cofactor(matrix); - result.coeffRef(3,3) = ei_4x4_cofactor(matrix); + result.coeffRef(0,0) = cofactor_4x4(matrix); + result.coeffRef(1,0) = -cofactor_4x4(matrix); + result.coeffRef(2,0) = cofactor_4x4(matrix); + result.coeffRef(3,0) = -cofactor_4x4(matrix); + result.coeffRef(0,2) = cofactor_4x4(matrix); + result.coeffRef(1,2) = -cofactor_4x4(matrix); + result.coeffRef(2,2) = cofactor_4x4(matrix); + result.coeffRef(3,2) = -cofactor_4x4(matrix); + result.coeffRef(0,1) = -cofactor_4x4(matrix); + result.coeffRef(1,1) = cofactor_4x4(matrix); + result.coeffRef(2,1) = -cofactor_4x4(matrix); + result.coeffRef(3,1) = cofactor_4x4(matrix); + result.coeffRef(0,3) = -cofactor_4x4(matrix); + result.coeffRef(1,3) = cofactor_4x4(matrix); + result.coeffRef(2,3) = 
-cofactor_4x4(matrix); + result.coeffRef(3,3) = cofactor_4x4(matrix); result /= (matrix.col(0).cwiseProduct(result.row(0).transpose())).sum(); } }; template -struct ei_compute_inverse - : ei_compute_inverse_size4 + : compute_inverse_size4 { }; template -struct ei_compute_inverse_and_det_with_check +struct compute_inverse_and_det_with_check { static inline void run( const MatrixType& matrix, @@ -263,8 +265,8 @@ struct ei_compute_inverse_and_det_with_check ) { determinant = matrix.determinant(); - invertible = ei_abs(determinant) > absDeterminantThreshold; - if(invertible) ei_compute_inverse::run(matrix, inverse); + invertible = abs(determinant) > absDeterminantThreshold; + if(invertible) compute_inverse::run(matrix, inverse); } }; @@ -273,20 +275,20 @@ struct ei_compute_inverse_and_det_with_check *************************/ template -struct ei_traits > +struct traits > { typedef typename MatrixType::PlainObject ReturnType; }; template -struct ei_inverse_impl : public ReturnByValue > +struct inverse_impl : public ReturnByValue > { typedef typename MatrixType::Index Index; - typedef typename ei_eval::type MatrixTypeNested; - typedef typename ei_cleantype::type MatrixTypeNestedCleaned; + typedef typename internal::eval::type MatrixTypeNested; + typedef typename remove_all::type MatrixTypeNestedCleaned; const MatrixTypeNested m_matrix; - ei_inverse_impl(const MatrixType& matrix) + inverse_impl(const MatrixType& matrix) : m_matrix(matrix) {} @@ -297,13 +299,15 @@ struct ei_inverse_impl : public ReturnByValue > { const int Size = EIGEN_PLAIN_ENUM_MIN(MatrixType::ColsAtCompileTime,Dest::ColsAtCompileTime); EIGEN_ONLY_USED_FOR_DEBUG(Size); - ei_assert(( (Size<=1) || (Size>4) || (ei_extract_data(m_matrix)!=ei_extract_data(dst))) + eigen_assert(( (Size<=1) || (Size>4) || (extract_data(m_matrix)!=extract_data(dst))) && "Aliasing problem detected in inverse(), you need to do inverse().eval() here."); - ei_compute_inverse::run(m_matrix, dst); + compute_inverse::run(m_matrix, 
dst); } }; +} // end namespace internal + /** \lu_module * * \returns the matrix inverse of this matrix. @@ -322,11 +326,11 @@ struct ei_inverse_impl : public ReturnByValue > * \sa computeInverseAndDetWithCheck() */ template -inline const ei_inverse_impl MatrixBase::inverse() const +inline const internal::inverse_impl MatrixBase::inverse() const { EIGEN_STATIC_ASSERT(!NumTraits::IsInteger,THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES) - ei_assert(rows() == cols()); - return ei_inverse_impl(derived()); + eigen_assert(rows() == cols()); + return internal::inverse_impl(derived()); } /** \lu_module @@ -357,15 +361,15 @@ inline void MatrixBase::computeInverseAndDetWithCheck( ) const { // i'd love to put some static assertions there, but SFINAE means that they have no effect... - ei_assert(rows() == cols()); + eigen_assert(rows() == cols()); // for 2x2, it's worth giving a chance to avoid evaluating. // for larger sizes, evaluating has negligible cost and limits code size. - typedef typename ei_meta_if< + typedef typename internal::conditional< RowsAtCompileTime == 2, - typename ei_cleantype::type>::type, + typename internal::remove_all::type>::type, PlainObject - >::ret MatrixType; - ei_compute_inverse_and_det_with_check::run + >::type MatrixType; + internal::compute_inverse_and_det_with_check::run (derived(), absDeterminantThreshold, inverse, determinant, invertible); } @@ -396,7 +400,7 @@ inline void MatrixBase::computeInverseWithCheck( { RealScalar determinant; // i'd love to put some static assertions there, but SFINAE means that they have no effect... 
- ei_assert(rows() == cols()); + eigen_assert(rows() == cols()); computeInverseAndDetWithCheck(inverse,determinant,invertible,absDeterminantThreshold); } diff --git a/gtsam/3rdparty/Eigen/src/LU/PartialPivLU.h b/gtsam/3rdparty/Eigen/src/LU/PartialPivLU.h index fe91ecd3f..2533a3874 100644 --- a/gtsam/3rdparty/Eigen/src/LU/PartialPivLU.h +++ b/gtsam/3rdparty/Eigen/src/LU/PartialPivLU.h @@ -71,7 +71,7 @@ template class PartialPivLU }; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef typename ei_traits::StorageKind StorageKind; + typedef typename internal::traits::StorageKind StorageKind; typedef typename MatrixType::Index Index; typedef PermutationMatrix PermutationType; typedef Transpositions TranspositionType; @@ -112,7 +112,7 @@ template class PartialPivLU */ inline const MatrixType& matrixLU() const { - ei_assert(m_isInitialized && "PartialPivLU is not initialized."); + eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); return m_lu; } @@ -120,7 +120,7 @@ template class PartialPivLU */ inline const PermutationType& permutationP() const { - ei_assert(m_isInitialized && "PartialPivLU is not initialized."); + eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); return m_p; } @@ -142,11 +142,11 @@ template class PartialPivLU * \sa TriangularView::solve(), inverse(), computeInverse() */ template - inline const ei_solve_retval + inline const internal::solve_retval solve(const MatrixBase& b) const { - ei_assert(m_isInitialized && "PartialPivLU is not initialized."); - return ei_solve_retval(*this, b.derived()); + eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); + return internal::solve_retval(*this, b.derived()); } /** \returns the inverse of the matrix of which *this is the LU decomposition. 
@@ -156,10 +156,10 @@ template class PartialPivLU * * \sa MatrixBase::inverse(), LU::inverse() */ - inline const ei_solve_retval inverse() const + inline const internal::solve_retval inverse() const { - ei_assert(m_isInitialized && "PartialPivLU is not initialized."); - return ei_solve_retval + eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); + return internal::solve_retval (*this, MatrixType::Identity(m_lu.rows(), m_lu.cols())); } @@ -176,7 +176,7 @@ template class PartialPivLU * * \sa MatrixBase::determinant() */ - typename ei_traits::Scalar determinant() const; + typename internal::traits::Scalar determinant() const; MatrixType reconstructedMatrix() const; @@ -222,9 +222,11 @@ PartialPivLU::PartialPivLU(const MatrixType& matrix) compute(matrix); } -/** \internal This is the blocked version of ei_fullpivlu_unblocked() */ -template -struct ei_partial_lu_impl +namespace internal { + +/** \internal This is the blocked version of fullpivlu_unblocked() */ +template +struct partial_lu_impl { // FIXME add a stride to Map, so that the following mapping becomes easier, // another option would be to create an expression being able to automatically @@ -245,51 +247,50 @@ struct ei_partial_lu_impl * of columns of the matrix \a lu, and an integer \a nb_transpositions * which returns the actual number of transpositions. * - * \returns false if some pivot is exactly zero, in which case the matrix is left with - * undefined coefficients (to avoid generating inf/nan values). Returns true - * otherwise. + * \returns The index of the first pivot which is exactly zero if any, or a negative number otherwise. 
*/ - static bool unblocked_lu(MatrixType& lu, Index* row_transpositions, Index& nb_transpositions) + static Index unblocked_lu(MatrixType& lu, PivIndex* row_transpositions, PivIndex& nb_transpositions) { const Index rows = lu.rows(); - const Index size = std::min(lu.rows(),lu.cols()); + const Index cols = lu.cols(); + const Index size = std::min(rows,cols); nb_transpositions = 0; + int first_zero_pivot = -1; for(Index k = 0; k < size; ++k) { + Index rrows = rows-k-1; + Index rcols = cols-k-1; + Index row_of_biggest_in_col; RealScalar biggest_in_corner = lu.col(k).tail(rows-k).cwiseAbs().maxCoeff(&row_of_biggest_in_col); row_of_biggest_in_col += k; - if(biggest_in_corner == 0) // the pivot is exactly zero: the matrix is singular - { - // end quickly, avoid generating inf/nan values. Although in this unblocked_lu case - // the result is still valid, there's no need to boast about it because - // the blocked_lu code can't guarantee the same. - // before exiting, make sure to initialize the still uninitialized row_transpositions - // in a sane state without destroying what we already have. - for(Index i = k; i < size; i++) - row_transpositions[i] = i; - return false; - } - row_transpositions[k] = row_of_biggest_in_col; - if(k != row_of_biggest_in_col) + if(biggest_in_corner != 0) { - lu.row(k).swap(lu.row(row_of_biggest_in_col)); - ++nb_transpositions; + if(k != row_of_biggest_in_col) + { + lu.row(k).swap(lu.row(row_of_biggest_in_col)); + ++nb_transpositions; + } + + // FIXME shall we introduce a safe quotient expression in cas 1/lu.coeff(k,k) + // overflow but not the actual quotient? 
+ lu.col(k).tail(rrows) /= lu.coeff(k,k); + } + else if(first_zero_pivot==-1) + { + // the pivot is exactly zero, we record the index of the first pivot which is exactly 0, + // and continue the factorization such we still have A = PLU + first_zero_pivot = k; } if(k > > */ - static bool blocked_lu(Index rows, Index cols, Scalar* lu_data, Index luStride, Index* row_transpositions, Index& nb_transpositions, Index maxBlockSize=256) + static Index blocked_lu(Index rows, Index cols, Scalar* lu_data, Index luStride, PivIndex* row_transpositions, PivIndex& nb_transpositions, Index maxBlockSize=256) { MapLU lu1(lu_data,StorageOrder==RowMajor?rows:luStride,StorageOrder==RowMajor?luStride:cols); MatrixType lu(lu1,0,0,rows,cols); @@ -332,6 +331,7 @@ struct ei_partial_lu_impl } nb_transpositions = 0; + int first_zero_pivot = -1; for(Index k = 0; k < size; k+=blockSize) { Index bs = std::min(size-k,blockSize); // actual size of the block @@ -349,21 +349,15 @@ struct ei_partial_lu_impl BlockType A21(lu,k+bs,k,trows,bs); BlockType A22(lu,k+bs,k+bs,trows,tsize); - Index nb_transpositions_in_panel; + PivIndex nb_transpositions_in_panel; // recursively call the blocked LU algorithm on [A11^T A21^T]^T // with a very small blocking size: - if(!blocked_lu(trows+bs, bs, &lu.coeffRef(k,k), luStride, - row_transpositions+k, nb_transpositions_in_panel, 16)) - { - // end quickly with undefined coefficients, just avoid generating inf/nan values. - // before exiting, make sure to initialize the still uninitialized row_transpositions - // in a sane state without destroying what we already have. 
- for(Index i=k; i=0 && first_zero_pivot==-1) + first_zero_pivot = k+ret; + nb_transpositions += nb_transpositions_in_panel; // update permutations and apply them to A_0 for(Index i=k; i -void ei_partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, typename MatrixType::Index& nb_transpositions) +void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, typename TranspositionType::Index& nb_transpositions) { - ei_assert(lu.cols() == row_transpositions.size()); - ei_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1); + eigen_assert(lu.cols() == row_transpositions.size()); + eigen_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1); - ei_partial_lu_impl - + partial_lu_impl + ::blocked_lu(lu.rows(), lu.cols(), &lu.coeffRef(0,0), lu.outerStride(), &row_transpositions.coeffRef(0), nb_transpositions); } +} // end namespace internal + template PartialPivLU& PartialPivLU::compute(const MatrixType& matrix) { m_lu = matrix; - ei_assert(matrix.rows() == matrix.cols() && "PartialPivLU is only for square (and moreover invertible) matrices"); + eigen_assert(matrix.rows() == matrix.cols() && "PartialPivLU is only for square (and moreover invertible) matrices"); const Index size = matrix.rows(); m_rowsTranspositions.resize(size); - Index nb_transpositions; - ei_partial_lu_inplace(m_lu, m_rowsTranspositions, nb_transpositions); + typename TranspositionType::Index nb_transpositions; + internal::partial_lu_inplace(m_lu, m_rowsTranspositions, nb_transpositions); m_det_p = (nb_transpositions%2) ? 
-1 : 1; m_p = m_rowsTranspositions; @@ -421,9 +417,9 @@ PartialPivLU& PartialPivLU::compute(const MatrixType& ma } template -typename ei_traits::Scalar PartialPivLU::determinant() const +typename internal::traits::Scalar PartialPivLU::determinant() const { - ei_assert(m_isInitialized && "PartialPivLU is not initialized."); + eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); return Scalar(m_det_p) * m_lu.diagonal().prod(); } @@ -433,7 +429,7 @@ typename ei_traits::Scalar PartialPivLU::determinant() c template MatrixType PartialPivLU::reconstructedMatrix() const { - ei_assert(m_isInitialized && "LU is not initialized."); + eigen_assert(m_isInitialized && "LU is not initialized."); // LU MatrixType res = m_lu.template triangularView().toDenseMatrix() * m_lu.template triangularView(); @@ -446,9 +442,11 @@ MatrixType PartialPivLU::reconstructedMatrix() const /***** Implementation of solve() *****************************************************/ +namespace internal { + template -struct ei_solve_retval, Rhs> - : ei_solve_retval_base, Rhs> +struct solve_retval, Rhs> + : solve_retval_base, Rhs> { EIGEN_MAKE_SOLVE_HELPERS(PartialPivLU<_MatrixType>,Rhs) @@ -461,7 +459,7 @@ struct ei_solve_retval, Rhs> * Step 3: replace c by the solution x to Ux = c. */ - ei_assert(rhs().rows() == dec().matrixLU().rows()); + eigen_assert(rhs().rows() == dec().matrixLU().rows()); // Step 1 dst = dec().permutationP() * rhs(); @@ -474,6 +472,8 @@ struct ei_solve_retval, Rhs> } }; +} // end namespace internal + /******** MatrixBase methods *******/ /** \lu_module @@ -489,6 +489,7 @@ MatrixBase::partialPivLu() const return PartialPivLU(eval()); } +#if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS /** \lu_module * * Synonym of partialPivLu(). 
@@ -503,5 +504,6 @@ MatrixBase::lu() const { return PartialPivLU(eval()); } +#endif #endif // EIGEN_PARTIALLU_H diff --git a/gtsam/3rdparty/Eigen/src/LU/arch/Inverse_SSE.h b/gtsam/3rdparty/Eigen/src/LU/arch/Inverse_SSE.h index 6d497d326..0fe9be388 100644 --- a/gtsam/3rdparty/Eigen/src/LU/arch/Inverse_SSE.h +++ b/gtsam/3rdparty/Eigen/src/LU/arch/Inverse_SSE.h @@ -42,8 +42,10 @@ #ifndef EIGEN_INVERSE_SSE_H #define EIGEN_INVERSE_SSE_H +namespace internal { + template -struct ei_compute_inverse_size4 +struct compute_inverse_size4 { enum { MatrixAlignment = bool(MatrixType::Flags&AlignedBit), @@ -171,7 +173,7 @@ struct ei_compute_inverse_size4 -struct ei_compute_inverse_size4 +struct compute_inverse_size4 { enum { MatrixAlignment = bool(MatrixType::Flags&AlignedBit), @@ -333,4 +335,6 @@ struct ei_compute_inverse_size4 class ColPivHouseholderQR typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; typedef Matrix MatrixQType; - typedef typename ei_plain_diag_type::type HCoeffsType; + typedef typename internal::plain_diag_type::type HCoeffsType; typedef PermutationMatrix PermutationType; - typedef typename ei_plain_row_type::type IntRowVectorType; - typedef typename ei_plain_row_type::type RowVectorType; - typedef typename ei_plain_row_type::type RealRowVectorType; + typedef typename internal::plain_row_type::type IntRowVectorType; + typedef typename internal::plain_row_type::type RowVectorType; + typedef typename internal::plain_row_type::type RealRowVectorType; typedef typename HouseholderSequence::ConjugateReturnType HouseholderSequenceType; /** @@ -132,11 +132,11 @@ template class ColPivHouseholderQR * Output: \verbinclude ColPivHouseholderQR_solve.out */ template - inline const ei_solve_retval + inline const internal::solve_retval solve(const MatrixBase& b) const { - ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); - return ei_solve_retval(*this, b.derived()); + eigen_assert(m_isInitialized && 
"ColPivHouseholderQR is not initialized."); + return internal::solve_retval(*this, b.derived()); } HouseholderSequenceType householderQ(void) const; @@ -145,7 +145,7 @@ template class ColPivHouseholderQR */ const MatrixType& matrixQR() const { - ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return m_qr; } @@ -153,7 +153,7 @@ template class ColPivHouseholderQR const PermutationType& colsPermutation() const { - ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return m_colsPermutation; } @@ -194,11 +194,11 @@ template class ColPivHouseholderQR */ inline Index rank() const { - ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); - RealScalar premultiplied_threshold = ei_abs(m_maxpivot) * threshold(); + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + RealScalar premultiplied_threshold = internal::abs(m_maxpivot) * threshold(); Index result = 0; for(Index i = 0; i < m_nonzero_pivots; ++i) - result += (ei_abs(m_qr.coeff(i,i)) > premultiplied_threshold); + result += (internal::abs(m_qr.coeff(i,i)) > premultiplied_threshold); return result; } @@ -210,7 +210,7 @@ template class ColPivHouseholderQR */ inline Index dimensionOfKernel() const { - ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return cols() - rank(); } @@ -223,7 +223,7 @@ template class ColPivHouseholderQR */ inline bool isInjective() const { - ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return rank() == cols(); } @@ -236,7 +236,7 @@ template class ColPivHouseholderQR */ inline bool isSurjective() const { - ei_assert(m_isInitialized && 
"ColPivHouseholderQR is not initialized."); + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return rank() == rows(); } @@ -248,7 +248,7 @@ template class ColPivHouseholderQR */ inline bool isInvertible() const { - ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return isInjective() && isSurjective(); } @@ -258,11 +258,11 @@ template class ColPivHouseholderQR * Use isInvertible() to first determine whether this matrix is invertible. */ inline const - ei_solve_retval + internal::solve_retval inverse() const { - ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); - return ei_solve_retval + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + return internal::solve_retval (*this, MatrixType::Identity(m_qr.rows(), m_qr.cols())); } @@ -314,7 +314,7 @@ template class ColPivHouseholderQR */ RealScalar threshold() const { - ei_assert(m_isInitialized || m_usePrescribedThreshold); + eigen_assert(m_isInitialized || m_usePrescribedThreshold); return m_usePrescribedThreshold ? m_prescribedThreshold // this formula comes from experimenting (see "LU precision tuning" thread on the list) // and turns out to be identical to Higham's formula used already in LDLt. 
@@ -330,7 +330,7 @@ template class ColPivHouseholderQR */ inline Index nonzeroPivots() const { - ei_assert(m_isInitialized && "LU is not initialized."); + eigen_assert(m_isInitialized && "LU is not initialized."); return m_nonzero_pivots; } @@ -355,16 +355,16 @@ template class ColPivHouseholderQR template typename MatrixType::RealScalar ColPivHouseholderQR::absDeterminant() const { - ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); - ei_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); - return ei_abs(m_qr.diagonal().prod()); + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); + return internal::abs(m_qr.diagonal().prod()); } template typename MatrixType::RealScalar ColPivHouseholderQR::logAbsDeterminant() const { - ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); - ei_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); return m_qr.diagonal().cwiseAbs().array().log().sum(); } @@ -387,7 +387,7 @@ ColPivHouseholderQR& ColPivHouseholderQR::compute(const for(Index k = 0; k < cols; ++k) m_colSqNorms.coeffRef(k) = m_qr.col(k).squaredNorm(); - RealScalar threshold_helper = m_colSqNorms.maxCoeff() * ei_abs2(NumTraits::epsilon()) / rows; + RealScalar threshold_helper = m_colSqNorms.maxCoeff() * internal::abs2(NumTraits::epsilon()) / rows; m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case) m_maxpivot = RealScalar(0); @@ -439,7 +439,7 @@ ColPivHouseholderQR& ColPivHouseholderQR::compute(const m_qr.coeffRef(k,k) = beta; // remember the maximum absolute value of diagonal coefficients - 
if(ei_abs(beta) > m_maxpivot) m_maxpivot = ei_abs(beta); + if(internal::abs(beta) > m_maxpivot) m_maxpivot = internal::abs(beta); // apply the householder transformation m_qr.bottomRightCorner(rows-k, cols-k-1) @@ -459,15 +459,17 @@ ColPivHouseholderQR& ColPivHouseholderQR::compute(const return *this; } +namespace internal { + template -struct ei_solve_retval, Rhs> - : ei_solve_retval_base, Rhs> +struct solve_retval, Rhs> + : solve_retval_base, Rhs> { EIGEN_MAKE_SOLVE_HELPERS(ColPivHouseholderQR<_MatrixType>,Rhs) template void evalTo(Dest& dst) const { - ei_assert(rhs().rows() == dec().rows()); + eigen_assert(rhs().rows() == dec().rows()); const int cols = dec().cols(), nonzero_pivots = dec().nonzeroPivots(); @@ -481,13 +483,10 @@ struct ei_solve_retval, Rhs> typename Rhs::PlainObject c(rhs()); // Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T - c.applyOnTheLeft(householderSequence( - dec().matrixQR(), - dec().hCoeffs(), - true, - dec().nonzeroPivots(), - 0 - )); + c.applyOnTheLeft(householderSequence(dec().matrixQR(), dec().hCoeffs()) + .setLength(dec().nonzeroPivots()) + .transpose() + ); dec().matrixQR() .topLeftCorner(nonzero_pivots, nonzero_pivots) @@ -507,13 +506,15 @@ struct ei_solve_retval, Rhs> } }; +} // end namespace internal + /** \returns the matrix Q as a sequence of householder transformations */ template typename ColPivHouseholderQR::HouseholderSequenceType ColPivHouseholderQR ::householderQ() const { - ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); - return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate(), false, m_nonzero_pivots, 0); + eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); + return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate()).setLength(m_nonzero_pivots); } /** \return the column-pivoting Householder QR decomposition of \c *this. 
diff --git a/gtsam/3rdparty/Eigen/src/QR/FullPivHouseholderQR.h b/gtsam/3rdparty/Eigen/src/QR/FullPivHouseholderQR.h index e228aeb44..7f1d98c54 100644 --- a/gtsam/3rdparty/Eigen/src/QR/FullPivHouseholderQR.h +++ b/gtsam/3rdparty/Eigen/src/QR/FullPivHouseholderQR.h @@ -63,12 +63,12 @@ template class FullPivHouseholderQR typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; typedef Matrix MatrixQType; - typedef typename ei_plain_diag_type::type HCoeffsType; + typedef typename internal::plain_diag_type::type HCoeffsType; typedef Matrix IntRowVectorType; typedef PermutationMatrix PermutationType; - typedef typename ei_plain_col_type::type IntColVectorType; - typedef typename ei_plain_row_type::type RowVectorType; - typedef typename ei_plain_col_type::type ColVectorType; + typedef typename internal::plain_col_type::type IntColVectorType; + typedef typename internal::plain_row_type::type RowVectorType; + typedef typename internal::plain_col_type::type ColVectorType; /** \brief Default Constructor. 
* @@ -82,7 +82,8 @@ template class FullPivHouseholderQR m_cols_transpositions(), m_cols_permutation(), m_temp(), - m_isInitialized(false) {} + m_isInitialized(false), + m_usePrescribedThreshold(false) {} /** \brief Default Constructor with memory preallocation * @@ -97,7 +98,8 @@ template class FullPivHouseholderQR m_cols_transpositions(cols), m_cols_permutation(cols), m_temp(std::min(rows,cols)), - m_isInitialized(false) {} + m_isInitialized(false), + m_usePrescribedThreshold(false) {} FullPivHouseholderQR(const MatrixType& matrix) : m_qr(matrix.rows(), matrix.cols()), @@ -106,7 +108,8 @@ template class FullPivHouseholderQR m_cols_transpositions(matrix.cols()), m_cols_permutation(matrix.cols()), m_temp(std::min(matrix.rows(), matrix.cols())), - m_isInitialized(false) + m_isInitialized(false), + m_usePrescribedThreshold(false) { compute(matrix); } @@ -129,11 +132,11 @@ template class FullPivHouseholderQR * Output: \verbinclude FullPivHouseholderQR_solve.out */ template - inline const ei_solve_retval + inline const internal::solve_retval solve(const MatrixBase& b) const { - ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); - return ei_solve_retval(*this, b.derived()); + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + return internal::solve_retval(*this, b.derived()); } MatrixQType matrixQ(void) const; @@ -142,7 +145,7 @@ template class FullPivHouseholderQR */ const MatrixType& matrixQR() const { - ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return m_qr; } @@ -150,13 +153,13 @@ template class FullPivHouseholderQR const PermutationType& colsPermutation() const { - ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return m_cols_permutation; } const IntColVectorType& rowsTranspositions() const { - 
ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return m_rows_transpositions; } @@ -191,58 +194,67 @@ template class FullPivHouseholderQR /** \returns the rank of the matrix of which *this is the QR decomposition. * - * \note This is computed at the time of the construction of the QR decomposition. This - * method does not perform any further computation. + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). */ inline Index rank() const { - ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); - return m_rank; + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + RealScalar premultiplied_threshold = internal::abs(m_maxpivot) * threshold(); + Index result = 0; + for(Index i = 0; i < m_nonzero_pivots; ++i) + result += (internal::abs(m_qr.coeff(i,i)) > premultiplied_threshold); + return result; } /** \returns the dimension of the kernel of the matrix of which *this is the QR decomposition. * - * \note Since the rank is computed at the time of the construction of the QR decomposition, this - * method almost does not perform any further computation. + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). */ inline Index dimensionOfKernel() const { - ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); - return m_qr.cols() - m_rank; + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + return cols() - rank(); } /** \returns true if the matrix of which *this is the QR decomposition represents an injective * linear map, i.e. has trivial kernel; false otherwise. 
* - * \note Since the rank is computed at the time of the construction of the QR decomposition, this - * method almost does not perform any further computation. + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). */ inline bool isInjective() const { - ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); - return m_rank == m_qr.cols(); + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + return rank() == cols(); } /** \returns true if the matrix of which *this is the QR decomposition represents a surjective * linear map; false otherwise. * - * \note Since the rank is computed at the time of the construction of the QR decomposition, this - * method almost does not perform any further computation. + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). */ inline bool isSurjective() const { - ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); - return m_rank == m_qr.rows(); + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + return rank() == rows(); } /** \returns true if the matrix of which *this is the QR decomposition is invertible. * - * \note Since the rank is computed at the time of the construction of the QR decomposition, this - * method almost does not perform any further computation. + * \note This method has to determine which pivots should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). 
*/ inline bool isInvertible() const { - ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return isInjective() && isSurjective(); } @@ -251,11 +263,11 @@ template class FullPivHouseholderQR * \note If this matrix is not invertible, the returned matrix has undefined coefficients. * Use isInvertible() to first determine whether this matrix is invertible. */ inline const - ei_solve_retval + internal::solve_retval inverse() const { - ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); - return ei_solve_retval + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + return internal::solve_retval (*this, MatrixType::Identity(m_qr.rows(), m_qr.cols())); } @@ -263,6 +275,75 @@ template class FullPivHouseholderQR inline Index cols() const { return m_qr.cols(); } const HCoeffsType& hCoeffs() const { return m_hCoeffs; } + /** Allows to prescribe a threshold to be used by certain methods, such as rank(), + * who need to determine when pivots are to be considered nonzero. This is not used for the + * QR decomposition itself. + * + * When it needs to get the threshold value, Eigen calls threshold(). By default, this + * uses a formula to automatically determine a reasonable threshold. + * Once you have called the present method setThreshold(const RealScalar&), + * your value is used instead. + * + * \param threshold The new value to use as the threshold. + * + * A pivot will be considered nonzero if its absolute value is strictly greater than + * \f$ \vert pivot \vert \leqslant threshold \times \vert maxpivot \vert \f$ + * where maxpivot is the biggest pivot. 
+ * + * If you want to come back to the default behavior, call setThreshold(Default_t) + */ + FullPivHouseholderQR& setThreshold(const RealScalar& threshold) + { + m_usePrescribedThreshold = true; + m_prescribedThreshold = threshold; + return *this; + } + + /** Allows to come back to the default behavior, letting Eigen use its default formula for + * determining the threshold. + * + * You should pass the special object Eigen::Default as parameter here. + * \code qr.setThreshold(Eigen::Default); \endcode + * + * See the documentation of setThreshold(const RealScalar&). + */ + FullPivHouseholderQR& setThreshold(Default_t) + { + m_usePrescribedThreshold = false; + return *this; + } + + /** Returns the threshold that will be used by certain methods such as rank(). + * + * See the documentation of setThreshold(const RealScalar&). + */ + RealScalar threshold() const + { + eigen_assert(m_isInitialized || m_usePrescribedThreshold); + return m_usePrescribedThreshold ? m_prescribedThreshold + // this formula comes from experimenting (see "LU precision tuning" thread on the list) + // and turns out to be identical to Higham's formula used already in LDLt. + : NumTraits::epsilon() * m_qr.diagonalSize(); + } + + /** \returns the number of nonzero pivots in the QR decomposition. + * Here nonzero is meant in the exact sense, not in a fuzzy sense. + * So that notion isn't really intrinsically interesting, but it is + * still useful when implementing algorithms. + * + * \sa rank() + */ + inline Index nonzeroPivots() const + { + eigen_assert(m_isInitialized && "LU is not initialized."); + return m_nonzero_pivots; + } + + /** \returns the absolute value of the biggest pivot, i.e. the biggest + * diagonal coefficient of U. 
+ */ + RealScalar maxPivot() const { return m_maxpivot; } + protected: MatrixType m_qr; HCoeffsType m_hCoeffs; @@ -270,25 +351,26 @@ template class FullPivHouseholderQR IntRowVectorType m_cols_transpositions; PermutationType m_cols_permutation; RowVectorType m_temp; - bool m_isInitialized; + bool m_isInitialized, m_usePrescribedThreshold; + RealScalar m_prescribedThreshold, m_maxpivot; + Index m_nonzero_pivots; RealScalar m_precision; - Index m_rank; Index m_det_pq; }; template typename MatrixType::RealScalar FullPivHouseholderQR::absDeterminant() const { - ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); - ei_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); - return ei_abs(m_qr.diagonal().prod()); + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); + return internal::abs(m_qr.diagonal().prod()); } template typename MatrixType::RealScalar FullPivHouseholderQR::logAbsDeterminant() const { - ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); - ei_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); return m_qr.diagonal().cwiseAbs().array().log().sum(); } @@ -298,7 +380,6 @@ FullPivHouseholderQR& FullPivHouseholderQR::compute(cons Index rows = matrix.rows(); Index cols = matrix.cols(); Index size = std::min(rows,cols); - m_rank = size; m_qr = matrix; m_hCoeffs.resize(size); @@ -313,6 +394,9 @@ FullPivHouseholderQR& FullPivHouseholderQR::compute(cons RealScalar biggest(0); + m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case) + m_maxpivot = RealScalar(0); + for (Index k = 0; 
k < size; ++k) { Index row_of_biggest_in_corner, col_of_biggest_in_corner; @@ -326,9 +410,9 @@ FullPivHouseholderQR& FullPivHouseholderQR::compute(cons if(k==0) biggest = biggest_in_corner; // if the corner is negligible, then we have less than full rank, and we can finish early - if(ei_isMuchSmallerThan(biggest_in_corner, biggest, m_precision)) + if(internal::isMuchSmallerThan(biggest_in_corner, biggest, m_precision)) { - m_rank = k; + m_nonzero_pivots = k; for(Index i = k; i < size; i++) { m_rows_transpositions.coeffRef(i) = i; @@ -353,6 +437,9 @@ FullPivHouseholderQR& FullPivHouseholderQR::compute(cons m_qr.col(k).tail(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta); m_qr.coeffRef(k,k) = beta; + // remember the maximum absolute value of diagonal coefficients + if(internal::abs(beta) > m_maxpivot) m_maxpivot = internal::abs(beta); + m_qr.bottomRightCorner(rows-k, cols-k-1) .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &m_temp.coeffRef(k+1)); } @@ -367,16 +454,18 @@ FullPivHouseholderQR& FullPivHouseholderQR::compute(cons return *this; } +namespace internal { + template -struct ei_solve_retval, Rhs> - : ei_solve_retval_base, Rhs> +struct solve_retval, Rhs> + : solve_retval_base, Rhs> { EIGEN_MAKE_SOLVE_HELPERS(FullPivHouseholderQR<_MatrixType>,Rhs) template void evalTo(Dest& dst) const { const Index rows = dec().rows(), cols = dec().cols(); - ei_assert(rhs().rows() == rows); + eigen_assert(rhs().rows() == rows); // FIXME introduce nonzeroPivots() and use it here. and more generally, // make the same improvements in this dec as in FullPivLU. 
@@ -405,7 +494,8 @@ struct ei_solve_retval, Rhs> RealScalar biggest_in_lower_part_of_c = c.bottomRows(rows-dec().rank()).cwiseAbs().maxCoeff(); // FIXME brain dead const RealScalar m_precision = NumTraits::epsilon() * std::min(rows,cols); - if(!ei_isMuchSmallerThan(biggest_in_lower_part_of_c, biggest_in_upper_part_of_c, m_precision)) + // this internal:: prefix is needed by at least gcc 3.4 and ICC + if(!internal::isMuchSmallerThan(biggest_in_lower_part_of_c, biggest_in_upper_part_of_c, m_precision)) return; } dec().matrixQR() @@ -418,11 +508,13 @@ struct ei_solve_retval, Rhs> } }; +} // end namespace internal + /** \returns the matrix Q */ template typename FullPivHouseholderQR::MatrixQType FullPivHouseholderQR::matrixQ() const { - ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); + eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); // compute the product H'_0 H'_1 ... H'_n-1, // where H_k is the k-th Householder transformation I - h_k v_k v_k' // and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...] 
@@ -434,7 +526,7 @@ typename FullPivHouseholderQR::MatrixQType FullPivHouseholderQR= 0; k--) { res.block(k, k, rows-k, rows-k) - .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), ei_conj(m_hCoeffs.coeff(k)), &temp.coeffRef(k)); + .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), internal::conj(m_hCoeffs.coeff(k)), &temp.coeffRef(k)); res.row(k).swap(res.row(m_rows_transpositions.coeff(k))); } return res; diff --git a/gtsam/3rdparty/Eigen/src/QR/HouseholderQR.h b/gtsam/3rdparty/Eigen/src/QR/HouseholderQR.h index f8d759772..0d2b74893 100644 --- a/gtsam/3rdparty/Eigen/src/QR/HouseholderQR.h +++ b/gtsam/3rdparty/Eigen/src/QR/HouseholderQR.h @@ -68,8 +68,8 @@ template class HouseholderQR typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; typedef Matrix MatrixQType; - typedef typename ei_plain_diag_type::type HCoeffsType; - typedef typename ei_plain_row_type::type RowVectorType; + typedef typename internal::plain_diag_type::type HCoeffsType; + typedef typename internal::plain_row_type::type RowVectorType; typedef typename HouseholderSequence::ConjugateReturnType HouseholderSequenceType; /** @@ -119,16 +119,16 @@ template class HouseholderQR * Output: \verbinclude HouseholderQR_solve.out */ template - inline const ei_solve_retval + inline const internal::solve_retval solve(const MatrixBase& b) const { - ei_assert(m_isInitialized && "HouseholderQR is not initialized."); - return ei_solve_retval(*this, b.derived()); + eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); + return internal::solve_retval(*this, b.derived()); } HouseholderSequenceType householderQ() const { - ei_assert(m_isInitialized && "HouseholderQR is not initialized."); + eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate()); } @@ -137,7 +137,7 @@ template class HouseholderQR */ const MatrixType& matrixQR() const { - ei_assert(m_isInitialized && "HouseholderQR is 
not initialized."); + eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); return m_qr; } @@ -186,22 +186,24 @@ template class HouseholderQR template typename MatrixType::RealScalar HouseholderQR::absDeterminant() const { - ei_assert(m_isInitialized && "HouseholderQR is not initialized."); - ei_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); - return ei_abs(m_qr.diagonal().prod()); + eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); + eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); + return internal::abs(m_qr.diagonal().prod()); } template typename MatrixType::RealScalar HouseholderQR::logAbsDeterminant() const { - ei_assert(m_isInitialized && "HouseholderQR is not initialized."); - ei_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); + eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); + eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); return m_qr.diagonal().cwiseAbs().array().log().sum(); } +namespace internal { + /** \internal */ template -void ei_householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename MatrixQR::Scalar* tempData = 0) +void householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename MatrixQR::Scalar* tempData = 0) { typedef typename MatrixQR::Index Index; typedef typename MatrixQR::Scalar Scalar; @@ -210,7 +212,7 @@ void ei_householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typena Index cols = mat.cols(); Index size = std::min(rows,cols); - ei_assert(hCoeffs.size() == size); + eigen_assert(hCoeffs.size() == size); typedef Matrix TempType; TempType tempVector; @@ -237,7 +239,7 @@ void ei_householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typena /** \internal */ template -void ei_householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& 
hCoeffs, +void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs, typename MatrixQR::Index maxBlockSize=32, typename MatrixQR::Scalar* tempData = 0) { @@ -278,37 +280,19 @@ void ei_householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs, BlockType A11_21 = mat.block(k,k,brows,bs); Block hCoeffsSegment = hCoeffs.segment(k,bs); - ei_householder_qr_inplace_unblocked(A11_21, hCoeffsSegment, tempData); + householder_qr_inplace_unblocked(A11_21, hCoeffsSegment, tempData); if(tcols) { BlockType A21_22 = mat.block(k,k+bs,brows,tcols); - ei_apply_block_householder_on_the_left(A21_22,A11_21,hCoeffsSegment.adjoint()); + apply_block_householder_on_the_left(A21_22,A11_21,hCoeffsSegment.adjoint()); } } } -template -HouseholderQR& HouseholderQR::compute(const MatrixType& matrix) -{ - Index rows = matrix.rows(); - Index cols = matrix.cols(); - Index size = std::min(rows,cols); - - m_qr = matrix; - m_hCoeffs.resize(size); - - m_temp.resize(cols); - - ei_householder_qr_inplace_blocked(m_qr, m_hCoeffs, 48, m_temp.data()); - - m_isInitialized = true; - return *this; -} - template -struct ei_solve_retval, Rhs> - : ei_solve_retval_base, Rhs> +struct solve_retval, Rhs> + : solve_retval_base, Rhs> { EIGEN_MAKE_SOLVE_HELPERS(HouseholderQR<_MatrixType>,Rhs) @@ -316,7 +300,7 @@ struct ei_solve_retval, Rhs> { const Index rows = dec().rows(), cols = dec().cols(); const Index rank = std::min(rows, cols); - ei_assert(rhs().rows() == rows); + eigen_assert(rhs().rows() == rows); typename Rhs::PlainObject c(rhs()); @@ -336,6 +320,26 @@ struct ei_solve_retval, Rhs> } }; +} // end namespace internal + +template +HouseholderQR& HouseholderQR::compute(const MatrixType& matrix) +{ + Index rows = matrix.rows(); + Index cols = matrix.cols(); + Index size = std::min(rows,cols); + + m_qr = matrix; + m_hCoeffs.resize(size); + + m_temp.resize(cols); + + internal::householder_qr_inplace_blocked(m_qr, m_hCoeffs, 48, m_temp.data()); + + m_isInitialized = true; + return *this; +} + /** \return 
the Householder QR decomposition of \c *this. * * \sa class HouseholderQR diff --git a/gtsam/3rdparty/Eigen/src/SVD/JacobiSVD.h b/gtsam/3rdparty/Eigen/src/SVD/JacobiSVD.h index f12494dbc..6826e2c6d 100644 --- a/gtsam/3rdparty/Eigen/src/SVD/JacobiSVD.h +++ b/gtsam/3rdparty/Eigen/src/SVD/JacobiSVD.h @@ -25,12 +25,12 @@ #ifndef EIGEN_JACOBISVD_H #define EIGEN_JACOBISVD_H +namespace internal { // forward declaration (needed by ICC) // the empty body is required by MSVC template::IsComplex> -struct ei_svd_precondition_2x2_block_to_be_real {}; - +struct svd_precondition_2x2_block_to_be_real {}; /*** QR preconditioners (R-SVD) *** @@ -42,7 +42,7 @@ struct ei_svd_precondition_2x2_block_to_be_real {}; enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols }; template -struct ei_qr_preconditioner_should_do_anything +struct qr_preconditioner_should_do_anything { enum { a = MatrixType::RowsAtCompileTime != Dynamic && MatrixType::ColsAtCompileTime != Dynamic && @@ -57,11 +57,11 @@ struct ei_qr_preconditioner_should_do_anything }; template::ret -> struct ei_qr_preconditioner_impl {}; + bool DoAnything = qr_preconditioner_should_do_anything::ret +> struct qr_preconditioner_impl {}; template -struct ei_qr_preconditioner_impl +struct qr_preconditioner_impl { static bool run(JacobiSVD&, const MatrixType&) { @@ -72,7 +72,7 @@ struct ei_qr_preconditioner_impl /*** preconditioner using FullPivHouseholderQR ***/ template -struct ei_qr_preconditioner_impl +struct qr_preconditioner_impl { static bool run(JacobiSVD& svd, const MatrixType& matrix) { @@ -89,7 +89,7 @@ struct ei_qr_preconditioner_impl -struct ei_qr_preconditioner_impl +struct qr_preconditioner_impl { static bool run(JacobiSVD& svd, const MatrixType& matrix) { @@ -111,7 +111,7 @@ struct ei_qr_preconditioner_impl -struct ei_qr_preconditioner_impl +struct qr_preconditioner_impl { static bool run(JacobiSVD& svd, const MatrixType& matrix) { @@ -132,7 +132,7 @@ struct ei_qr_preconditioner_impl -struct 
ei_qr_preconditioner_impl +struct qr_preconditioner_impl { static bool run(JacobiSVD& svd, const MatrixType& matrix) { @@ -158,7 +158,7 @@ struct ei_qr_preconditioner_impl -struct ei_qr_preconditioner_impl +struct qr_preconditioner_impl { static bool run(JacobiSVD& svd, const MatrixType& matrix) { @@ -179,7 +179,7 @@ struct ei_qr_preconditioner_impl -struct ei_qr_preconditioner_impl +struct qr_preconditioner_impl { static bool run(JacobiSVD& svd, const MatrixType& matrix) { @@ -202,7 +202,90 @@ struct ei_qr_preconditioner_impl +struct svd_precondition_2x2_block_to_be_real +{ + typedef JacobiSVD SVD; + typedef typename SVD::Index Index; + static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {} +}; + +template +struct svd_precondition_2x2_block_to_be_real +{ + typedef JacobiSVD SVD; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename SVD::Index Index; + static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q) + { + Scalar z; + JacobiRotation rot; + RealScalar n = sqrt(abs2(work_matrix.coeff(p,p)) + abs2(work_matrix.coeff(q,p))); + if(n==0) + { + z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); + work_matrix.row(p) *= z; + if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z); + z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); + work_matrix.row(q) *= z; + if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z); + } + else + { + rot.c() = conj(work_matrix.coeff(p,p)) / n; + rot.s() = work_matrix.coeff(q,p) / n; + work_matrix.applyOnTheLeft(p,q,rot); + if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint()); + if(work_matrix.coeff(p,q) != Scalar(0)) + { + Scalar z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); + work_matrix.col(q) *= z; + if(svd.computeV()) svd.m_matrixV.col(q) *= z; + } + if(work_matrix.coeff(q,q) != Scalar(0)) + { + z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); + work_matrix.row(q) *= z; + 
if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z); + } + } + } +}; + +template +void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q, + JacobiRotation *j_left, + JacobiRotation *j_right) +{ + Matrix m; + m << real(matrix.coeff(p,p)), real(matrix.coeff(p,q)), + real(matrix.coeff(q,p)), real(matrix.coeff(q,q)); + JacobiRotation rot1; + RealScalar t = m.coeff(0,0) + m.coeff(1,1); + RealScalar d = m.coeff(1,0) - m.coeff(0,1); + if(t == RealScalar(0)) + { + rot1.c() = 0; + rot1.s() = d > 0 ? 1 : -1; + } + else + { + RealScalar u = d / t; + rot1.c() = RealScalar(1) / sqrt(1 + abs2(u)); + rot1.s() = rot1.c() * u; + } + m.applyOnTheLeft(0,1,rot1); + j_right->makeJacobi(m,0,1); + *j_left = rot1 * j_right->transpose(); +} + +} // end namespace internal /** \ingroup SVD_Module * @@ -239,6 +322,9 @@ struct ei_qr_preconditioner_impl class JacobiSVD typedef Matrix MatrixVType; - typedef typename ei_plain_diag_type::type SingularValuesType; - typedef typename ei_plain_row_type::type RowType; - typedef typename ei_plain_col_type::type ColType; + typedef typename internal::plain_diag_type::type SingularValuesType; + typedef typename internal::plain_row_type::type RowType; + typedef typename internal::plain_col_type::type ColType; typedef Matrix WorkMatrixType; @@ -290,7 +376,12 @@ template class JacobiSVD * The default constructor is useful in cases in which the user intends to * perform decompositions via JacobiSVD::compute(const MatrixType&). 
*/ - JacobiSVD() : m_isInitialized(false) {} + JacobiSVD() + : m_isInitialized(false), + m_isAllocated(false), + m_computationOptions(0), + m_rows(-1), m_cols(-1) + {} /** \brief Default Constructor with memory preallocation @@ -300,6 +391,10 @@ template class JacobiSVD * \sa JacobiSVD() */ JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0) + : m_isInitialized(false), + m_isAllocated(false), + m_computationOptions(0), + m_rows(-1), m_cols(-1) { allocate(rows, cols, computationOptions); } @@ -315,11 +410,15 @@ template class JacobiSVD * available with the (non-default) FullPivHouseholderQR preconditioner. */ JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0) + : m_isInitialized(false), + m_isAllocated(false), + m_computationOptions(0), + m_rows(-1), m_cols(-1) { compute(matrix, computationOptions); } - /** \brief Method performing the decomposition of given matrix. + /** \brief Method performing the decomposition of given matrix using custom options. * * \param matrix the matrix to decompose * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. @@ -329,7 +428,18 @@ template class JacobiSVD * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not * available with the (non-default) FullPivHouseholderQR preconditioner. */ - JacobiSVD& compute(const MatrixType& matrix, unsigned int computationOptions = 0); + JacobiSVD& compute(const MatrixType& matrix, unsigned int computationOptions); + + /** \brief Method performing the decomposition of given matrix using current options. + * + * \param matrix the matrix to decompose + * + * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int). 
+ */ + JacobiSVD& compute(const MatrixType& matrix) + { + return compute(matrix, m_computationOptions); + } /** \returns the \a U matrix. * @@ -342,8 +452,8 @@ template class JacobiSVD */ const MatrixUType& matrixU() const { - ei_assert(m_isInitialized && "JacobiSVD is not initialized."); - ei_assert(computeU() && "This JacobiSVD decomposition didn't compute U. Did you ask for it?"); + eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); + eigen_assert(computeU() && "This JacobiSVD decomposition didn't compute U. Did you ask for it?"); return m_matrixU; } @@ -358,19 +468,19 @@ template class JacobiSVD */ const MatrixVType& matrixV() const { - ei_assert(m_isInitialized && "JacobiSVD is not initialized."); - ei_assert(computeV() && "This JacobiSVD decomposition didn't compute V. Did you ask for it?"); + eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); + eigen_assert(computeV() && "This JacobiSVD decomposition didn't compute V. Did you ask for it?"); return m_matrixV; } /** \returns the vector of singular values. * * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the - * returned vector has size \a m. + * returned vector has size \a m. Singular values are always sorted in decreasing order. */ const SingularValuesType& singularValues() const { - ei_assert(m_isInitialized && "JacobiSVD is not initialized."); + eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); return m_singularValues; } @@ -389,18 +499,18 @@ template class JacobiSVD * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$. 
*/ template - inline const ei_solve_retval + inline const internal::solve_retval solve(const MatrixBase& b) const { - ei_assert(m_isInitialized && "JacobiSVD is not initialized."); - ei_assert(computeU() && computeV() && "JacobiSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice)."); - return ei_solve_retval(*this, b.derived()); + eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); + eigen_assert(computeU() && computeV() && "JacobiSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice)."); + return internal::solve_retval(*this, b.derived()); } /** \returns the number of singular values that are not exactly 0 */ Index nonzeroSingularValues() const { - ei_assert(m_isInitialized && "JacobiSVD is not initialized."); + eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); return m_nonzeroSingularValues; } @@ -408,41 +518,54 @@ template class JacobiSVD inline Index cols() const { return m_cols; } private: - void allocate(Index rows, Index cols, unsigned int computationOptions = 0); + void allocate(Index rows, Index cols, unsigned int computationOptions); protected: MatrixUType m_matrixU; MatrixVType m_matrixV; SingularValuesType m_singularValues; WorkMatrixType m_workMatrix; - bool m_isInitialized; + bool m_isInitialized, m_isAllocated; bool m_computeFullU, m_computeThinU; bool m_computeFullV, m_computeThinV; + unsigned int m_computationOptions; Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize; template - friend struct ei_svd_precondition_2x2_block_to_be_real; + friend struct internal::svd_precondition_2x2_block_to_be_real; template - friend struct ei_qr_preconditioner_impl; + friend struct internal::qr_preconditioner_impl; }; template void JacobiSVD::allocate(Index rows, Index cols, unsigned int computationOptions) { + eigen_assert(rows >= 0 && cols >= 0); + + if (m_isAllocated && + rows == m_rows && + cols == m_cols && + computationOptions == m_computationOptions) + { + 
return; + } + m_rows = rows; m_cols = cols; m_isInitialized = false; - m_computeFullU = computationOptions & ComputeFullU; - m_computeThinU = computationOptions & ComputeThinU; - m_computeFullV = computationOptions & ComputeFullV; - m_computeThinV = computationOptions & ComputeThinV; - ei_assert(!(m_computeFullU && m_computeThinU) && "JacobiSVD: you can't ask for both full and thin U"); - ei_assert(!(m_computeFullV && m_computeThinV) && "JacobiSVD: you can't ask for both full and thin V"); - ei_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) && + m_isAllocated = true; + m_computationOptions = computationOptions; + m_computeFullU = (computationOptions & ComputeFullU) != 0; + m_computeThinU = (computationOptions & ComputeThinU) != 0; + m_computeFullV = (computationOptions & ComputeFullV) != 0; + m_computeThinV = (computationOptions & ComputeThinV) != 0; + eigen_assert(!(m_computeFullU && m_computeThinU) && "JacobiSVD: you can't ask for both full and thin U"); + eigen_assert(!(m_computeFullV && m_computeThinV) && "JacobiSVD: you can't ask for both full and thin V"); + eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) && "JacobiSVD: thin U and V are only available when your matrix has a dynamic number of columns."); if (QRPreconditioner == FullPivHouseholderQRPreconditioner) { - ei_assert(!(m_computeThinU || m_computeThinV) && + eigen_assert(!(m_computeThinU || m_computeThinV) && "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. 
" "Use the ColPivHouseholderQR preconditioner instead."); } @@ -457,85 +580,6 @@ void JacobiSVD::allocate(Index rows, Index cols, u m_workMatrix.resize(m_diagSize, m_diagSize); } - -template -struct ei_svd_precondition_2x2_block_to_be_real -{ - typedef JacobiSVD SVD; - typedef typename SVD::Index Index; - static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {} -}; - -template -struct ei_svd_precondition_2x2_block_to_be_real -{ - typedef JacobiSVD SVD; - typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::RealScalar RealScalar; - typedef typename SVD::Index Index; - static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q) - { - Scalar z; - PlanarRotation rot; - RealScalar n = ei_sqrt(ei_abs2(work_matrix.coeff(p,p)) + ei_abs2(work_matrix.coeff(q,p))); - if(n==0) - { - z = ei_abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); - work_matrix.row(p) *= z; - if(svd.computeU()) svd.m_matrixU.col(p) *= ei_conj(z); - z = ei_abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); - work_matrix.row(q) *= z; - if(svd.computeU()) svd.m_matrixU.col(q) *= ei_conj(z); - } - else - { - rot.c() = ei_conj(work_matrix.coeff(p,p)) / n; - rot.s() = work_matrix.coeff(q,p) / n; - work_matrix.applyOnTheLeft(p,q,rot); - if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint()); - if(work_matrix.coeff(p,q) != Scalar(0)) - { - Scalar z = ei_abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); - work_matrix.col(q) *= z; - if(svd.computeV()) svd.m_matrixV.col(q) *= z; - } - if(work_matrix.coeff(q,q) != Scalar(0)) - { - z = ei_abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); - work_matrix.row(q) *= z; - if(svd.computeU()) svd.m_matrixU.col(q) *= ei_conj(z); - } - } - } -}; - -template -void ei_real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q, - PlanarRotation *j_left, - PlanarRotation *j_right) -{ - Matrix m; - m << ei_real(matrix.coeff(p,p)), ei_real(matrix.coeff(p,q)), - ei_real(matrix.coeff(q,p)), 
ei_real(matrix.coeff(q,q)); - PlanarRotation rot1; - RealScalar t = m.coeff(0,0) + m.coeff(1,1); - RealScalar d = m.coeff(1,0) - m.coeff(0,1); - if(t == RealScalar(0)) - { - rot1.c() = 0; - rot1.s() = d > 0 ? 1 : -1; - } - else - { - RealScalar u = d / t; - rot1.c() = RealScalar(1) / ei_sqrt(1 + ei_abs2(u)); - rot1.s() = rot1.c() * u; - } - m.applyOnTheLeft(0,1,rot1); - j_right->makeJacobi(m,0,1); - *j_left = rot1 * j_right->transpose(); -} - template JacobiSVD& JacobiSVD::compute(const MatrixType& matrix, unsigned int computationOptions) @@ -544,12 +588,12 @@ JacobiSVD::compute(const MatrixType& matrix, unsig // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations, // only worsening the precision of U and V as we accumulate more rotations - const RealScalar precision = 2 * NumTraits::epsilon(); + const RealScalar precision = RealScalar(2) * NumTraits::epsilon(); /*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */ - if(!ei_qr_preconditioner_impl::run(*this, matrix) - && !ei_qr_preconditioner_impl::run(*this, matrix)) + if(!internal::qr_preconditioner_impl::run(*this, matrix) + && !internal::qr_preconditioner_impl::run(*this, matrix)) { m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize); if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows); @@ -574,15 +618,15 @@ JacobiSVD::compute(const MatrixType& matrix, unsig // if this 2x2 sub-matrix is not diagonal already... // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't // keep us iterating forever. 
- if(std::max(ei_abs(m_workMatrix.coeff(p,q)),ei_abs(m_workMatrix.coeff(q,p))) - > std::max(ei_abs(m_workMatrix.coeff(p,p)),ei_abs(m_workMatrix.coeff(q,q)))*precision) + if(std::max(internal::abs(m_workMatrix.coeff(p,q)),internal::abs(m_workMatrix.coeff(q,p))) + > std::max(internal::abs(m_workMatrix.coeff(p,p)),internal::abs(m_workMatrix.coeff(q,q)))*precision) { finished = false; // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal - ei_svd_precondition_2x2_block_to_be_real::run(m_workMatrix, *this, p, q); - PlanarRotation j_left, j_right; - ei_real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right); + internal::svd_precondition_2x2_block_to_be_real::run(m_workMatrix, *this, p, q); + JacobiRotation j_left, j_right; + internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right); // accumulate resulting Jacobi rotations m_workMatrix.applyOnTheLeft(p,q,j_left); @@ -599,7 +643,7 @@ JacobiSVD::compute(const MatrixType& matrix, unsig for(Index i = 0; i < m_diagSize; ++i) { - RealScalar a = ei_abs(m_workMatrix.coeff(i,i)); + RealScalar a = internal::abs(m_workMatrix.coeff(i,i)); m_singularValues.coeffRef(i) = a; if(computeU() && (a!=RealScalar(0))) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a; } @@ -629,16 +673,17 @@ JacobiSVD::compute(const MatrixType& matrix, unsig return *this; } +namespace internal { template -struct ei_solve_retval, Rhs> - : ei_solve_retval_base, Rhs> +struct solve_retval, Rhs> + : solve_retval_base, Rhs> { typedef JacobiSVD<_MatrixType, QRPreconditioner> JacobiSVDType; EIGEN_MAKE_SOLVE_HELPERS(JacobiSVDType,Rhs) template void evalTo(Dest& dst) const { - ei_assert(rhs().rows() == dec().rows()); + eigen_assert(rhs().rows() == dec().rows()); // A = U S V^* // So A^{-1} = V S^{-1} U^* @@ -656,5 +701,15 @@ struct ei_solve_retval, Rhs> * rhs(); } }; +} // end namespace internal + +template +JacobiSVD::PlainObject> +MatrixBase::jacobiSvd(unsigned int computationOptions) const +{ + return 
JacobiSVD(*this, computationOptions); +} + + #endif // EIGEN_JACOBISVD_H diff --git a/gtsam/3rdparty/Eigen/src/SVD/UpperBidiagonalization.h b/gtsam/3rdparty/Eigen/src/SVD/UpperBidiagonalization.h index 1e1355b52..2de197da9 100644 --- a/gtsam/3rdparty/Eigen/src/SVD/UpperBidiagonalization.h +++ b/gtsam/3rdparty/Eigen/src/SVD/UpperBidiagonalization.h @@ -25,6 +25,10 @@ #ifndef EIGEN_BIDIAGONALIZATION_H #define EIGEN_BIDIAGONALIZATION_H +namespace internal { +// UpperBidiagonalization will probably be replaced by a Bidiagonalization class, don't want to make it stable API. +// At the same time, it's useful to keep for now as it's about the only thing that is testing the BandMatrix class. + template class UpperBidiagonalization { public: @@ -33,7 +37,7 @@ template class UpperBidiagonalization enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, - ColsAtCompileTimeMinusOne = ei_decrement_size::ret + ColsAtCompileTimeMinusOne = internal::decrement_size::ret }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; @@ -44,12 +48,12 @@ template class UpperBidiagonalization typedef Matrix DiagVectorType; typedef Matrix SuperDiagVectorType; typedef HouseholderSequence< - MatrixType, - CwiseUnaryOp, Diagonal > + const MatrixType, + CwiseUnaryOp, const Diagonal > > HouseholderUSequenceType; typedef HouseholderSequence< - MatrixType, - Diagonal, + const MatrixType, + Diagonal, OnTheRight > HouseholderVSequenceType; @@ -74,17 +78,18 @@ template class UpperBidiagonalization const MatrixType& householder() const { return m_householder; } const BidiagonalType& bidiagonal() const { return m_bidiagonal; } - HouseholderUSequenceType householderU() const + const HouseholderUSequenceType householderU() const { - ei_assert(m_isInitialized && "UpperBidiagonalization is not initialized."); + eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized."); return 
HouseholderUSequenceType(m_householder, m_householder.diagonal().conjugate()); } - HouseholderVSequenceType householderV() // const here gives nasty errors and i'm lazy + const HouseholderVSequenceType householderV() // const here gives nasty errors and i'm lazy { - ei_assert(m_isInitialized && "UpperBidiagonalization is not initialized."); - return HouseholderVSequenceType(m_householder, m_householder.template diagonal<1>(), - false, m_householder.cols()-1, 1); + eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized."); + return HouseholderVSequenceType(m_householder, m_householder.const_derived().template diagonal<1>()) + .setLength(m_householder.cols()-1) + .setShift(1); } protected: @@ -99,7 +104,7 @@ UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::comput Index rows = matrix.rows(); Index cols = matrix.cols(); - ei_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols."); + eigen_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols."); m_householder = matrix; @@ -149,5 +154,6 @@ MatrixBase::bidiagonalization() const } #endif +} // end namespace internal #endif // EIGEN_BIDIAGONALIZATION_H diff --git a/gtsam/3rdparty/Eigen/src/Sparse/AmbiVector.h b/gtsam/3rdparty/Eigen/src/Sparse/AmbiVector.h index f2e92f93c..01c93fbd7 100644 --- a/gtsam/3rdparty/Eigen/src/Sparse/AmbiVector.h +++ b/gtsam/3rdparty/Eigen/src/Sparse/AmbiVector.h @@ -183,7 +183,7 @@ void AmbiVector<_Scalar,_Index>::setZero() } else { - ei_assert(m_mode==IsSparse); + eigen_assert(m_mode==IsSparse); m_llSize = 0; m_llStart = -1; } @@ -198,7 +198,7 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i) { ListEl* EIGEN_RESTRICT llElements = reinterpret_cast(m_buffer); // TODO factorize the following code to reduce code generation - ei_assert(m_mode==IsSparse); + eigen_assert(m_mode==IsSparse); if (m_llSize==0) { // this is the first element @@ -225,7 +225,7 @@ _Scalar& 
AmbiVector<_Scalar,_Index>::coeffRef(_Index i) else { Index nextel = llElements[m_llCurrent].next; - ei_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index"); + eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index"); while (nextel >= 0 && llElements[nextel].index<=i) { m_llCurrent = nextel; @@ -244,7 +244,7 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i) reallocateSparse(); llElements = reinterpret_cast(m_buffer); } - ei_internal_assert(m_llSize::coeff(_Index i) else { ListEl* EIGEN_RESTRICT llElements = reinterpret_cast(m_buffer); - ei_assert(m_mode==IsSparse); + eigen_assert(m_mode==IsSparse); if ((m_llSize==0) || (i::Iterator { ListEl* EIGEN_RESTRICT llElements = reinterpret_cast(m_vector.m_buffer); m_currentEl = m_vector.m_llStart; - while (m_currentEl>=0 && ei_abs(llElements[m_currentEl].value)=0 && internal::abs(llElements[m_currentEl].value)::Iterator { do { ++m_cachedIndex; - } while (m_cachedIndex::Iterator ListEl* EIGEN_RESTRICT llElements = reinterpret_cast(m_vector.m_buffer); do { m_currentEl = llElements[m_currentEl].next; - } while (m_currentEl>=0 && ei_abs(llElements[m_currentEl].value)=0 && internal::abs(llElements[m_currentEl].value) -struct ei_traits > + +namespace internal { +template +struct traits > { typedef _Scalar Scalar; typedef _Index Index; @@ -54,15 +56,16 @@ struct ei_traits > ColsAtCompileTime = Dynamic, MaxRowsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic, - Flags = _Flags | NestByRefBit | LvalueBit, + Flags = _Options | NestByRefBit | LvalueBit, CoeffReadCost = NumTraits::ReadCost, SupportedAccessPatterns = OuterRandomAccessPattern }; }; +} -template +template class DynamicSparseMatrix - : public SparseMatrixBase > + : public SparseMatrixBase > { public: EIGEN_SPARSE_PUBLIC_INTERFACE(DynamicSparseMatrix) @@ -71,6 +74,10 @@ class DynamicSparseMatrix // 
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, -=) typedef MappedSparseMatrix Map; using Base::IsRowMajor; + using Base::operator=; + enum { + Options = _Options + }; protected: @@ -102,7 +109,7 @@ class DynamicSparseMatrix /** \returns a reference to the coefficient value at given position \a row, \a col * This operation involes a log(rho*outer_size) binary search. If the coefficient does not - * exist yet, then a sorted insertion Indexo a sequential buffer is performed. + * exist yet, then a sorted insertion into a sequential buffer is performed. */ inline Scalar& coeffRef(Index row, Index col) { @@ -158,8 +165,8 @@ class DynamicSparseMatrix /** \sa insertBack */ inline Scalar& insertBackByOuterInner(Index outer, Index inner) { - ei_assert(outer - inline DynamicSparseMatrix(const SparseMatrixBase& other) + explicit inline DynamicSparseMatrix(const SparseMatrixBase& other) : m_innerSize(0) { - *this = other.derived(); + Base::operator=(other.derived()); } inline DynamicSparseMatrix(const DynamicSparseMatrix& other) @@ -272,12 +279,6 @@ class DynamicSparseMatrix return *this; } - template - inline DynamicSparseMatrix& operator=(const SparseMatrixBase& other) - { - return SparseMatrixBase::operator=(other.derived()); - } - /** Destructor */ inline ~DynamicSparseMatrix() {} @@ -320,12 +321,16 @@ class DynamicSparseMatrix /** \deprecated use finalize() * Does nothing. Provided for compatibility with SparseMatrix. 
*/ EIGEN_DEPRECATED void endFill() {} + +# ifdef EIGEN_DYNAMICSPARSEMATRIX_PLUGIN +# include EIGEN_DYNAMICSPARSEMATRIX_PLUGIN +# endif }; -template -class DynamicSparseMatrix::InnerIterator : public SparseVector::InnerIterator +template +class DynamicSparseMatrix::InnerIterator : public SparseVector::InnerIterator { - typedef typename SparseVector::InnerIterator Base; + typedef typename SparseVector::InnerIterator Base; public: InnerIterator(const DynamicSparseMatrix& mat, Index outer) : Base(mat.m_data[outer]), m_outer(outer) diff --git a/gtsam/3rdparty/Eigen/src/Sparse/MappedSparseMatrix.h b/gtsam/3rdparty/Eigen/src/Sparse/MappedSparseMatrix.h index 941290a35..31a431fb2 100644 --- a/gtsam/3rdparty/Eigen/src/Sparse/MappedSparseMatrix.h +++ b/gtsam/3rdparty/Eigen/src/Sparse/MappedSparseMatrix.h @@ -34,9 +34,11 @@ * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme. * */ +namespace internal { template -struct ei_traits > : ei_traits > +struct traits > : traits > {}; +} template class MappedSparseMatrix @@ -101,11 +103,11 @@ class MappedSparseMatrix Index start = m_outerIndex[outer]; Index end = m_outerIndex[outer+1]; - ei_assert(end>=start && "you probably called coeffRef on a non finalized matrix"); - ei_assert(end>start && "coeffRef cannot be called on a zero coefficient"); + eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix"); + eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient"); Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner); const Index id = r-&m_innerIndices[0]; - ei_assert((*r==inner) && (id -struct ei_traits > +struct traits > { - typedef typename ei_traits::Scalar Scalar; - typedef typename ei_traits::Index Index; - typedef typename ei_traits::StorageKind StorageKind; + typedef typename traits::Scalar Scalar; + typedef typename traits::Index Index; + typedef typename traits::StorageKind StorageKind; typedef MatrixXpr 
XprKind; enum { IsRowMajor = (int(MatrixType::Flags)&RowMajorBit)==RowMajorBit, @@ -42,14 +43,15 @@ struct ei_traits > CoeffReadCost = MatrixType::CoeffReadCost }; }; +} // end namespace internal template -class SparseInnerVectorSet : ei_no_assignment_operator, +class SparseInnerVectorSet : internal::no_assignment_operator, public SparseMatrixBase > { public: - enum { IsRowMajor = ei_traits::IsRowMajor }; + enum { IsRowMajor = internal::traits::IsRowMajor }; EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet) class InnerIterator: public MatrixType::InnerIterator @@ -67,14 +69,14 @@ class SparseInnerVectorSet : ei_no_assignment_operator, inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize) : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize) { - ei_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) ); + eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) ); } inline SparseInnerVectorSet(const MatrixType& matrix, Index outer) : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size) { - ei_assert(Size!=Dynamic); - ei_assert( (outer>=0) && (outer=0) && (outer @@ -96,7 +98,7 @@ class SparseInnerVectorSet : ei_no_assignment_operator, const typename MatrixType::Nested m_matrix; Index m_outerStart; - const ei_variable_if_dynamic m_outerSize; + const internal::variable_if_dynamic m_outerSize; }; /*************************************************************************** @@ -110,7 +112,7 @@ class SparseInnerVectorSet, Size> typedef DynamicSparseMatrix<_Scalar, _Options> MatrixType; public: - enum { IsRowMajor = ei_traits::IsRowMajor }; + enum { IsRowMajor = internal::traits::IsRowMajor }; EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet) class InnerIterator: public MatrixType::InnerIterator @@ -128,14 +130,14 @@ class SparseInnerVectorSet, Size> inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize) : m_matrix(matrix), 
m_outerStart(outerStart), m_outerSize(outerSize) { - ei_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) ); + eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) ); } inline SparseInnerVectorSet(const MatrixType& matrix, Index outer) : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size) { - ei_assert(Size!=Dynamic); - ei_assert( (outer>=0) && (outer=0) && (outer @@ -175,7 +177,7 @@ class SparseInnerVectorSet, Size> const Scalar& lastCoeff() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(SparseInnerVectorSet); - ei_assert(m_matrix.data()[m_outerStart].size()>0); + eigen_assert(m_matrix.data()[m_outerStart].size()>0); return m_matrix.data()[m_outerStart].vale(m_matrix.data()[m_outerStart].size()-1); } @@ -192,7 +194,7 @@ class SparseInnerVectorSet, Size> const typename MatrixType::Nested m_matrix; Index m_outerStart; - const ei_variable_if_dynamic m_outerSize; + const internal::variable_if_dynamic m_outerSize; }; @@ -208,7 +210,7 @@ class SparseInnerVectorSet, Size> typedef SparseMatrix<_Scalar, _Options> MatrixType; public: - enum { IsRowMajor = ei_traits::IsRowMajor }; + enum { IsRowMajor = internal::traits::IsRowMajor }; EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet) class InnerIterator: public MatrixType::InnerIterator @@ -226,20 +228,20 @@ class SparseInnerVectorSet, Size> inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize) : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize) { - ei_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) ); + eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) ); } inline SparseInnerVectorSet(const MatrixType& matrix, Index outer) : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size) { - ei_assert(Size==1); - ei_assert( (outer>=0) && (outer=0) && (outer inline SparseInnerVectorSet& operator=(const SparseMatrixBase& other) { - typedef typename ei_cleantype::type _NestedMatrixType; 
+ typedef typename internal::remove_all::type _NestedMatrixType; _NestedMatrixType& matrix = const_cast<_NestedMatrixType&>(m_matrix);; // This assignement is slow if this vector set not empty // and/or it is not at the end of the nonzeros of the underlying matrix. @@ -295,11 +297,11 @@ class SparseInnerVectorSet, Size> } // update outer index pointers - Index id = nnz_head; + Index p = nnz_head; for(Index k=1; k, Size> const Scalar& lastCoeff() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(SparseInnerVectorSet); - ei_assert(nonZeros()>0); + eigen_assert(nonZeros()>0); return m_matrix._valuePtr()[m_matrix._outerIndexPtr()[m_outerStart+1]-1]; } @@ -356,7 +358,7 @@ class SparseInnerVectorSet, Size> const typename MatrixType::Nested m_matrix; Index m_outerStart; - const ei_variable_if_dynamic m_outerSize; + const internal::variable_if_dynamic m_outerSize; }; diff --git a/gtsam/3rdparty/Eigen/src/Sparse/SparseCwiseBinaryOp.h b/gtsam/3rdparty/Eigen/src/Sparse/SparseCwiseBinaryOp.h index a4fb7ea86..cde5bbc03 100644 --- a/gtsam/3rdparty/Eigen/src/Sparse/SparseCwiseBinaryOp.h +++ b/gtsam/3rdparty/Eigen/src/Sparse/SparseCwiseBinaryOp.h @@ -42,12 +42,21 @@ // 4 - dense op dense product dense // generic dense -template<> struct ei_promote_storage_type +namespace internal { + +template<> struct promote_storage_type { typedef Sparse ret; }; -template<> struct ei_promote_storage_type +template<> struct promote_storage_type { typedef Sparse ret; }; +template::StorageKind, + typename _RhsStorageMode = typename traits::StorageKind> +class sparse_cwise_binary_op_inner_iterator_selector; + +} // end namespace internal + template class CwiseBinaryOpImpl : public SparseMatrixBase > @@ -58,18 +67,13 @@ class CwiseBinaryOpImpl EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) }; -template::StorageKind, - typename _RhsStorageMode = typename ei_traits::StorageKind> -class ei_sparse_cwise_binary_op_inner_iterator_selector; - template class CwiseBinaryOpImpl::InnerIterator - : public 
ei_sparse_cwise_binary_op_inner_iterator_selector::InnerIterator> + : public internal::sparse_cwise_binary_op_inner_iterator_selector::InnerIterator> { public: typedef typename Lhs::Index Index; - typedef ei_sparse_cwise_binary_op_inner_iterator_selector< + typedef internal::sparse_cwise_binary_op_inner_iterator_selector< BinaryOp,Lhs,Rhs, InnerIterator> Base; EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, Index outer) @@ -81,26 +85,28 @@ class CwiseBinaryOpImpl::InnerIterator * Implementation of inner-iterators ***************************************************************************/ -// template struct ei_func_is_conjunction { enum { ret = false }; }; -// template struct ei_func_is_conjunction > { enum { ret = true }; }; +// template struct internal::func_is_conjunction { enum { ret = false }; }; +// template struct internal::func_is_conjunction > { enum { ret = true }; }; -// TODO generalize the ei_scalar_product_op specialization to all conjunctions if any ! +// TODO generalize the internal::scalar_product_op specialization to all conjunctions if any ! 
+ +namespace internal { // sparse - sparse (generic) template -class ei_sparse_cwise_binary_op_inner_iterator_selector +class sparse_cwise_binary_op_inner_iterator_selector { typedef CwiseBinaryOp CwiseBinaryXpr; - typedef typename ei_traits::Scalar Scalar; - typedef typename ei_traits::_LhsNested _LhsNested; - typedef typename ei_traits::_RhsNested _RhsNested; + typedef typename traits::Scalar Scalar; + typedef typename traits::_LhsNested _LhsNested; + typedef typename traits::_RhsNested _RhsNested; typedef typename _LhsNested::InnerIterator LhsIterator; typedef typename _RhsNested::InnerIterator RhsIterator; typedef typename Lhs::Index Index; public: - EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) + EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()) { this->operator++(); @@ -153,19 +159,19 @@ class ei_sparse_cwise_binary_op_inner_iterator_selector -class ei_sparse_cwise_binary_op_inner_iterator_selector, Lhs, Rhs, Derived, Sparse, Sparse> +class sparse_cwise_binary_op_inner_iterator_selector, Lhs, Rhs, Derived, Sparse, Sparse> { - typedef ei_scalar_product_op BinaryFunc; + typedef scalar_product_op BinaryFunc; typedef CwiseBinaryOp CwiseBinaryXpr; typedef typename CwiseBinaryXpr::Scalar Scalar; - typedef typename ei_traits::_LhsNested _LhsNested; + typedef typename traits::_LhsNested _LhsNested; typedef typename _LhsNested::InnerIterator LhsIterator; - typedef typename ei_traits::_RhsNested _RhsNested; + typedef typename traits::_RhsNested _RhsNested; typedef typename _RhsNested::InnerIterator RhsIterator; typedef typename Lhs::Index Index; public: - EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) + EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index 
outer) : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()) { while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index())) @@ -207,19 +213,19 @@ class ei_sparse_cwise_binary_op_inner_iterator_selector, // sparse - dense (product) template -class ei_sparse_cwise_binary_op_inner_iterator_selector, Lhs, Rhs, Derived, Sparse, Dense> +class sparse_cwise_binary_op_inner_iterator_selector, Lhs, Rhs, Derived, Sparse, Dense> { - typedef ei_scalar_product_op BinaryFunc; + typedef scalar_product_op BinaryFunc; typedef CwiseBinaryOp CwiseBinaryXpr; typedef typename CwiseBinaryXpr::Scalar Scalar; - typedef typename ei_traits::_LhsNested _LhsNested; - typedef typename ei_traits::RhsNested RhsNested; + typedef typename traits::_LhsNested _LhsNested; + typedef typename traits::RhsNested RhsNested; typedef typename _LhsNested::InnerIterator LhsIterator; typedef typename Lhs::Index Index; enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit }; public: - EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) + EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) : m_rhs(xpr.rhs()), m_lhsIter(xpr.lhs(),outer), m_functor(xpr.functor()), m_outer(outer) {} @@ -248,19 +254,19 @@ class ei_sparse_cwise_binary_op_inner_iterator_selector, // sparse - dense (product) template -class ei_sparse_cwise_binary_op_inner_iterator_selector, Lhs, Rhs, Derived, Dense, Sparse> +class sparse_cwise_binary_op_inner_iterator_selector, Lhs, Rhs, Derived, Dense, Sparse> { - typedef ei_scalar_product_op BinaryFunc; + typedef scalar_product_op BinaryFunc; typedef CwiseBinaryOp CwiseBinaryXpr; typedef typename CwiseBinaryXpr::Scalar Scalar; - typedef typename ei_traits::_RhsNested _RhsNested; + typedef typename traits::_RhsNested _RhsNested; typedef typename _RhsNested::InnerIterator RhsIterator; typedef typename Lhs::Index Index; enum { IsRowMajor = 
(int(Rhs::Flags)&RowMajorBit)==RowMajorBit }; public: - EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) + EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) : m_xpr(xpr), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()), m_outer(outer) {} @@ -286,6 +292,7 @@ class ei_sparse_cwise_binary_op_inner_iterator_selector, const Index m_outer; }; +} // end namespace internal /*************************************************************************** * Implementation of SparseMatrixBase and SparseCwise functions/operators @@ -293,11 +300,11 @@ class ei_sparse_cwise_binary_op_inner_iterator_selector, // template // template -// EIGEN_STRONG_INLINE const CwiseBinaryOp::Scalar>, +// EIGEN_STRONG_INLINE const CwiseBinaryOp::Scalar>, // Derived, OtherDerived> // SparseMatrixBase::operator-(const SparseMatrixBase &other) const // { -// return CwiseBinaryOp, +// return CwiseBinaryOp, // Derived, OtherDerived>(derived(), other.derived()); // } @@ -311,10 +318,10 @@ SparseMatrixBase::operator-=(const SparseMatrixBase &othe // template // template -// EIGEN_STRONG_INLINE const CwiseBinaryOp::Scalar>, Derived, OtherDerived> +// EIGEN_STRONG_INLINE const CwiseBinaryOp::Scalar>, Derived, OtherDerived> // SparseMatrixBase::operator+(const SparseMatrixBase &other) const // { -// return CwiseBinaryOp, Derived, OtherDerived>(derived(), other.derived()); +// return CwiseBinaryOp, Derived, OtherDerived>(derived(), other.derived()); // } template @@ -343,18 +350,18 @@ SparseMatrixBase::cwiseProduct(const MatrixBase &other) c // template // template -// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op) +// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op) // SparseCwise::operator/(const SparseMatrixBase &other) const // { -// return 
EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)(_expression(), other.derived()); +// return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)(_expression(), other.derived()); // } // // template // template -// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op) +// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op) // SparseCwise::operator/(const MatrixBase &other) const // { -// return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)(_expression(), other.derived()); +// return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)(_expression(), other.derived()); // } // template diff --git a/gtsam/3rdparty/Eigen/src/Sparse/SparseCwiseUnaryOp.h b/gtsam/3rdparty/Eigen/src/Sparse/SparseCwiseUnaryOp.h index 514f1c00b..aa068835f 100644 --- a/gtsam/3rdparty/Eigen/src/Sparse/SparseCwiseUnaryOp.h +++ b/gtsam/3rdparty/Eigen/src/Sparse/SparseCwiseUnaryOp.h @@ -26,15 +26,15 @@ #define EIGEN_SPARSE_CWISE_UNARY_OP_H // template -// struct ei_traits > : ei_traits +// struct internal::traits > : internal::traits // { -// typedef typename ei_result_of< +// typedef typename internal::result_of< // UnaryOp(typename MatrixType::Scalar) // >::type Scalar; // typedef typename MatrixType::Nested MatrixTypeNested; -// typedef typename ei_unref::type _MatrixTypeNested; +// typedef typename internal::remove_reference::type _MatrixTypeNested; // enum { -// CoeffReadCost = _MatrixTypeNested::CoeffReadCost + ei_functor_traits::Cost +// CoeffReadCost = _MatrixTypeNested::CoeffReadCost + internal::functor_traits::Cost // }; // }; @@ -45,7 +45,7 @@ class CwiseUnaryOpImpl public: class InnerIterator; -// typedef typename ei_unref::type _LhsNested; +// typedef typename internal::remove_reference::type _LhsNested; typedef CwiseUnaryOp Derived; EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) @@ -55,7 +55,7 @@ template class CwiseUnaryOpImpl::InnerIterator { typedef typename 
CwiseUnaryOpImpl::Scalar Scalar; - typedef typename ei_traits::_XprTypeNested _MatrixTypeNested; + typedef typename internal::traits::_XprTypeNested _MatrixTypeNested; typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator; typedef typename MatrixType::Index Index; public: @@ -87,7 +87,7 @@ class CwiseUnaryViewImpl public: class InnerIterator; -// typedef typename ei_unref::type _LhsNested; +// typedef typename internal::remove_reference::type _LhsNested; typedef CwiseUnaryView Derived; EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) @@ -97,7 +97,7 @@ template class CwiseUnaryViewImpl::InnerIterator { typedef typename CwiseUnaryViewImpl::Scalar Scalar; - typedef typename ei_traits::_MatrixTypeNested _MatrixTypeNested; + typedef typename internal::traits::_MatrixTypeNested _MatrixTypeNested; typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator; typedef typename MatrixType::Index Index; public: diff --git a/gtsam/3rdparty/Eigen/src/Sparse/SparseDenseProduct.h b/gtsam/3rdparty/Eigen/src/Sparse/SparseDenseProduct.h index 0489c68db..0f77aa5be 100644 --- a/gtsam/3rdparty/Eigen/src/Sparse/SparseDenseProduct.h +++ b/gtsam/3rdparty/Eigen/src/Sparse/SparseDenseProduct.h @@ -45,26 +45,28 @@ template struct DenseSparseProductReturnType Type; }; +namespace internal { + template -struct ei_traits > +struct traits > { typedef Sparse StorageKind; - typedef typename ei_scalar_product_traits::Scalar, - typename ei_traits::Scalar>::ReturnType Scalar; + typedef typename scalar_product_traits::Scalar, + typename traits::Scalar>::ReturnType Scalar; typedef typename Lhs::Index Index; typedef typename Lhs::Nested LhsNested; typedef typename Rhs::Nested RhsNested; - typedef typename ei_cleantype::type _LhsNested; - typedef typename ei_cleantype::type _RhsNested; + typedef typename remove_all::type _LhsNested; + typedef typename remove_all::type _RhsNested; enum { - LhsCoeffReadCost = ei_traits<_LhsNested>::CoeffReadCost, - RhsCoeffReadCost = 
ei_traits<_RhsNested>::CoeffReadCost, + LhsCoeffReadCost = traits<_LhsNested>::CoeffReadCost, + RhsCoeffReadCost = traits<_RhsNested>::CoeffReadCost, - RowsAtCompileTime = Tr ? int(ei_traits::RowsAtCompileTime) : int(ei_traits::RowsAtCompileTime), - ColsAtCompileTime = Tr ? int(ei_traits::ColsAtCompileTime) : int(ei_traits::ColsAtCompileTime), - MaxRowsAtCompileTime = Tr ? int(ei_traits::MaxRowsAtCompileTime) : int(ei_traits::MaxRowsAtCompileTime), - MaxColsAtCompileTime = Tr ? int(ei_traits::MaxColsAtCompileTime) : int(ei_traits::MaxColsAtCompileTime), + RowsAtCompileTime = Tr ? int(traits::RowsAtCompileTime) : int(traits::RowsAtCompileTime), + ColsAtCompileTime = Tr ? int(traits::ColsAtCompileTime) : int(traits::ColsAtCompileTime), + MaxRowsAtCompileTime = Tr ? int(traits::MaxRowsAtCompileTime) : int(traits::MaxRowsAtCompileTime), + MaxColsAtCompileTime = Tr ? int(traits::MaxColsAtCompileTime) : int(traits::MaxColsAtCompileTime), Flags = Tr ? RowMajorBit : 0, @@ -72,6 +74,8 @@ struct ei_traits > }; }; +} // end namespace internal + template class SparseDenseOuterProduct : public SparseMatrixBase > @@ -80,7 +84,7 @@ class SparseDenseOuterProduct typedef SparseMatrixBase Base; EIGEN_DENSE_PUBLIC_INTERFACE(SparseDenseOuterProduct) - typedef ei_traits Traits; + typedef internal::traits Traits; private: @@ -137,13 +141,15 @@ class SparseDenseOuterProduct::InnerIterator : public _LhsNes Scalar m_factor; }; +namespace internal { template -struct ei_traits > - : ei_traits, Lhs, Rhs> > +struct traits > + : traits, Lhs, Rhs> > { typedef Dense StorageKind; typedef MatrixXpr XprKind; }; +} // end namespace internal template class SparseTimeDenseProduct @@ -157,14 +163,14 @@ class SparseTimeDenseProduct template void scaleAndAddTo(Dest& dest, Scalar alpha) const { - typedef typename ei_cleantype::type _Lhs; - typedef typename ei_cleantype::type _Rhs; + typedef typename internal::remove_all::type _Lhs; + typedef typename internal::remove_all::type _Rhs; typedef typename 
_Lhs::InnerIterator LhsInnerIterator; enum { LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit }; for(Index j=0; j dest_j(dest.row(LhsIsRowMajor ? j : 0)); + typename Rhs::Scalar rhs_j = alpha * m_rhs.coeff(LhsIsRowMajor ? 0 : j,0); + typename Dest::RowXpr dest_j(dest.row(LhsIsRowMajor ? j : 0)); for(LhsInnerIterator it(m_lhs,j); it ;++it) { if(LhsIsRowMajor) dest_j += (alpha*it.value()) * m_rhs.row(it.index()); @@ -180,12 +186,14 @@ class SparseTimeDenseProduct // dense = dense * sparse +namespace internal { template -struct ei_traits > - : ei_traits, Lhs, Rhs> > +struct traits > + : traits, Lhs, Rhs> > { typedef Dense StorageKind; }; +} // end namespace internal template class DenseTimeSparseProduct @@ -199,7 +207,7 @@ class DenseTimeSparseProduct template void scaleAndAddTo(Dest& dest, Scalar alpha) const { - typedef typename ei_cleantype::type _Rhs; + typedef typename internal::remove_all::type _Rhs; typedef typename _Rhs::InnerIterator RhsInnerIterator; enum { RhsIsRowMajor = (_Rhs::Flags&RowMajorBit)==RowMajorBit }; for(Index j=0; j -struct ei_traits > +struct traits > { - typedef typename ei_cleantype::type _Lhs; - typedef typename ei_cleantype::type _Rhs; + typedef typename remove_all::type _Lhs; + typedef typename remove_all::type _Rhs; typedef typename _Lhs::Scalar Scalar; - typedef typename ei_promote_index_type::Index, - typename ei_traits::Index>::type Index; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; typedef Sparse StorageKind; typedef MatrixXpr XprKind; enum { @@ -54,7 +56,7 @@ struct ei_traits > MaxRowsAtCompileTime = _Lhs::MaxRowsAtCompileTime, MaxColsAtCompileTime = _Rhs::MaxColsAtCompileTime, - SparseFlags = ei_is_diagonal<_Lhs>::ret ? int(_Rhs::Flags) : int(_Lhs::Flags), + SparseFlags = is_diagonal<_Lhs>::ret ? 
int(_Rhs::Flags) : int(_Lhs::Flags), Flags = (SparseFlags&RowMajorBit), CoeffReadCost = Dynamic }; @@ -62,37 +64,39 @@ struct ei_traits > enum {SDP_IsDiagonal, SDP_IsSparseRowMajor, SDP_IsSparseColMajor}; template -class ei_sparse_diagonal_product_inner_iterator_selector; +class sparse_diagonal_product_inner_iterator_selector; + +} // end namespace internal template class SparseDiagonalProduct : public SparseMatrixBase >, - ei_no_assignment_operator + internal::no_assignment_operator { typedef typename Lhs::Nested LhsNested; typedef typename Rhs::Nested RhsNested; - typedef typename ei_cleantype::type _LhsNested; - typedef typename ei_cleantype::type _RhsNested; + typedef typename internal::remove_all::type _LhsNested; + typedef typename internal::remove_all::type _RhsNested; enum { - LhsMode = ei_is_diagonal<_LhsNested>::ret ? SDP_IsDiagonal - : (_LhsNested::Flags&RowMajorBit) ? SDP_IsSparseRowMajor : SDP_IsSparseColMajor, - RhsMode = ei_is_diagonal<_RhsNested>::ret ? SDP_IsDiagonal - : (_RhsNested::Flags&RowMajorBit) ? SDP_IsSparseRowMajor : SDP_IsSparseColMajor + LhsMode = internal::is_diagonal<_LhsNested>::ret ? internal::SDP_IsDiagonal + : (_LhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor, + RhsMode = internal::is_diagonal<_RhsNested>::ret ? internal::SDP_IsDiagonal + : (_RhsNested::Flags&RowMajorBit) ? 
internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor }; public: EIGEN_SPARSE_PUBLIC_INTERFACE(SparseDiagonalProduct) - typedef ei_sparse_diagonal_product_inner_iterator_selector + typedef internal::sparse_diagonal_product_inner_iterator_selector <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator; EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) { - ei_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product"); + eigen_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product"); } EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); } @@ -106,75 +110,78 @@ class SparseDiagonalProduct RhsNested m_rhs; }; +namespace internal { template -class ei_sparse_diagonal_product_inner_iterator_selector +class sparse_diagonal_product_inner_iterator_selector - : public CwiseUnaryOp,Rhs>::InnerIterator + : public CwiseUnaryOp,const Rhs>::InnerIterator { - typedef typename CwiseUnaryOp,Rhs>::InnerIterator Base; + typedef typename CwiseUnaryOp,const Rhs>::InnerIterator Base; typedef typename Lhs::Index Index; public: - inline ei_sparse_diagonal_product_inner_iterator_selector( + inline sparse_diagonal_product_inner_iterator_selector( const SparseDiagonalProductType& expr, Index outer) : Base(expr.rhs()*(expr.lhs().diagonal().coeff(outer)), outer) {} }; template -class ei_sparse_diagonal_product_inner_iterator_selector +class sparse_diagonal_product_inner_iterator_selector : public CwiseBinaryOp< - ei_scalar_product_op, + scalar_product_op, SparseInnerVectorSet, typename Lhs::DiagonalVectorType>::InnerIterator { typedef typename CwiseBinaryOp< - ei_scalar_product_op, + scalar_product_op, SparseInnerVectorSet, typename Lhs::DiagonalVectorType>::InnerIterator Base; typedef typename Lhs::Index Index; public: - inline ei_sparse_diagonal_product_inner_iterator_selector( + inline sparse_diagonal_product_inner_iterator_selector( const 
SparseDiagonalProductType& expr, Index outer) : Base(expr.rhs().innerVector(outer) .cwiseProduct(expr.lhs().diagonal()), 0) {} }; template -class ei_sparse_diagonal_product_inner_iterator_selector +class sparse_diagonal_product_inner_iterator_selector - : public CwiseUnaryOp,Lhs>::InnerIterator + : public CwiseUnaryOp,const Lhs>::InnerIterator { - typedef typename CwiseUnaryOp,Lhs>::InnerIterator Base; + typedef typename CwiseUnaryOp,const Lhs>::InnerIterator Base; typedef typename Lhs::Index Index; public: - inline ei_sparse_diagonal_product_inner_iterator_selector( + inline sparse_diagonal_product_inner_iterator_selector( const SparseDiagonalProductType& expr, Index outer) : Base(expr.lhs()*expr.rhs().diagonal().coeff(outer), outer) {} }; template -class ei_sparse_diagonal_product_inner_iterator_selector +class sparse_diagonal_product_inner_iterator_selector : public CwiseBinaryOp< - ei_scalar_product_op, + scalar_product_op, SparseInnerVectorSet, - Transpose >::InnerIterator + Transpose >::InnerIterator { typedef typename CwiseBinaryOp< - ei_scalar_product_op, + scalar_product_op, SparseInnerVectorSet, - Transpose >::InnerIterator Base; + Transpose >::InnerIterator Base; typedef typename Lhs::Index Index; public: - inline ei_sparse_diagonal_product_inner_iterator_selector( + inline sparse_diagonal_product_inner_iterator_selector( const SparseDiagonalProductType& expr, Index outer) : Base(expr.lhs().innerVector(outer) .cwiseProduct(expr.rhs().diagonal().transpose()), 0) {} }; +} // end namespace internal + // SparseMatrixBase functions template diff --git a/gtsam/3rdparty/Eigen/src/Sparse/SparseDot.h b/gtsam/3rdparty/Eigen/src/Sparse/SparseDot.h index 42ad07aeb..1f10f71a4 100644 --- a/gtsam/3rdparty/Eigen/src/Sparse/SparseDot.h +++ b/gtsam/3rdparty/Eigen/src/Sparse/SparseDot.h @@ -27,23 +27,23 @@ template template -typename ei_traits::Scalar +typename internal::traits::Scalar SparseMatrixBase::dot(const MatrixBase& other) const { 
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) - EIGEN_STATIC_ASSERT((ei_is_same_type::ret), + EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) - ei_assert(size() == other.size()); - ei_assert(other.size()>0 && "you are using a non initialized vector"); + eigen_assert(size() == other.size()); + eigen_assert(other.size()>0 && "you are using a non initialized vector"); typename Derived::InnerIterator i(derived(),0); Scalar res = 0; while (i) { - res += ei_conj(i.value()) * other.coeff(i.index()); + res += internal::conj(i.value()) * other.coeff(i.index()); ++i; } return res; @@ -51,16 +51,16 @@ SparseMatrixBase::dot(const MatrixBase& other) const template template -typename ei_traits::Scalar +typename internal::traits::Scalar SparseMatrixBase::dot(const SparseMatrixBase& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) - EIGEN_STATIC_ASSERT((ei_is_same_type::ret), + EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) - ei_assert(size() == other.size()); + eigen_assert(size() == other.size()); typename Derived::InnerIterator i(derived(),0); typename OtherDerived::InnerIterator j(other.derived(),0); @@ -69,7 +69,7 @@ SparseMatrixBase::dot(const SparseMatrixBase& other) cons { if (i.index()==j.index()) { - res += ei_conj(i.value()) * j.value(); + res += internal::conj(i.value()) * j.value(); ++i; ++j; } else if (i.index()::dot(const SparseMatrixBase& other) cons } template -inline typename NumTraits::Scalar>::Real +inline typename NumTraits::Scalar>::Real SparseMatrixBase::squaredNorm() const { - return ei_real((*this).cwiseAbs2().sum()); + 
return internal::real((*this).cwiseAbs2().sum()); } template -inline typename NumTraits::Scalar>::Real +inline typename NumTraits::Scalar>::Real SparseMatrixBase::norm() const { - return ei_sqrt(squaredNorm()); + return internal::sqrt(squaredNorm()); } #endif // EIGEN_SPARSE_DOT_H diff --git a/gtsam/3rdparty/Eigen/src/Sparse/SparseFuzzy.h b/gtsam/3rdparty/Eigen/src/Sparse/SparseFuzzy.h index bf6d2e250..ddcef88ee 100644 --- a/gtsam/3rdparty/Eigen/src/Sparse/SparseFuzzy.h +++ b/gtsam/3rdparty/Eigen/src/Sparse/SparseFuzzy.h @@ -32,8 +32,8 @@ // typename NumTraits::Real prec // ) const // { -// const typename ei_nested::type nested(derived()); -// const typename ei_nested::type otherNested(other.derived()); +// const typename internal::nested::type nested(derived()); +// const typename internal::nested::type otherNested(other.derived()); // return (nested - otherNested).cwise().abs2().sum() // <= prec * prec * std::min(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum()); // } diff --git a/gtsam/3rdparty/Eigen/src/Sparse/SparseMatrix.h b/gtsam/3rdparty/Eigen/src/Sparse/SparseMatrix.h index 820cf2884..9e7802736 100644 --- a/gtsam/3rdparty/Eigen/src/Sparse/SparseMatrix.h +++ b/gtsam/3rdparty/Eigen/src/Sparse/SparseMatrix.h @@ -34,16 +34,20 @@ * This class implements a sparse matrix using the very common compressed row/column storage * scheme. * - * \param _Scalar the scalar type, i.e. the type of the coefficients - * \param _Options Union of bit flags controlling the storage scheme. Currently the only possibility + * \tparam _Scalar the scalar type, i.e. the type of the coefficients + * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility * is RowMajor. The default is 0 which means column-major. - * \param _Index the type of the indices. Default is \c int. + * \tparam _Index the type of the indices. Default is \c int. * * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme. 
* + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN. */ + +namespace internal { template -struct ei_traits > +struct traits > { typedef _Scalar Scalar; typedef _Index Index; @@ -60,12 +64,15 @@ struct ei_traits > }; }; +} // end namespace internal + template class SparseMatrix : public SparseMatrixBase > { public: EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix) +// using Base::operator=; EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=) EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=) // FIXME: why are these operator already alvailable ??? @@ -75,6 +82,9 @@ class SparseMatrix typedef MappedSparseMatrix Map; using Base::IsRowMajor; typedef CompressedStorage Storage; + enum { + Options = _Options + }; protected: @@ -120,11 +130,11 @@ class SparseMatrix Index start = m_outerIndex[outer]; Index end = m_outerIndex[outer+1]; - ei_assert(end>=start && "you probably called coeffRef on a non finalized matrix"); - ei_assert(end>start && "coeffRef cannot be called on a zero coefficient"); - const Index id = m_data.searchLowerIndex(start,end-1,inner); - ei_assert((id=start && "you probably called coeffRef on a non finalized matrix"); + eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient"); + const Index p = m_data.searchLowerIndex(start,end-1,inner); + eigen_assert((p0) @@ -285,7 +295,7 @@ class SparseMatrix --j; // shift data of last vecs: Index k = m_outerIndex[j]-1; - while (k>=Index(id)) + while (k>=Index(p)) { m_data.index(k) = m_data.index(k-1); m_data.value(k) = m_data.value(k-1); @@ -294,15 +304,15 @@ class SparseMatrix } } - while ( (id > startId) && (m_data.index(id-1) > inner) ) + while ( (p > startId) && (m_data.index(p-1) > inner) ) { - m_data.index(id) = m_data.index(id-1); - m_data.value(id) = m_data.value(id-1); - --id; + m_data.index(p) = m_data.index(p-1); + m_data.value(p) = 
m_data.value(p-1); + --p; } - m_data.index(id) = inner; - return (m_data.value(id) = 0); + m_data.index(p) = inner; + return (m_data.value(p) = 0); } @@ -327,16 +337,29 @@ class SparseMatrix /** Suppress all nonzeros which are smaller than \a reference under the tolerence \a epsilon */ void prune(Scalar reference, RealScalar epsilon = NumTraits::dummy_precision()) + { + prune(default_prunning_func(reference,epsilon)); + } + + /** Suppress all nonzeros which do not satisfy the predicate \a keep. + * The functor type \a KeepFunc must implement the following function: + * \code + * bool operator() (const Index& row, const Index& col, const Scalar& value) const; + * \endcode + * \sa prune(Scalar,RealScalar) + */ + template + void prune(const KeepFunc& keep = KeepFunc()) { Index k = 0; - for (Index j=0; j inline SparseMatrix& operator=(const SparseSparseProduct& product) - { - return Base::operator=(product); - } + { return Base::operator=(product); } + + template + inline SparseMatrix& operator=(const ReturnByValue& other) + { return Base::operator=(other); } + + template + inline SparseMatrix& operator=(const EigenBase& other) + { return Base::operator=(other); } #endif template @@ -445,8 +474,8 @@ class SparseMatrix // 1 - compute the number of coeffs per dest inner vector // 2 - do the actual copy/eval // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed - typedef typename ei_nested::type OtherCopy; - typedef typename ei_cleantype::type _OtherCopy; + typedef typename internal::nested::type OtherCopy; + typedef typename internal::remove_all::type _OtherCopy; OtherCopy otherCopy(other.derived()); resize(other.rows(), other.cols()); @@ -561,19 +590,34 @@ class SparseMatrix } else { - ei_assert(m_data.index(m_data.size()-1) diff --git a/gtsam/3rdparty/Eigen/src/Sparse/SparseMatrixBase.h b/gtsam/3rdparty/Eigen/src/Sparse/SparseMatrixBase.h index 5ca3b604b..8695f7343 100644 --- a/gtsam/3rdparty/Eigen/src/Sparse/SparseMatrixBase.h +++ 
b/gtsam/3rdparty/Eigen/src/Sparse/SparseMatrixBase.h @@ -31,39 +31,49 @@ * * \brief Base class of any sparse matrices or sparse expressions * - * \param Derived - * - * + * \tparam Derived * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN. */ template class SparseMatrixBase : public EigenBase { public: - typedef typename ei_traits::Scalar Scalar; - typedef typename ei_packet_traits::type PacketScalar; - typedef typename ei_traits::StorageKind StorageKind; - typedef typename ei_traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; typedef SparseMatrixBase StorageBaseType; + typedef EigenBase Base; + + template + Derived& operator=(const EigenBase &other) + { + other.derived().evalTo(derived()); + return derived(); + } + +// using Base::operator=; enum { - RowsAtCompileTime = ei_traits::RowsAtCompileTime, + RowsAtCompileTime = internal::traits::RowsAtCompileTime, /**< The number of rows at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */ - ColsAtCompileTime = ei_traits::ColsAtCompileTime, + ColsAtCompileTime = internal::traits::ColsAtCompileTime, /**< The number of columns at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. 
* \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */ - SizeAtCompileTime = (ei_size_at_compile_time::RowsAtCompileTime, - ei_traits::ColsAtCompileTime>::ret), + SizeAtCompileTime = (internal::size_at_compile_time::RowsAtCompileTime, + internal::traits::ColsAtCompileTime>::ret), /**< This is equal to the number of coefficients, i.e. the number of * rows times the number of columns, or to \a Dynamic if this is not * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */ @@ -71,7 +81,7 @@ template class SparseMatrixBase : public EigenBase MaxRowsAtCompileTime = RowsAtCompileTime, MaxColsAtCompileTime = ColsAtCompileTime, - MaxSizeAtCompileTime = (ei_size_at_compile_time::ret), IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1, @@ -80,12 +90,12 @@ template class SparseMatrixBase : public EigenBase * we are dealing with a column-vector (if there is only one column) or with * a row-vector (if there is only one row). */ - Flags = ei_traits::Flags, + Flags = internal::traits::Flags, /**< This stores expression \ref flags flags which may or may not be inherited by new expressions * constructed from this one. See the \ref flags "list of flags". */ - CoeffReadCost = ei_traits::CoeffReadCost, + CoeffReadCost = internal::traits::CoeffReadCost, /**< This is a rough measure of how expensive it is to read one coefficient from * this expression. 
*/ @@ -98,28 +108,33 @@ template class SparseMatrixBase : public EigenBase }; /* \internal the return type of MatrixBase::conjugate() */ -// typedef typename ei_meta_if::IsComplex, -// const SparseCwiseUnaryOp, Derived>, +// typedef typename internal::conditional::IsComplex, +// const SparseCwiseUnaryOp, Derived>, // const Derived& -// >::ret ConjugateReturnType; +// >::type ConjugateReturnType; /* \internal the return type of MatrixBase::real() */ -// typedef SparseCwiseUnaryOp, Derived> RealReturnType; +// typedef SparseCwiseUnaryOp, Derived> RealReturnType; /* \internal the return type of MatrixBase::imag() */ -// typedef SparseCwiseUnaryOp, Derived> ImagReturnType; +// typedef SparseCwiseUnaryOp, Derived> ImagReturnType; /** \internal the return type of MatrixBase::adjoint() */ - typedef typename ei_meta_if::IsComplex, - CwiseUnaryOp, Eigen::Transpose >, - Transpose - >::ret AdjointReturnType; + typedef typename internal::conditional::IsComplex, + CwiseUnaryOp, Eigen::Transpose >, + Transpose + >::type AdjointReturnType; + typedef SparseMatrix PlainObject; - #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase - #include "../plugins/CommonCwiseUnaryOps.h" - #include "../plugins/CommonCwiseBinaryOps.h" - #include "../plugins/MatrixCwiseUnaryOps.h" - #include "../plugins/MatrixCwiseBinaryOps.h" - #undef EIGEN_CURRENT_STORAGE_BASE_CLASS +#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase +# include "../plugins/CommonCwiseUnaryOps.h" +# include "../plugins/CommonCwiseBinaryOps.h" +# include "../plugins/MatrixCwiseUnaryOps.h" +# include "../plugins/MatrixCwiseBinaryOps.h" +# ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN +# include EIGEN_SPARSEMATRIXBASE_PLUGIN +# endif +# undef EIGEN_CURRENT_STORAGE_BASE_CLASS +#undef EIGEN_CURRENT_STORAGE_BASE_CLASS #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is the "real scalar" type; if the \a Scalar type is already real numbers @@ -132,10 +147,10 @@ template class SparseMatrixBase : public EigenBase /** \internal 
the return type of coeff() */ - typedef typename ei_meta_if<_HasDirectAccess, const Scalar&, Scalar>::ret CoeffReturnType; + typedef typename internal::conditional<_HasDirectAccess, const Scalar&, Scalar>::type CoeffReturnType; /** \internal Represents a matrix with all coefficients equal to one another*/ - typedef CwiseNullaryOp,Matrix > ConstantReturnType; + typedef CwiseNullaryOp,Matrix > ConstantReturnType; /** type of the equivalent square matrix */ typedef Matrix class SparseMatrixBase : public EigenBase Derived& markAsRValue() { m_isRValue = true; return derived(); } SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ } - + inline Derived& operator=(const Derived& other) { // std::cout << "Derived& operator=(const Derived& other)\n"; @@ -183,6 +198,13 @@ template class SparseMatrixBase : public EigenBase this->operator=(other); return derived(); } + + template + Derived& operator=(const ReturnByValue& other) + { + other.evalTo(derived()); + return derived(); + } template @@ -190,14 +212,14 @@ template class SparseMatrixBase : public EigenBase { // std::cout << "Derived& operator=(const MatrixBase& other)\n"; //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); - ei_assert(( ((ei_traits::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) || + eigen_assert(( ((internal::traits::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) || (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) && "the transpose operation is supposed to be handled in SparseMatrix::operator="); enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) }; const Index outerSize = other.outerSize(); - //typedef typename ei_meta_if, Derived>::ret TempType; + //typedef typename internal::conditional, Derived>::type TempType; // thanks to shallow copies, we always eval to a tempary Derived temp(other.rows(), other.cols()); @@ -299,14 +321,14 @@ template class 
SparseMatrixBase : public EigenBase return s; } -// const SparseCwiseUnaryOp::Scalar>,Derived> operator-() const; +// const SparseCwiseUnaryOp::Scalar>,Derived> operator-() const; // template -// const CwiseBinaryOp::Scalar>, Derived, OtherDerived> +// const CwiseBinaryOp::Scalar>, Derived, OtherDerived> // operator+(const SparseMatrixBase &other) const; // template -// const CwiseBinaryOp::Scalar>, Derived, OtherDerived> +// const CwiseBinaryOp::Scalar>, Derived, OtherDerived> // operator-(const SparseMatrixBase &other) const; template @@ -322,10 +344,10 @@ template class SparseMatrixBase : public EigenBase #define EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE \ CwiseBinaryOp< \ - ei_scalar_product_op< \ - typename ei_scalar_product_traits< \ - typename ei_traits::Scalar, \ - typename ei_traits::Scalar \ + internal::scalar_product_op< \ + typename internal::scalar_product_traits< \ + typename internal::traits::Scalar, \ + typename internal::traits::Scalar \ >::ReturnType \ >, \ Derived, \ @@ -336,12 +358,12 @@ template class SparseMatrixBase : public EigenBase EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE cwiseProduct(const MatrixBase &other) const; -// const SparseCwiseUnaryOp::Scalar>, Derived> +// const SparseCwiseUnaryOp::Scalar>, Derived> // operator*(const Scalar& scalar) const; -// const SparseCwiseUnaryOp::Scalar>, Derived> +// const SparseCwiseUnaryOp::Scalar>, Derived> // operator/(const Scalar& scalar) const; -// inline friend const SparseCwiseUnaryOp::Scalar>, Derived> +// inline friend const SparseCwiseUnaryOp::Scalar>, Derived> // operator*(const Scalar& scalar, const SparseMatrixBase& matrix) // { return matrix*scalar; } @@ -379,7 +401,7 @@ template class SparseMatrixBase : public EigenBase #ifdef EIGEN2_SUPPORT // deprecated template - typename ei_plain_matrix_type_column_major::type + typename internal::plain_matrix_type_column_major::type solveTriangular(const MatrixBase& other) const; // deprecated @@ -403,7 +425,7 @@ template class 
SparseMatrixBase : public EigenBase // void normalize(); Transpose transpose() { return derived(); } - const Transpose transpose() const { return derived(); } + const Transpose transpose() const { return derived(); } // void transposeInPlace(); const AdjointReturnType adjoint() const { return transpose(); } @@ -545,18 +567,18 @@ template class SparseMatrixBase : public EigenBase // template -// const SparseCwiseUnaryOp::Scalar, NewType>, Derived> cast() const; +// const SparseCwiseUnaryOp::Scalar, NewType>, Derived> cast() const; /** \returns the matrix or vector obtained by evaluating this expression. * * Notice that in the case of a plain matrix or vector (not an expression) this function just returns * a const reference, in order to avoid a useless copy. */ - inline const typename ei_eval::type eval() const - { return typename ei_eval::type(derived()); } + inline const typename internal::eval::type eval() const + { return typename internal::eval::type(derived()); } // template -// void swap(MatrixBase EIGEN_REF_TO_TEMPORARY other); +// void swap(MatrixBase const & other); // template // const SparseFlagged marked() const; @@ -585,14 +607,14 @@ template class SparseMatrixBase : public EigenBase Scalar sum() const; // Scalar trace() const; -// typename ei_traits::Scalar minCoeff() const; -// typename ei_traits::Scalar maxCoeff() const; +// typename internal::traits::Scalar minCoeff() const; +// typename internal::traits::Scalar maxCoeff() const; -// typename ei_traits::Scalar minCoeff(int* row, int* col = 0) const; -// typename ei_traits::Scalar maxCoeff(int* row, int* col = 0) const; +// typename internal::traits::Scalar minCoeff(int* row, int* col = 0) const; +// typename internal::traits::Scalar maxCoeff(int* row, int* col = 0) const; // template -// typename ei_result_of::Scalar)>::type +// typename internal::result_of::Scalar)>::type // redux(const BinaryOp& func) const; // template @@ -612,9 +634,9 @@ template class SparseMatrixBase : public EigenBase const 
VectorwiseOp rowwise() const; const VectorwiseOp colwise() const; - static const CwiseNullaryOp,Derived> Random(int rows, int cols); - static const CwiseNullaryOp,Derived> Random(int size); - static const CwiseNullaryOp,Derived> Random(); + static const CwiseNullaryOp,Derived> Random(int rows, int cols); + static const CwiseNullaryOp,Derived> Random(int size); + static const CwiseNullaryOp,Derived> Random(); template const Select @@ -638,10 +660,10 @@ template class SparseMatrixBase : public EigenBase // { // EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) // EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) -// EIGEN_STATIC_ASSERT((ei_is_same_type::ret), +// EIGEN_STATIC_ASSERT((internal::is_same::value), // YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) // -// ei_assert(derived().size() == other.size()); +// eigen_assert(derived().size() == other.size()); // // short version, but the assembly looks more complicated because // // of the CwiseBinaryOp iterator complexity // // return res = (derived().cwise() * other.derived().conjugate()).sum(); @@ -655,7 +677,7 @@ template class SparseMatrixBase : public EigenBase // if (i.index()==j.index()) // { // // std::cerr << i.value() << " * " << j.value() << "\n"; -// res += i.value() * ei_conj(j.value()); +// res += i.value() * internal::conj(j.value()); // ++i; ++j; // } // else if (i.index() struct SparseSparseProductReturnType { - typedef typename ei_traits::Scalar Scalar; + typedef typename internal::traits::Scalar Scalar; enum { - LhsRowMajor = ei_traits::Flags & RowMajorBit, - RhsRowMajor = ei_traits::Flags & RowMajorBit, + LhsRowMajor = internal::traits::Flags & RowMajorBit, + RhsRowMajor = internal::traits::Flags & RowMajorBit, TransposeRhs = (!LhsRowMajor) && RhsRowMajor, TransposeLhs = LhsRowMajor && (!RhsRowMajor) }; - typedef typename ei_meta_if, - const typename ei_nested::type>::ret LhsNested; + const typename internal::nested::type>::type LhsNested; 
- typedef typename ei_meta_if, - const typename ei_nested::type>::ret RhsNested; + const typename internal::nested::type>::type RhsNested; typedef SparseSparseProduct Type; }; +namespace internal { template -struct ei_traits > +struct traits > { typedef MatrixXpr XprKind; // clean the nested types: - typedef typename ei_cleantype::type _LhsNested; - typedef typename ei_cleantype::type _RhsNested; + typedef typename remove_all::type _LhsNested; + typedef typename remove_all::type _RhsNested; typedef typename _LhsNested::Scalar Scalar; - typedef typename ei_promote_index_type::Index, - typename ei_traits<_RhsNested>::Index>::type Index; + typedef typename promote_index_type::Index, + typename traits<_RhsNested>::Index>::type Index; enum { LhsCoeffReadCost = _LhsNested::CoeffReadCost, @@ -85,8 +86,10 @@ struct ei_traits > typedef Sparse StorageKind; }; +} // end namespace internal + template -class SparseSparseProduct : ei_no_assignment_operator, +class SparseSparseProduct : internal::no_assignment_operator, public SparseMatrixBase > { public: @@ -96,8 +99,8 @@ class SparseSparseProduct : ei_no_assignment_operator, private: - typedef typename ei_traits::_LhsNested _LhsNested; - typedef typename ei_traits::_RhsNested _RhsNested; + typedef typename internal::traits::_LhsNested _LhsNested; + typedef typename internal::traits::_RhsNested _RhsNested; public: @@ -105,7 +108,7 @@ class SparseSparseProduct : ei_no_assignment_operator, EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) { - ei_assert(lhs.cols() == rhs.rows()); + eigen_assert(lhs.cols() == rhs.rows()); enum { ProductIsValid = _LhsNested::ColsAtCompileTime==Dynamic diff --git a/gtsam/3rdparty/Eigen/src/Sparse/SparseRedux.h b/gtsam/3rdparty/Eigen/src/Sparse/SparseRedux.h index bc10d5a4e..afc49de7a 100644 --- a/gtsam/3rdparty/Eigen/src/Sparse/SparseRedux.h +++ b/gtsam/3rdparty/Eigen/src/Sparse/SparseRedux.h @@ -26,10 +26,10 @@ #define EIGEN_SPARSEREDUX_H template 
-typename ei_traits::Scalar +typename internal::traits::Scalar SparseMatrixBase::sum() const { - ei_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); + eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); Scalar res = 0; for (Index j=0; j::sum() const } template -typename ei_traits >::Scalar +typename internal::traits >::Scalar SparseMatrix<_Scalar,_Options,_Index>::sum() const { - ei_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); + eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); return Matrix::Map(&m_data.value(0), m_data.size()).sum(); } template -typename ei_traits >::Scalar +typename internal::traits >::Scalar SparseVector<_Scalar,_Options,_Index>::sum() const { - ei_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); + eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); return Matrix::Map(&m_data.value(0), m_data.size()).sum(); } diff --git a/gtsam/3rdparty/Eigen/src/Sparse/SparseSelfAdjointView.h b/gtsam/3rdparty/Eigen/src/Sparse/SparseSelfAdjointView.h index dd4d925e1..8be358db8 100644 --- a/gtsam/3rdparty/Eigen/src/Sparse/SparseSelfAdjointView.h +++ b/gtsam/3rdparty/Eigen/src/Sparse/SparseSelfAdjointView.h @@ -45,24 +45,45 @@ class SparseSelfAdjointTimeDenseProduct; template class DenseTimeSparseSelfAdjointProduct; +template +class SparseSymmetricPermutationProduct; + +namespace internal { + +template +struct traits > : traits { +}; + +template +void permute_symm_to_symm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm = 0); + +template +void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm = 0); + +} + template class SparseSelfAdjointView + : public EigenBase > { public: typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; + typedef Matrix VectorI; + typedef typename 
MatrixType::Nested MatrixTypeNested; + typedef typename internal::remove_all::type _MatrixTypeNested; inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix) { - ei_assert(ei_are_flags_consistent::ret); - ei_assert(rows()==cols() && "SelfAdjointView is only for squared matrices"); + eigen_assert(rows()==cols() && "SelfAdjointView is only for squared matrices"); } inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } /** \internal \returns a reference to the nested matrix */ - const MatrixType& matrix() const { return m_matrix; } + const _MatrixTypeNested& matrix() const { return m_matrix; } + _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); } /** Efficient sparse self-adjoint matrix times dense vector/matrix product */ template @@ -77,7 +98,7 @@ template class SparseSelfAdjointView DenseTimeSparseSelfAdjointProduct operator*(const MatrixBase& lhs, const SparseSelfAdjointView& rhs) { - return DenseTimeSparseSelfAdjointProduct(lhs.derived(), rhs.m_matrix); + return DenseTimeSparseSelfAdjointProduct(lhs.derived(), rhs.m_matrix); } /** Perform a symmetric rank K update of the selfadjoint matrix \c *this: @@ -92,7 +113,35 @@ template class SparseSelfAdjointView * call this function with u.adjoint(). 
*/ template - SparseSelfAdjointView& rankUpdate(const MatrixBase& u, Scalar alpha = Scalar(1)); + SparseSelfAdjointView& rankUpdate(const SparseMatrixBase& u, Scalar alpha = Scalar(1)); + + /** \internal triggered by sparse_matrix = SparseSelfadjointView; */ + template void evalTo(SparseMatrix& _dest) const + { + internal::permute_symm_to_fullsymm(m_matrix, _dest); + } + + template void evalTo(DynamicSparseMatrix& _dest) const + { + // TODO directly evaluate into _dest; + SparseMatrix tmp(_dest.rows(),_dest.cols()); + internal::permute_symm_to_fullsymm(m_matrix, tmp); + _dest = tmp; + } + + /** \returns an expression of P^-1 H P */ + SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix& perm) const + { + return SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo>(m_matrix, perm); + } + + template + SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct& permutedMatrix) + { + permutedMatrix.evalTo(*this); + return *this; + } + // const SparseLLT llt() const; // const SparseLDLT ldlt() const; @@ -100,6 +149,8 @@ template class SparseSelfAdjointView protected: const typename MatrixType::Nested m_matrix; + mutable VectorI m_countPerRow; + mutable VectorI m_countPerCol; }; /*************************************************************************** @@ -127,27 +178,29 @@ SparseSelfAdjointView SparseMatrixBase::selfadjointView( template template SparseSelfAdjointView& -SparseSelfAdjointView::rankUpdate(const MatrixBase& u, Scalar alpha) +SparseSelfAdjointView::rankUpdate(const SparseMatrixBase& u, Scalar alpha) { SparseMatrix tmp = u * u.adjoint(); if(alpha==Scalar(0)) - m_matrix = tmp.template triangularView(); + m_matrix.const_cast_derived() = tmp.template triangularView(); else - m_matrix += alpha * tmp.template triangularView(); + m_matrix.const_cast_derived() += alpha * tmp.template triangularView(); - return this; + return *this; } 
/*************************************************************************** * Implementation of sparse self-adjoint time dense matrix ***************************************************************************/ +namespace internal { template -struct ei_traits > - : ei_traits, Lhs, Rhs> > +struct traits > + : traits, Lhs, Rhs> > { typedef Dense StorageKind; }; +} template class SparseSelfAdjointTimeDenseProduct @@ -162,9 +215,9 @@ class SparseSelfAdjointTimeDenseProduct template void scaleAndAddTo(Dest& dest, Scalar alpha) const { // TODO use alpha - ei_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry"); - typedef typename ei_cleantype::type _Lhs; - typedef typename ei_cleantype::type _Rhs; + eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry"); + typedef typename internal::remove_all::type _Lhs; + typedef typename internal::remove_all::type _Rhs; typedef typename _Lhs::InnerIterator LhsInnerIterator; enum { LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit, @@ -189,7 +242,7 @@ class SparseSelfAdjointTimeDenseProduct Index b = LhsIsRowMajor ? 
i.index() : j; typename Lhs::Scalar v = i.value(); dest.row(a) += (v) * m_rhs.row(b); - dest.row(b) += ei_conj(v) * m_rhs.row(a); + dest.row(b) += internal::conj(v) * m_rhs.row(a); } if (ProcessFirstHalf && i && (i.index()==j)) dest.row(j) += i.value() * m_rhs.row(j); @@ -200,10 +253,12 @@ class SparseSelfAdjointTimeDenseProduct SparseSelfAdjointTimeDenseProduct& operator=(const SparseSelfAdjointTimeDenseProduct&); }; +namespace internal { template -struct ei_traits > - : ei_traits, Lhs, Rhs> > +struct traits > + : traits, Lhs, Rhs> > {}; +} template class DenseTimeSparseSelfAdjointProduct @@ -223,4 +278,177 @@ class DenseTimeSparseSelfAdjointProduct private: DenseTimeSparseSelfAdjointProduct& operator=(const DenseTimeSparseSelfAdjointProduct&); }; + +/*************************************************************************** +* Implementation of symmetric copies and permutations +***************************************************************************/ +namespace internal { + +template +struct traits > : traits { +}; + +template +void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm) +{ + typedef typename MatrixType::Index Index; + typedef typename MatrixType::Scalar Scalar; + typedef SparseMatrix Dest; + typedef Matrix VectorI; + + Dest& dest(_dest.derived()); + enum { + StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor) + }; + eigen_assert(perm==0); + Index size = mat.rows(); + VectorI count; + count.resize(size); + count.setZero(); + dest.resize(size,size); + for(Index j = 0; jj) || (UpLo==Upper && ij) || (UpLo==Upper && i +void permute_symm_to_symm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm) +{ + typedef typename MatrixType::Index Index; + typedef typename MatrixType::Scalar Scalar; + typedef SparseMatrix Dest; + Dest& dest(_dest.derived()); + typedef Matrix VectorI; + //internal::conj_if cj; + + Index size = mat.rows(); + VectorI 
count(size); + count.setZero(); + dest.resize(size,size); + for(Index j = 0; jj)) + continue; + + Index ip = perm ? perm[i] : i; + count[DstUpLo==Lower ? std::min(ip,jp) : std::max(ip,jp)]++; + } + } + dest._outerIndexPtr()[0] = 0; + for(Index j=0; jj)) + continue; + + Index ip = perm? perm[i] : i; + Index k = count[DstUpLo==Lower ? std::min(ip,jp) : std::max(ip,jp)]++; + dest._innerIndexPtr()[k] = DstUpLo==Lower ? std::max(ip,jp) : std::min(ip,jp); + + if((DstUpLo==Lower && ipjp)) + dest._valuePtr()[k] = conj(it.value()); + else + dest._valuePtr()[k] = it.value(); + } + } +} + +} + +template +class SparseSymmetricPermutationProduct + : public EigenBase > +{ + typedef PermutationMatrix Perm; + public: + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; + typedef Matrix VectorI; + typedef typename MatrixType::Nested MatrixTypeNested; + typedef typename internal::remove_all::type _MatrixTypeNested; + + SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm) + : m_matrix(mat), m_perm(perm) + {} + + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } + + template void evalTo(SparseMatrix& _dest) const + { + internal::permute_symm_to_fullsymm(m_matrix,_dest,m_perm.indices().data()); + } + + template void evalTo(SparseSelfAdjointView& dest) const + { + internal::permute_symm_to_symm(m_matrix,dest.matrix(),m_perm.indices().data()); + } + + protected: + const MatrixTypeNested m_matrix; + const Perm& m_perm; + +}; + #endif // EIGEN_SPARSE_SELFADJOINTVIEW_H diff --git a/gtsam/3rdparty/Eigen/src/Sparse/SparseSparseProduct.h b/gtsam/3rdparty/Eigen/src/Sparse/SparseSparseProduct.h index c8724c118..cade6fd54 100644 --- a/gtsam/3rdparty/Eigen/src/Sparse/SparseSparseProduct.h +++ b/gtsam/3rdparty/Eigen/src/Sparse/SparseSparseProduct.h @@ -25,16 +25,18 @@ #ifndef EIGEN_SPARSESPARSEPRODUCT_H #define EIGEN_SPARSESPARSEPRODUCT_H +namespace internal { + template -static 
void ei_sparse_product_impl2(const Lhs& lhs, const Rhs& rhs, ResultType& res) +static void sparse_product_impl2(const Lhs& lhs, const Rhs& rhs, ResultType& res) { - typedef typename ei_cleantype::type::Scalar Scalar; - typedef typename ei_cleantype::type::Index Index; + typedef typename remove_all::type::Scalar Scalar; + typedef typename remove_all::type::Index Index; // make sure to call innerSize/outerSize since we fake the storage order. Index rows = lhs.innerSize(); Index cols = rhs.outerSize(); - ei_assert(lhs.outerSize() == rhs.innerSize()); + eigen_assert(lhs.outerSize() == rhs.innerSize()); std::vector mask(rows,false); Matrix values(rows); @@ -110,18 +112,18 @@ static void ei_sparse_product_impl2(const Lhs& lhs, const Rhs& rhs, ResultType& // perform a pseudo in-place sparse * sparse product assuming all matrices are col major template -static void ei_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res) +static void sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res) { -// return ei_sparse_product_impl2(lhs,rhs,res); +// return sparse_product_impl2(lhs,rhs,res); - typedef typename ei_cleantype::type::Scalar Scalar; - typedef typename ei_cleantype::type::Index Index; + typedef typename remove_all::type::Scalar Scalar; + typedef typename remove_all::type::Index Index; // make sure to call innerSize/outerSize since we fake the storage order. 
Index rows = lhs.innerSize(); Index cols = rhs.outerSize(); //int size = lhs.outerSize(); - ei_assert(lhs.outerSize() == rhs.innerSize()); + eigen_assert(lhs.outerSize() == rhs.innerSize()); // allocate a temporary buffer AmbiVector tempVector(rows); @@ -131,7 +133,12 @@ static void ei_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& r float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols); float ratioRes = std::min(ratioLhs * avgNnzPerRhsColumn, 1.f); - res.resize(rows, cols); + // mimics a resizeByInnerOuter: + if(ResultType::IsRowMajor) + res.resize(cols, rows); + else + res.resize(rows, cols); + res.reserve(Index(ratioRes*rows*cols)); for (Index j=0; j::Flags&RowMajorBit, - int RhsStorageOrder = ei_traits::Flags&RowMajorBit, - int ResStorageOrder = ei_traits::Flags&RowMajorBit> -struct ei_sparse_product_selector; + int LhsStorageOrder = traits::Flags&RowMajorBit, + int RhsStorageOrder = traits::Flags&RowMajorBit, + int ResStorageOrder = traits::Flags&RowMajorBit> +struct sparse_product_selector; template -struct ei_sparse_product_selector +struct sparse_product_selector { - typedef typename ei_traits::type>::Scalar Scalar; + typedef typename traits::type>::Scalar Scalar; static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { // std::cerr << __LINE__ << "\n"; - typename ei_cleantype::type _res(res.rows(), res.cols()); - ei_sparse_product_impl(lhs, rhs, _res); + typename remove_all::type _res(res.rows(), res.cols()); + sparse_product_impl(lhs, rhs, _res); res.swap(_res); } }; template -struct ei_sparse_product_selector +struct sparse_product_selector { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { @@ -187,26 +194,26 @@ struct ei_sparse_product_selector // we need a col-major matrix to hold the result typedef SparseMatrix SparseTemporaryType; SparseTemporaryType _res(res.rows(), res.cols()); - ei_sparse_product_impl(lhs, rhs, _res); + sparse_product_impl(lhs, rhs, _res); res = _res; } }; template -struct 
ei_sparse_product_selector +struct sparse_product_selector { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { // std::cerr << __LINE__ << "\n"; // let's transpose the product to get a column x column product - typename ei_cleantype::type _res(res.rows(), res.cols()); - ei_sparse_product_impl(rhs, lhs, _res); + typename remove_all::type _res(res.rows(), res.cols()); + sparse_product_impl(rhs, lhs, _res); res.swap(_res); } }; template -struct ei_sparse_product_selector +struct sparse_product_selector { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { @@ -215,21 +222,22 @@ struct ei_sparse_product_selector ColMajorMatrix colLhs(lhs); ColMajorMatrix colRhs(rhs); // std::cerr << "more...\n"; - ei_sparse_product_impl(colLhs, colRhs, res); + sparse_product_impl(colLhs, colRhs, res); // std::cerr << "OK.\n"; // let's transpose the product to get a column x column product // typedef SparseMatrix SparseTemporaryType; // SparseTemporaryType _res(res.cols(), res.rows()); -// ei_sparse_product_impl(rhs, lhs, _res); +// sparse_product_impl(rhs, lhs, _res); // res = _res.transpose(); } }; -// NOTE the 2 others cases (col row *) must never occurs since they are caught -// by ProductReturnType which transform it to (col col *) by evaluating rhs. +// NOTE the 2 others cases (col row *) must never occur since they are caught +// by ProductReturnType which transforms it to (col col *) by evaluating rhs. +} // end namespace internal // sparse = sparse * sparse template @@ -237,33 +245,34 @@ template inline Derived& SparseMatrixBase::operator=(const SparseSparseProduct& product) { // std::cerr << "there..." 
<< typeid(Lhs).name() << " " << typeid(Lhs).name() << " " << (Derived::Flags&&RowMajorBit) << "\n"; - ei_sparse_product_selector< - typename ei_cleantype::type, - typename ei_cleantype::type, + internal::sparse_product_selector< + typename internal::remove_all::type, + typename internal::remove_all::type, Derived>::run(product.lhs(),product.rhs(),derived()); return derived(); } +namespace internal { template::Flags&RowMajorBit, - int RhsStorageOrder = ei_traits::Flags&RowMajorBit, - int ResStorageOrder = ei_traits::Flags&RowMajorBit> -struct ei_sparse_product_selector2; + int LhsStorageOrder = traits::Flags&RowMajorBit, + int RhsStorageOrder = traits::Flags&RowMajorBit, + int ResStorageOrder = traits::Flags&RowMajorBit> +struct sparse_product_selector2; template -struct ei_sparse_product_selector2 +struct sparse_product_selector2 { - typedef typename ei_traits::type>::Scalar Scalar; + typedef typename traits::type>::Scalar Scalar; static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { - ei_sparse_product_impl2(lhs, rhs, res); + sparse_product_impl2(lhs, rhs, res); } }; template -struct ei_sparse_product_selector2 +struct sparse_product_selector2 { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { @@ -275,79 +284,79 @@ struct ei_sparse_product_selector2 RowMajorMatrix; // RowMajorMatrix rhsRow = rhs; // RowMajorMatrix resRow(res.rows(), res.cols()); -// ei_sparse_product_impl2(rhsRow, lhs, resRow); +// sparse_product_impl2(rhsRow, lhs, resRow); // res = resRow; } }; template -struct ei_sparse_product_selector2 +struct sparse_product_selector2 { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix RowMajorMatrix; RowMajorMatrix lhsRow = lhs; RowMajorMatrix resRow(res.rows(), res.cols()); - ei_sparse_product_impl2(rhs, lhsRow, resRow); + sparse_product_impl2(rhs, lhsRow, resRow); res = resRow; } }; template -struct ei_sparse_product_selector2 +struct sparse_product_selector2 { static void run(const Lhs& 
lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix RowMajorMatrix; RowMajorMatrix resRow(res.rows(), res.cols()); - ei_sparse_product_impl2(rhs, lhs, resRow); + sparse_product_impl2(rhs, lhs, resRow); res = resRow; } }; template -struct ei_sparse_product_selector2 +struct sparse_product_selector2 { - typedef typename ei_traits::type>::Scalar Scalar; + typedef typename traits::type>::Scalar Scalar; static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix ColMajorMatrix; ColMajorMatrix resCol(res.rows(), res.cols()); - ei_sparse_product_impl2(lhs, rhs, resCol); + sparse_product_impl2(lhs, rhs, resCol); res = resCol; } }; template -struct ei_sparse_product_selector2 +struct sparse_product_selector2 { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix ColMajorMatrix; ColMajorMatrix lhsCol = lhs; ColMajorMatrix resCol(res.rows(), res.cols()); - ei_sparse_product_impl2(lhsCol, rhs, resCol); + sparse_product_impl2(lhsCol, rhs, resCol); res = resCol; } }; template -struct ei_sparse_product_selector2 +struct sparse_product_selector2 { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { typedef SparseMatrix ColMajorMatrix; ColMajorMatrix rhsCol = rhs; ColMajorMatrix resCol(res.rows(), res.cols()); - ei_sparse_product_impl2(lhs, rhsCol, resCol); + sparse_product_impl2(lhs, rhsCol, resCol); res = resCol; } }; template -struct ei_sparse_product_selector2 +struct sparse_product_selector2 { static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) { @@ -355,26 +364,28 @@ struct ei_sparse_product_selector2(rhs, lhs, aux); +// sparse_product_impl2(rhs, lhs, aux); // // ColMajorMatrix aux2 = aux.transpose(); // res = aux; typedef SparseMatrix ColMajorMatrix; ColMajorMatrix lhsCol(lhs); ColMajorMatrix rhsCol(rhs); ColMajorMatrix resCol(res.rows(), res.cols()); - ei_sparse_product_impl2(lhsCol, rhsCol, resCol); + sparse_product_impl2(lhsCol, rhsCol, resCol); res = resCol; } }; +} // end 
namespace internal + template template inline void SparseMatrixBase::_experimentalNewProduct(const Lhs& lhs, const Rhs& rhs) { //derived().resize(lhs.rows(), rhs.cols()); - ei_sparse_product_selector2< - typename ei_cleantype::type, - typename ei_cleantype::type, + internal::sparse_product_selector2< + typename internal::remove_all::type, + typename internal::remove_all::type, Derived>::run(lhs,rhs,derived()); } diff --git a/gtsam/3rdparty/Eigen/src/Sparse/SparseTranspose.h b/gtsam/3rdparty/Eigen/src/Sparse/SparseTranspose.h index 79e0a04db..2aea2fa32 100644 --- a/gtsam/3rdparty/Eigen/src/Sparse/SparseTranspose.h +++ b/gtsam/3rdparty/Eigen/src/Sparse/SparseTranspose.h @@ -28,7 +28,7 @@ template class TransposeImpl : public SparseMatrixBase > { - typedef typename ei_cleantype::type _MatrixTypeNested; + typedef typename internal::remove_all::type _MatrixTypeNested; public: EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose) diff --git a/gtsam/3rdparty/Eigen/src/Sparse/SparseTriangularView.h b/gtsam/3rdparty/Eigen/src/Sparse/SparseTriangularView.h index 929f58416..319eaf066 100644 --- a/gtsam/3rdparty/Eigen/src/Sparse/SparseTriangularView.h +++ b/gtsam/3rdparty/Eigen/src/Sparse/SparseTriangularView.h @@ -25,27 +25,31 @@ #ifndef EIGEN_SPARSE_TRIANGULARVIEW_H #define EIGEN_SPARSE_TRIANGULARVIEW_H +namespace internal { + template -struct ei_traits > -: public ei_traits +struct traits > +: public traits {}; +} // namespace internal + template class SparseTriangularView : public SparseMatrixBase > { enum { SkipFirst = (Mode==Lower && !(MatrixType::Flags&RowMajorBit)) || (Mode==Upper && (MatrixType::Flags&RowMajorBit)) }; public: + + EIGEN_SPARSE_PUBLIC_INTERFACE(SparseTriangularView) class InnerIterator; - typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::Index Index; - inline Index rows() { return m_matrix.rows(); } - inline Index cols() { return m_matrix.cols(); } + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return 
m_matrix.cols(); } - typedef typename ei_meta_if::ret, - MatrixType, const MatrixType&>::ret MatrixTypeNested; + typedef typename internal::conditional::ret, + MatrixType, const MatrixType&>::type MatrixTypeNested; inline SparseTriangularView(const MatrixType& matrix) : m_matrix(matrix) {} @@ -53,7 +57,7 @@ template class SparseTriangularView inline const MatrixType& nestedExpression() const { return m_matrix; } template - typename ei_plain_matrix_type_column_major::type + typename internal::plain_matrix_type_column_major::type solve(const MatrixBase& other) const; template void solveInPlace(MatrixBase& other) const; @@ -81,7 +85,7 @@ class SparseTriangularView::InnerIterator : public MatrixType:: EIGEN_STRONG_INLINE operator bool() const { - return SkipFirst ? Base::operator bool() : (Base::operator bool() && this->index() < this->outer()); + return SkipFirst ? Base::operator bool() : (Base::operator bool() && this->index() <= this->outer()); } }; diff --git a/gtsam/3rdparty/Eigen/src/Sparse/SparseUtil.h b/gtsam/3rdparty/Eigen/src/Sparse/SparseUtil.h index ddfa115dc..db9ae98e7 100644 --- a/gtsam/3rdparty/Eigen/src/Sparse/SparseUtil.h +++ b/gtsam/3rdparty/Eigen/src/Sparse/SparseUtil.h @@ -58,15 +58,15 @@ EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=) #define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \ typedef BaseClass Base; \ - typedef typename Eigen::ei_traits::Scalar Scalar; \ + typedef typename Eigen::internal::traits::Scalar Scalar; \ typedef typename Eigen::NumTraits::Real RealScalar; \ - typedef typename Eigen::ei_nested::type Nested; \ - typedef typename Eigen::ei_traits::StorageKind StorageKind; \ - typedef typename Eigen::ei_traits::Index Index; \ - enum { RowsAtCompileTime = Eigen::ei_traits::RowsAtCompileTime, \ - ColsAtCompileTime = Eigen::ei_traits::ColsAtCompileTime, \ - Flags = Eigen::ei_traits::Flags, \ - CoeffReadCost = Eigen::ei_traits::CoeffReadCost, \ + typedef typename Eigen::internal::nested::type Nested; \ + typedef 
typename Eigen::internal::traits::StorageKind StorageKind; \ + typedef typename Eigen::internal::traits::Index Index; \ + enum { RowsAtCompileTime = Eigen::internal::traits::RowsAtCompileTime, \ + ColsAtCompileTime = Eigen::internal::traits::ColsAtCompileTime, \ + Flags = Eigen::internal::traits::Flags, \ + CoeffReadCost = Eigen::internal::traits::CoeffReadCost, \ SizeAtCompileTime = Base::SizeAtCompileTime, \ IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \ using Base::derived; \ @@ -98,29 +98,33 @@ template class DenseTimeSparseProduct; template class SparseDenseOuterProduct; template struct SparseSparseProductReturnType; -template::ColsAtCompileTime> struct DenseSparseProductReturnType; -template::ColsAtCompileTime> struct SparseDenseProductReturnType; +template::ColsAtCompileTime> struct DenseSparseProductReturnType; +template::ColsAtCompileTime> struct SparseDenseProductReturnType; -template struct ei_eval +namespace internal { + +template struct eval { - typedef typename ei_traits::Scalar _Scalar; + typedef typename traits::Scalar _Scalar; enum { - _Flags = ei_traits::Flags + _Flags = traits::Flags }; public: typedef SparseMatrix<_Scalar, _Flags> type; }; -template struct ei_plain_matrix_type +template struct plain_matrix_type { - typedef typename ei_traits::Scalar _Scalar; + typedef typename traits::Scalar _Scalar; enum { - _Flags = ei_traits::Flags + _Flags = traits::Flags }; public: typedef SparseMatrix<_Scalar, _Flags> type; }; +} // end namespace internal + #endif // EIGEN_SPARSEUTIL_H diff --git a/gtsam/3rdparty/Eigen/src/Sparse/SparseVector.h b/gtsam/3rdparty/Eigen/src/Sparse/SparseVector.h index c5d0a6981..ce4bb51a2 100644 --- a/gtsam/3rdparty/Eigen/src/Sparse/SparseVector.h +++ b/gtsam/3rdparty/Eigen/src/Sparse/SparseVector.h @@ -29,13 +29,17 @@ * * \brief a sparse vector class * - * \param _Scalar the scalar type, i.e. the type of the coefficients + * \tparam _Scalar the scalar type, i.e. 
the type of the coefficients * * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme. * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN. */ + +namespace internal { template -struct ei_traits > +struct traits > { typedef _Scalar Scalar; typedef _Index Index; @@ -53,6 +57,7 @@ struct ei_traits > SupportedAccessPatterns = InnerRandomAccessPattern }; }; +} template class SparseVector @@ -68,7 +73,11 @@ class SparseVector public: typedef SparseMatrixBase SparseBase; - enum { IsColVector = ei_traits::IsColVector }; + enum { IsColVector = internal::traits::IsColVector }; + + enum { + Options = _Options + }; CompressedStorage m_data; Index m_size; @@ -82,7 +91,7 @@ class SparseVector EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; } EIGEN_STRONG_INLINE Index innerSize() const { return m_size; } EIGEN_STRONG_INLINE Index outerSize() const { return 1; } - EIGEN_STRONG_INLINE Index innerNonZeros(Index j) const { ei_assert(j==0); return m_size; } + EIGEN_STRONG_INLINE Index innerNonZeros(Index j) const { eigen_assert(j==0); return m_size; } EIGEN_STRONG_INLINE const Scalar* _valuePtr() const { return &m_data.value(0); } EIGEN_STRONG_INLINE Scalar* _valuePtr() { return &m_data.value(0); } @@ -92,14 +101,14 @@ class SparseVector inline Scalar coeff(Index row, Index col) const { - ei_assert((IsColVector ? col : row)==0); + eigen_assert((IsColVector ? col : row)==0); return coeff(IsColVector ? row : col); } inline Scalar coeff(Index i) const { return m_data.at(i); } inline Scalar& coeffRef(Index row, Index col) { - ei_assert((IsColVector ? col : row)==0); + eigen_assert((IsColVector ? col : row)==0); return coeff(IsColVector ? 
row : col); } @@ -125,12 +134,12 @@ class SparseVector inline void startVec(Index outer) { - ei_assert(outer==0); + eigen_assert(outer==0); } inline Scalar& insertBackByOuterInner(Index outer, Index inner) { - ei_assert(outer==0); + eigen_assert(outer==0); return insertBack(inner); } inline Scalar& insertBack(Index i) @@ -143,25 +152,25 @@ class SparseVector { Index inner = IsColVector ? row : col; Index outer = IsColVector ? col : row; - ei_assert(outer==0); + eigen_assert(outer==0); return insert(inner); } Scalar& insert(Index i) { Index startId = 0; - Index id = m_data.size() - 1; + Index p = m_data.size() - 1; // TODO smart realloc - m_data.resize(id+2,1); + m_data.resize(p+2,1); - while ( (id >= startId) && (m_data.index(id) > i) ) + while ( (p >= startId) && (m_data.index(p) > i) ) { - m_data.index(id+1) = m_data.index(id); - m_data.value(id+1) = m_data.value(id); - --id; + m_data.index(p+1) = m_data.index(p); + m_data.value(p+1) = m_data.value(p); + --p; } - m_data.index(id+1) = i; - m_data.value(id+1) = 0; - return m_data.value(id+1); + m_data.index(p+1) = i; + m_data.value(p+1) = 0; + return m_data.value(p+1); } /** @@ -178,7 +187,7 @@ class SparseVector void resize(Index rows, Index cols) { - ei_assert(rows==1 || cols==1); + eigen_assert(rows==1 || cols==1); resize(IsColVector ? 
rows : cols); } @@ -260,9 +269,9 @@ class SparseVector // // 1 - compute the number of coeffs per dest inner vector // // 2 - do the actual copy/eval // // Since each coeff of the rhs has to be evaluated twice, let's evauluate it if needed -// typedef typename ei_nested::type OtherCopy; +// typedef typename internal::nested::type OtherCopy; // OtherCopy otherCopy(other.derived()); -// typedef typename ei_cleantype::type _OtherCopy; +// typedef typename internal::remove_all::type _OtherCopy; // // resize(other.rows(), other.cols()); // Eigen::Map(m_outerIndex,outerSize()).setZero(); @@ -321,7 +330,7 @@ class SparseVector // { // if (m_data.index(i)==other.m_data.index(j)) // { -// res += m_data.value(i) * ei_conj(other.m_data.value(j)); +// res += m_data.value(i) * internal::conj(other.m_data.value(j)); // ++i; ++j; // } // else if (m_data.index(i) @@ -386,7 +399,7 @@ class SparseVector::InnerIterator InnerIterator(const SparseVector& vec, Index outer=0) : m_data(vec.m_data), m_id(0), m_end(static_cast(m_data.size())) { - ei_assert(outer==0); + eigen_assert(outer==0); } InnerIterator(const CompressedStorage& data) diff --git a/gtsam/3rdparty/Eigen/src/Sparse/SparseView.h b/gtsam/3rdparty/Eigen/src/Sparse/SparseView.h index 5a152b255..243065610 100644 --- a/gtsam/3rdparty/Eigen/src/Sparse/SparseView.h +++ b/gtsam/3rdparty/Eigen/src/Sparse/SparseView.h @@ -26,21 +26,25 @@ #ifndef EIGEN_SPARSEVIEW_H #define EIGEN_SPARSEVIEW_H +namespace internal { + template -struct ei_traits > : ei_traits +struct traits > : traits { typedef int Index; typedef Sparse StorageKind; enum { - Flags = int(ei_traits::Flags) & (RowMajorBit) + Flags = int(traits::Flags) & (RowMajorBit) }; }; +} // end namespace internal + template class SparseView : public SparseMatrixBase > { typedef typename MatrixType::Nested MatrixTypeNested; - typedef typename ei_cleantype::type _MatrixTypeNested; + typedef typename internal::remove_all::type _MatrixTypeNested; public: 
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView) @@ -88,7 +92,7 @@ protected: private: void incrementToNonZero() { - while(ei_isMuchSmallerThan(value(), m_view.m_reference, m_view.m_epsilon) && (bool(*this))) + while(internal::isMuchSmallerThan(value(), m_view.m_reference, m_view.m_epsilon) && (bool(*this))) { IterBase::operator++(); } diff --git a/gtsam/3rdparty/Eigen/src/Sparse/TriangularSolver.h b/gtsam/3rdparty/Eigen/src/Sparse/TriangularSolver.h index 3233ab0fd..73468e044 100644 --- a/gtsam/3rdparty/Eigen/src/Sparse/TriangularSolver.h +++ b/gtsam/3rdparty/Eigen/src/Sparse/TriangularSolver.h @@ -25,18 +25,20 @@ #ifndef EIGEN_SPARSETRIANGULARSOLVER_H #define EIGEN_SPARSETRIANGULARSOLVER_H +namespace internal { + template::Flags) & RowMajorBit> -struct ei_sparse_solve_triangular_selector; + int StorageOrder = int(traits::Flags) & RowMajorBit> +struct sparse_solve_triangular_selector; // forward substitution, row-major template -struct ei_sparse_solve_triangular_selector +struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; static void run(const Lhs& lhs, Rhs& other) @@ -60,7 +62,7 @@ struct ei_sparse_solve_triangular_selector other.coeffRef(i,col) = tmp; else { - ei_assert(lastIndex==i); + eigen_assert(lastIndex==i); other.coeffRef(i,col) = tmp/lastVal; } } @@ -70,7 +72,7 @@ struct ei_sparse_solve_triangular_selector // backward substitution, row-major template -struct ei_sparse_solve_triangular_selector +struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; static void run(const Lhs& lhs, Rhs& other) @@ -93,7 +95,7 @@ struct ei_sparse_solve_triangular_selector else { typename Lhs::InnerIterator it(lhs, i); - ei_assert(it && it.index() == i); + eigen_assert(it && it.index() == i); other.coeffRef(i,col) = tmp/it.value(); } } @@ -103,7 +105,7 @@ struct ei_sparse_solve_triangular_selector // forward substitution, col-major template -struct ei_sparse_solve_triangular_selector +struct sparse_solve_triangular_selector { 
typedef typename Rhs::Scalar Scalar; static void run(const Lhs& lhs, Rhs& other) @@ -118,7 +120,7 @@ struct ei_sparse_solve_triangular_selector typename Lhs::InnerIterator it(lhs, i); if(!(Mode & UnitDiag)) { - ei_assert(it.index()==i); + eigen_assert(it.index()==i); tmp /= it.value(); } if (it && it.index()==i) @@ -133,7 +135,7 @@ struct ei_sparse_solve_triangular_selector // backward substitution, col-major template -struct ei_sparse_solve_triangular_selector +struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; static void run(const Lhs& lhs, Rhs& other) @@ -160,22 +162,24 @@ struct ei_sparse_solve_triangular_selector } }; +} // end namespace internal + template template void SparseTriangularView::solveInPlace(MatrixBase& other) const { - ei_assert(m_matrix.cols() == m_matrix.rows()); - ei_assert(m_matrix.cols() == other.rows()); - ei_assert(!(Mode & ZeroDiag)); - ei_assert(Mode & (Upper|Lower)); + eigen_assert(m_matrix.cols() == m_matrix.rows()); + eigen_assert(m_matrix.cols() == other.rows()); + eigen_assert(!(Mode & ZeroDiag)); + eigen_assert(Mode & (Upper|Lower)); - enum { copy = ei_traits::Flags & RowMajorBit }; + enum { copy = internal::traits::Flags & RowMajorBit }; - typedef typename ei_meta_if::type, OtherDerived&>::ret OtherCopy; + typedef typename internal::conditional::type, OtherDerived&>::type OtherCopy; OtherCopy otherCopy(other.derived()); - ei_sparse_solve_triangular_selector::type, Mode>::run(m_matrix, otherCopy); + internal::sparse_solve_triangular_selector::type, Mode>::run(m_matrix, otherCopy); if (copy) other = otherCopy; @@ -183,16 +187,18 @@ void SparseTriangularView::solveInPlace(MatrixBase template -typename ei_plain_matrix_type_column_major::type +typename internal::plain_matrix_type_column_major::type SparseTriangularView::solve(const MatrixBase& other) const { - typename ei_plain_matrix_type_column_major::type res(other); + typename internal::plain_matrix_type_column_major::type res(other); 
solveInPlace(res); return res; } // pure sparse path +namespace internal { + template -struct ei_sparse_solve_triangular_sparse_selector; +struct sparse_solve_triangular_sparse_selector; // forward substitution, col-major template -struct ei_sparse_solve_triangular_sparse_selector +struct sparse_solve_triangular_sparse_selector { typedef typename Rhs::Scalar Scalar; - typedef typename ei_promote_index_type::Index, - typename ei_traits::Index>::type Index; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; static void run(const Lhs& lhs, Rhs& other) { const bool IsLower = (UpLo==Lower); @@ -243,7 +249,7 @@ struct ei_sparse_solve_triangular_sparse_selector { if (IsLower) { - ei_assert(it.index()==i); + eigen_assert(it.index()==i); ci /= it.value(); } else @@ -283,22 +289,24 @@ struct ei_sparse_solve_triangular_sparse_selector } }; +} // end namespace internal + template template void SparseTriangularView::solveInPlace(SparseMatrixBase& other) const { - ei_assert(m_matrix.cols() == m_matrix.rows()); - ei_assert(m_matrix.cols() == other.rows()); - ei_assert(!(Mode & ZeroDiag)); - ei_assert(Mode & (Upper|Lower)); + eigen_assert(m_matrix.cols() == m_matrix.rows()); + eigen_assert(m_matrix.cols() == other.rows()); + eigen_assert(!(Mode & ZeroDiag)); + eigen_assert(Mode & (Upper|Lower)); -// enum { copy = ei_traits::Flags & RowMajorBit }; +// enum { copy = internal::traits::Flags & RowMajorBit }; -// typedef typename ei_meta_if::type, OtherDerived&>::ret OtherCopy; +// typedef typename internal::conditional::type, OtherDerived&>::type OtherCopy; // OtherCopy otherCopy(other.derived()); - ei_sparse_solve_triangular_sparse_selector::run(m_matrix, other.derived()); + internal::sparse_solve_triangular_sparse_selector::run(m_matrix, other.derived()); // if (copy) // other = otherCopy; @@ -319,10 +327,10 @@ void SparseMatrixBase::solveTriangularInPlace(MatrixBase& /** \deprecated */ template template -typename 
ei_plain_matrix_type_column_major::type +typename internal::plain_matrix_type_column_major::type SparseMatrixBase::solveTriangular(const MatrixBase& other) const { - typename ei_plain_matrix_type_column_major::type res(other); + typename internal::plain_matrix_type_column_major::type res(other); derived().solveTriangularInPlace(res); return res; } diff --git a/gtsam/3rdparty/Eigen/src/StlSupport/details.h b/gtsam/3rdparty/Eigen/src/StlSupport/details.h index 5ee4dba94..397c8ef85 100644 --- a/gtsam/3rdparty/Eigen/src/StlSupport/details.h +++ b/gtsam/3rdparty/Eigen/src/StlSupport/details.h @@ -27,58 +27,66 @@ #define EIGEN_STL_DETAILS_H #ifndef EIGEN_ALIGNED_ALLOCATOR - #define EIGEN_ALIGNED_ALLOCATOR Eigen::aligned_allocator + #define EIGEN_ALIGNED_ALLOCATOR Eigen::aligned_allocator #endif namespace Eigen { - // This one is needed to prevent reimplementing the whole std::vector. - template - class aligned_allocator_indirection : public EIGEN_ALIGNED_ALLOCATOR - { - public: - typedef size_t size_type; - typedef ptrdiff_t difference_type; - typedef T* pointer; - typedef const T* const_pointer; - typedef T& reference; - typedef const T& const_reference; - typedef T value_type; + // This one is needed to prevent reimplementing the whole std::vector. 
+ template + class aligned_allocator_indirection : public EIGEN_ALIGNED_ALLOCATOR + { + public: + typedef size_t size_type; + typedef ptrdiff_t difference_type; + typedef T* pointer; + typedef const T* const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef T value_type; - template - struct rebind - { - typedef aligned_allocator_indirection other; - }; + template + struct rebind + { + typedef aligned_allocator_indirection other; + }; - aligned_allocator_indirection() {} - aligned_allocator_indirection(const aligned_allocator_indirection& ) : EIGEN_ALIGNED_ALLOCATOR() {} - aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR& ) {} - template - aligned_allocator_indirection(const aligned_allocator_indirection& ) {} - template - aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR& ) {} - ~aligned_allocator_indirection() {} - }; + aligned_allocator_indirection() {} + aligned_allocator_indirection(const aligned_allocator_indirection& ) : EIGEN_ALIGNED_ALLOCATOR() {} + aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR& ) {} + template + aligned_allocator_indirection(const aligned_allocator_indirection& ) {} + template + aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR& ) {} + ~aligned_allocator_indirection() {} + }; #ifdef _MSC_VER - // sometimes, MSVC detects, at compile time, that the argument x - // in std::vector::resize(size_t s,T x) won't be aligned and generate an error - // even if this function is never called. Whence this little wrapper. 
-#define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) Eigen::ei_workaround_msvc_stl_support - template struct ei_workaround_msvc_stl_support : public T - { - inline ei_workaround_msvc_stl_support() : T() {} - inline ei_workaround_msvc_stl_support(const T& other) : T(other) {} - inline operator T& () { return *static_cast(this); } - inline operator const T& () const { return *static_cast(this); } - template - inline T& operator=(const OtherT& other) - { T::operator=(other); return *this; } - inline ei_workaround_msvc_stl_support& operator=(const ei_workaround_msvc_stl_support& other) - { T::operator=(other); return *this; } - }; + // sometimes, MSVC detects, at compile time, that the argument x + // in std::vector::resize(size_t s,T x) won't be aligned and generate an error + // even if this function is never called. Whence this little wrapper. +#define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) \ + typename Eigen::internal::conditional< \ + Eigen::internal::is_arithmetic::value, \ + T, \ + Eigen::internal::workaround_msvc_stl_support \ + >::type + + namespace internal { + template struct workaround_msvc_stl_support : public T + { + inline workaround_msvc_stl_support() : T() {} + inline workaround_msvc_stl_support(const T& other) : T(other) {} + inline operator T& () { return *static_cast(this); } + inline operator const T& () const { return *static_cast(this); } + template + inline T& operator=(const OtherT& other) + { T::operator=(other); return *this; } + inline workaround_msvc_stl_support& operator=(const workaround_msvc_stl_support& other) + { T::operator=(other); return *this; } + }; + } #else diff --git a/gtsam/3rdparty/Eigen/src/misc/Image.h b/gtsam/3rdparty/Eigen/src/misc/Image.h index 32392fd29..19b3e08cb 100644 --- a/gtsam/3rdparty/Eigen/src/misc/Image.h +++ b/gtsam/3rdparty/Eigen/src/misc/Image.h @@ -25,11 +25,13 @@ #ifndef EIGEN_MISC_IMAGE_H #define EIGEN_MISC_IMAGE_H -/** \class ei_image_retval_base +namespace internal { + +/** \class image_retval_base * */ template 
-struct ei_traits > +struct traits > { typedef typename DecompositionType::MatrixType MatrixType; typedef Matrix< @@ -43,15 +45,15 @@ struct ei_traits > > ReturnType; }; -template struct ei_image_retval_base - : public ReturnByValue > +template struct image_retval_base + : public ReturnByValue > { typedef _DecompositionType DecompositionType; typedef typename DecompositionType::MatrixType MatrixType; - typedef ReturnByValue Base; + typedef ReturnByValue Base; typedef typename Base::Index Index; - ei_image_retval_base(const DecompositionType& dec, const MatrixType& originalMatrix) + image_retval_base(const DecompositionType& dec, const MatrixType& originalMatrix) : m_dec(dec), m_rank(dec.rank()), m_cols(m_rank == 0 ? 1 : m_rank), m_originalMatrix(originalMatrix) @@ -65,7 +67,7 @@ template struct ei_image_retval_base template inline void evalTo(Dest& dst) const { - static_cast*>(this)->evalTo(dst); + static_cast*>(this)->evalTo(dst); } protected: @@ -74,18 +76,20 @@ template struct ei_image_retval_base const MatrixType& m_originalMatrix; }; +} // end namespace internal + #define EIGEN_MAKE_IMAGE_HELPERS(DecompositionType) \ typedef typename DecompositionType::MatrixType MatrixType; \ typedef typename MatrixType::Scalar Scalar; \ typedef typename MatrixType::RealScalar RealScalar; \ typedef typename MatrixType::Index Index; \ - typedef ei_image_retval_base Base; \ + typedef Eigen::internal::image_retval_base Base; \ using Base::dec; \ using Base::originalMatrix; \ using Base::rank; \ using Base::rows; \ using Base::cols; \ - ei_image_retval(const DecompositionType& dec, const MatrixType& originalMatrix) \ + image_retval(const DecompositionType& dec, const MatrixType& originalMatrix) \ : Base(dec, originalMatrix) {} #endif // EIGEN_MISC_IMAGE_H diff --git a/gtsam/3rdparty/Eigen/src/misc/Kernel.h b/gtsam/3rdparty/Eigen/src/misc/Kernel.h index 38a2d4097..0115970e8 100644 --- a/gtsam/3rdparty/Eigen/src/misc/Kernel.h +++ b/gtsam/3rdparty/Eigen/src/misc/Kernel.h @@ -25,11 
+25,13 @@ #ifndef EIGEN_MISC_KERNEL_H #define EIGEN_MISC_KERNEL_H -/** \class ei_kernel_retval_base +namespace internal { + +/** \class kernel_retval_base * */ template -struct ei_traits > +struct traits > { typedef typename DecompositionType::MatrixType MatrixType; typedef Matrix< @@ -45,14 +47,14 @@ struct ei_traits > > ReturnType; }; -template struct ei_kernel_retval_base - : public ReturnByValue > +template struct kernel_retval_base + : public ReturnByValue > { typedef _DecompositionType DecompositionType; - typedef ReturnByValue Base; + typedef ReturnByValue Base; typedef typename Base::Index Index; - ei_kernel_retval_base(const DecompositionType& dec) + kernel_retval_base(const DecompositionType& dec) : m_dec(dec), m_rank(dec.rank()), m_cols(m_rank==dec.cols() ? 1 : dec.cols() - m_rank) @@ -65,7 +67,7 @@ template struct ei_kernel_retval_base template inline void evalTo(Dest& dst) const { - static_cast*>(this)->evalTo(dst); + static_cast*>(this)->evalTo(dst); } protected: @@ -73,16 +75,18 @@ template struct ei_kernel_retval_base Index m_rank, m_cols; }; +} // end namespace internal + #define EIGEN_MAKE_KERNEL_HELPERS(DecompositionType) \ typedef typename DecompositionType::MatrixType MatrixType; \ typedef typename MatrixType::Scalar Scalar; \ typedef typename MatrixType::RealScalar RealScalar; \ typedef typename MatrixType::Index Index; \ - typedef ei_kernel_retval_base Base; \ + typedef Eigen::internal::kernel_retval_base Base; \ using Base::dec; \ using Base::rank; \ using Base::rows; \ using Base::cols; \ - ei_kernel_retval(const DecompositionType& dec) : Base(dec) {} + kernel_retval(const DecompositionType& dec) : Base(dec) {} #endif // EIGEN_MISC_KERNEL_H diff --git a/gtsam/3rdparty/Eigen/src/misc/Solve.h b/gtsam/3rdparty/Eigen/src/misc/Solve.h index d6fc67406..b7cbcadb3 100644 --- a/gtsam/3rdparty/Eigen/src/misc/Solve.h +++ b/gtsam/3rdparty/Eigen/src/misc/Solve.h @@ -25,11 +25,13 @@ #ifndef EIGEN_MISC_SOLVE_H #define EIGEN_MISC_SOLVE_H -/** \class 
ei_solve_retval_base +namespace internal { + +/** \class solve_retval_base * */ template -struct ei_traits > +struct traits > { typedef typename DecompositionType::MatrixType MatrixType; typedef Matrix > Rhs::MaxColsAtCompileTime> ReturnType; }; -template struct ei_solve_retval_base - : public ReturnByValue > +template struct solve_retval_base + : public ReturnByValue > { - typedef typename ei_cleantype::type RhsNestedCleaned; + typedef typename remove_all::type RhsNestedCleaned; typedef _DecompositionType DecompositionType; - typedef ReturnByValue Base; + typedef ReturnByValue Base; typedef typename Base::Index Index; - ei_solve_retval_base(const DecompositionType& dec, const Rhs& rhs) + solve_retval_base(const DecompositionType& dec, const Rhs& rhs) : m_dec(dec), m_rhs(rhs) {} @@ -59,7 +61,7 @@ template struct ei_solve_retval_base template inline void evalTo(Dest& dst) const { - static_cast*>(this)->evalTo(dst); + static_cast*>(this)->evalTo(dst); } protected: @@ -67,17 +69,19 @@ template struct ei_solve_retval_base const typename Rhs::Nested m_rhs; }; +} // end namespace internal + #define EIGEN_MAKE_SOLVE_HELPERS(DecompositionType,Rhs) \ typedef typename DecompositionType::MatrixType MatrixType; \ typedef typename MatrixType::Scalar Scalar; \ typedef typename MatrixType::RealScalar RealScalar; \ typedef typename MatrixType::Index Index; \ - typedef ei_solve_retval_base Base; \ + typedef Eigen::internal::solve_retval_base Base; \ using Base::dec; \ using Base::rhs; \ using Base::rows; \ using Base::cols; \ - ei_solve_retval(const DecompositionType& dec, const Rhs& rhs) \ + solve_retval(const DecompositionType& dec, const Rhs& rhs) \ : Base(dec, rhs) {} #endif // EIGEN_MISC_SOLVE_H diff --git a/gtsam/3rdparty/Eigen/src/plugins/ArrayCwiseBinaryOps.h b/gtsam/3rdparty/Eigen/src/plugins/ArrayCwiseBinaryOps.h index b48a58234..7d509e78f 100644 --- a/gtsam/3rdparty/Eigen/src/plugins/ArrayCwiseBinaryOps.h +++ b/gtsam/3rdparty/Eigen/src/plugins/ArrayCwiseBinaryOps.h @@ 
-14,10 +14,10 @@ operator*(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const * \sa MatrixBase::cwiseQuotient */ template -EIGEN_STRONG_INLINE const CwiseBinaryOp, Derived, OtherDerived> +EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const OtherDerived> operator/(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const { - return CwiseBinaryOp, Derived, OtherDerived>(derived(), other.derived()); + return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); } /** \returns an expression of the coefficient-wise min of \c *this and \a other @@ -27,7 +27,7 @@ operator/(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const * * \sa max() */ -EIGEN_MAKE_CWISE_BINARY_OP(min,ei_scalar_min_op) +EIGEN_MAKE_CWISE_BINARY_OP(min,internal::scalar_min_op) /** \returns an expression of the coefficient-wise max of \c *this and \a other * @@ -36,7 +36,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(min,ei_scalar_min_op) * * \sa min() */ -EIGEN_MAKE_CWISE_BINARY_OP(max,ei_scalar_max_op) +EIGEN_MAKE_CWISE_BINARY_OP(max,internal::scalar_max_op) /** \returns an expression of the coefficient-wise \< operator of *this and \a other * @@ -111,13 +111,13 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator!=,std::not_equal_to) * * \sa operator+=(), operator-() */ -inline const CwiseUnaryOp, Derived> +inline const CwiseUnaryOp, const Derived> operator+(const Scalar& scalar) const { - return CwiseUnaryOp, Derived>(derived(), ei_scalar_add_op(scalar)); + return CwiseUnaryOp, const Derived>(derived(), internal::scalar_add_op(scalar)); } -friend inline const CwiseUnaryOp, Derived> +friend inline const CwiseUnaryOp, const Derived> operator+(const Scalar& scalar,const EIGEN_CURRENT_STORAGE_BASE_CLASS& other) { return other + scalar; @@ -130,13 +130,13 @@ operator+(const Scalar& scalar,const EIGEN_CURRENT_STORAGE_BASE_CLASS& * * \sa operator+(), operator-=() */ -inline const CwiseUnaryOp, Derived> +inline const CwiseUnaryOp, const Derived> operator-(const Scalar& scalar) const { return *this + 
(-scalar); } -friend inline const CwiseUnaryOp, CwiseUnaryOp,Derived> > +friend inline const CwiseUnaryOp, const CwiseUnaryOp, const Derived> > operator-(const Scalar& scalar,const EIGEN_CURRENT_STORAGE_BASE_CLASS& other) { return (-other) + scalar; diff --git a/gtsam/3rdparty/Eigen/src/plugins/ArrayCwiseUnaryOps.h b/gtsam/3rdparty/Eigen/src/plugins/ArrayCwiseUnaryOps.h index 9695bf921..0dffaf413 100644 --- a/gtsam/3rdparty/Eigen/src/plugins/ArrayCwiseUnaryOps.h +++ b/gtsam/3rdparty/Eigen/src/plugins/ArrayCwiseUnaryOps.h @@ -7,7 +7,7 @@ * * \sa abs2() */ -EIGEN_STRONG_INLINE const CwiseUnaryOp, Derived> +EIGEN_STRONG_INLINE const CwiseUnaryOp, const Derived> abs() const { return derived(); @@ -20,7 +20,7 @@ abs() const * * \sa abs(), square() */ -EIGEN_STRONG_INLINE const CwiseUnaryOp, Derived> +EIGEN_STRONG_INLINE const CwiseUnaryOp, const Derived> abs2() const { return derived(); @@ -33,7 +33,7 @@ abs2() const * * \sa pow(), log(), sin(), cos() */ -inline const CwiseUnaryOp, Derived> +inline const CwiseUnaryOp, const Derived> exp() const { return derived(); @@ -46,7 +46,7 @@ exp() const * * \sa exp() */ -inline const CwiseUnaryOp, Derived> +inline const CwiseUnaryOp, const Derived> log() const { return derived(); @@ -59,7 +59,7 @@ log() const * * \sa pow(), square() */ -inline const CwiseUnaryOp, Derived> +inline const CwiseUnaryOp, const Derived> sqrt() const { return derived(); @@ -70,9 +70,9 @@ sqrt() const * Example: \include Cwise_cos.cpp * Output: \verbinclude Cwise_cos.out * - * \sa sin(), exp() + * \sa sin(), acos() */ -inline const CwiseUnaryOp, Derived> +inline const CwiseUnaryOp, const Derived> cos() const { return derived(); @@ -84,14 +84,53 @@ cos() const * Example: \include Cwise_sin.cpp * Output: \verbinclude Cwise_sin.out * - * \sa cos(), exp() + * \sa cos(), asin() */ -inline const CwiseUnaryOp, Derived> +inline const CwiseUnaryOp, const Derived> sin() const { return derived(); } +/** \returns an expression of the coefficient-wise arc cosine of 
*this. + * + * Example: \include Cwise_acos.cpp + * Output: \verbinclude Cwise_acos.out + * + * \sa cos(), asin() + */ +inline const CwiseUnaryOp, const Derived> +acos() const +{ + return derived(); +} + +/** \returns an expression of the coefficient-wise arc sine of *this. + * + * Example: \include Cwise_asin.cpp + * Output: \verbinclude Cwise_asin.out + * + * \sa sin(), acos() + */ +inline const CwiseUnaryOp, const Derived> +asin() const +{ + return derived(); +} + +/** \returns an expression of the coefficient-wise tan of *this. + * + * Example: \include Cwise_tan.cpp + * Output: \verbinclude Cwise_tan.out + * + * \sa cos(), sin() + */ +inline const CwiseUnaryOp, Derived> +tan() const +{ + return derived(); +} + /** \returns an expression of the coefficient-wise power of *this to the given exponent. * @@ -100,11 +139,11 @@ sin() const * * \sa exp(), log() */ -inline const CwiseUnaryOp, Derived> +inline const CwiseUnaryOp, const Derived> pow(const Scalar& exponent) const { - return CwiseUnaryOp,Derived> - (derived(), ei_scalar_pow_op(exponent)); + return CwiseUnaryOp, const Derived> + (derived(), internal::scalar_pow_op(exponent)); } @@ -115,7 +154,7 @@ pow(const Scalar& exponent) const * * \sa operator/(), operator*() */ -inline const CwiseUnaryOp, Derived> +inline const CwiseUnaryOp, const Derived> inverse() const { return derived(); @@ -128,7 +167,7 @@ inverse() const * * \sa operator/(), operator*(), abs2() */ -inline const CwiseUnaryOp, Derived> +inline const CwiseUnaryOp, const Derived> square() const { return derived(); @@ -141,16 +180,16 @@ square() const * * \sa square(), pow() */ -inline const CwiseUnaryOp, Derived> +inline const CwiseUnaryOp, const Derived> cube() const { return derived(); } #define EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(METHOD_NAME,FUNCTOR) \ - inline const CwiseUnaryOp >,Derived> \ + inline const CwiseUnaryOp >, const Derived> \ METHOD_NAME(const Scalar& s) const { \ - return CwiseUnaryOp >,Derived> \ + return CwiseUnaryOp >, const 
Derived> \ (derived(), std::bind2nd(FUNCTOR(), s)); \ } diff --git a/gtsam/3rdparty/Eigen/src/plugins/BlockMethods.h b/gtsam/3rdparty/Eigen/src/plugins/BlockMethods.h index 3aae95a64..4eba93338 100644 --- a/gtsam/3rdparty/Eigen/src/plugins/BlockMethods.h +++ b/gtsam/3rdparty/Eigen/src/plugins/BlockMethods.h @@ -29,17 +29,23 @@ #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal expression type of a column */ -typedef Block::RowsAtCompileTime, 1, !IsRowMajor> ColXpr; +typedef Block::RowsAtCompileTime, 1, !IsRowMajor> ColXpr; +typedef const Block::RowsAtCompileTime, 1, !IsRowMajor> ConstColXpr; /** \internal expression type of a row */ -typedef Block::ColsAtCompileTime, IsRowMajor> RowXpr; +typedef Block::ColsAtCompileTime, IsRowMajor> RowXpr; +typedef const Block::ColsAtCompileTime, IsRowMajor> ConstRowXpr; /** \internal expression type of a block of whole columns */ -typedef Block::RowsAtCompileTime, Dynamic, !IsRowMajor> ColsBlockXpr; +typedef Block::RowsAtCompileTime, Dynamic, !IsRowMajor> ColsBlockXpr; +typedef const Block::RowsAtCompileTime, Dynamic, !IsRowMajor> ConstColsBlockXpr; /** \internal expression type of a block of whole rows */ -typedef Block::ColsAtCompileTime, IsRowMajor> RowsBlockXpr; +typedef Block::ColsAtCompileTime, IsRowMajor> RowsBlockXpr; +typedef const Block::ColsAtCompileTime, IsRowMajor> ConstRowsBlockXpr; /** \internal expression type of a block of whole columns */ -template struct NColsBlockXpr { typedef Block::RowsAtCompileTime, N, !IsRowMajor> Type; }; +template struct NColsBlockXpr { typedef Block::RowsAtCompileTime, N, !IsRowMajor> Type; }; +template struct ConstNColsBlockXpr { typedef const Block::RowsAtCompileTime, N, !IsRowMajor> Type; }; /** \internal expression type of a block of whole rows */ -template struct NRowsBlockXpr { typedef Block::ColsAtCompileTime, IsRowMajor> Type; }; +template struct NRowsBlockXpr { typedef Block::ColsAtCompileTime, IsRowMajor> Type; }; +template struct ConstNRowsBlockXpr { typedef const 
Block::ColsAtCompileTime, IsRowMajor> Type; }; #endif // not EIGEN_PARSED_BY_DOXYGEN @@ -66,9 +72,9 @@ inline Block block(Index startRow, Index startCol, Index blockRows, Ind } /** This is the const version of block(Index,Index,Index,Index). */ -inline const Block block(Index startRow, Index startCol, Index blockRows, Index blockCols) const +inline const Block block(Index startRow, Index startCol, Index blockRows, Index blockCols) const { - return Block(derived(), startRow, startCol, blockRows, blockCols); + return Block(derived(), startRow, startCol, blockRows, blockCols); } @@ -90,9 +96,9 @@ inline Block topRightCorner(Index cRows, Index cCols) } /** This is the const version of topRightCorner(Index, Index).*/ -inline const Block topRightCorner(Index cRows, Index cCols) const +inline const Block topRightCorner(Index cRows, Index cCols) const { - return Block(derived(), 0, cols() - cCols, cRows, cCols); + return Block(derived(), 0, cols() - cCols, cRows, cCols); } /** \returns an expression of a fixed-size top-right corner of *this. @@ -112,9 +118,9 @@ inline Block topRightCorner() /** This is the const version of topRightCorner().*/ template -inline const Block topRightCorner() const +inline const Block topRightCorner() const { - return Block(derived(), 0, cols() - CCols); + return Block(derived(), 0, cols() - CCols); } @@ -136,9 +142,9 @@ inline Block topLeftCorner(Index cRows, Index cCols) } /** This is the const version of topLeftCorner(Index, Index).*/ -inline const Block topLeftCorner(Index cRows, Index cCols) const +inline const Block topLeftCorner(Index cRows, Index cCols) const { - return Block(derived(), 0, 0, cRows, cCols); + return Block(derived(), 0, 0, cRows, cCols); } /** \returns an expression of a fixed-size top-left corner of *this. 
@@ -158,9 +164,9 @@ inline Block topLeftCorner() /** This is the const version of topLeftCorner().*/ template -inline const Block topLeftCorner() const +inline const Block topLeftCorner() const { - return Block(derived(), 0, 0); + return Block(derived(), 0, 0); } @@ -181,9 +187,9 @@ inline Block bottomRightCorner(Index cRows, Index cCols) } /** This is the const version of bottomRightCorner(Index, Index).*/ -inline const Block bottomRightCorner(Index cRows, Index cCols) const +inline const Block bottomRightCorner(Index cRows, Index cCols) const { - return Block(derived(), rows() - cRows, cols() - cCols, cRows, cCols); + return Block(derived(), rows() - cRows, cols() - cCols, cRows, cCols); } /** \returns an expression of a fixed-size bottom-right corner of *this. @@ -203,9 +209,9 @@ inline Block bottomRightCorner() /** This is the const version of bottomRightCorner().*/ template -inline const Block bottomRightCorner() const +inline const Block bottomRightCorner() const { - return Block(derived(), rows() - CRows, cols() - CCols); + return Block(derived(), rows() - CRows, cols() - CCols); } @@ -226,9 +232,9 @@ inline Block bottomLeftCorner(Index cRows, Index cCols) } /** This is the const version of bottomLeftCorner(Index, Index).*/ -inline const Block bottomLeftCorner(Index cRows, Index cCols) const +inline const Block bottomLeftCorner(Index cRows, Index cCols) const { - return Block(derived(), rows() - cRows, 0, cRows, cCols); + return Block(derived(), rows() - cRows, 0, cRows, cCols); } /** \returns an expression of a fixed-size bottom-left corner of *this. 
@@ -248,9 +254,9 @@ inline Block bottomLeftCorner() /** This is the const version of bottomLeftCorner().*/ template -inline const Block bottomLeftCorner() const +inline const Block bottomLeftCorner() const { - return Block(derived(), rows() - CRows, 0); + return Block(derived(), rows() - CRows, 0); } @@ -270,9 +276,9 @@ inline RowsBlockXpr topRows(Index n) } /** This is the const version of topRows(Index).*/ -inline const RowsBlockXpr topRows(Index n) const +inline ConstRowsBlockXpr topRows(Index n) const { - return RowsBlockXpr(derived(), 0, 0, n, cols()); + return ConstRowsBlockXpr(derived(), 0, 0, n, cols()); } /** \returns a block consisting of the top rows of *this. @@ -292,9 +298,9 @@ inline typename NRowsBlockXpr::Type topRows() /** This is the const version of topRows().*/ template -inline const typename NRowsBlockXpr::Type topRows() const +inline typename ConstNRowsBlockXpr::Type topRows() const { - return typename NRowsBlockXpr::Type(derived(), 0, 0, N, cols()); + return typename ConstNRowsBlockXpr::Type(derived(), 0, 0, N, cols()); } @@ -314,9 +320,9 @@ inline RowsBlockXpr bottomRows(Index n) } /** This is the const version of bottomRows(Index).*/ -inline const RowsBlockXpr bottomRows(Index n) const +inline ConstRowsBlockXpr bottomRows(Index n) const { - return RowsBlockXpr(derived(), rows() - n, 0, n, cols()); + return ConstRowsBlockXpr(derived(), rows() - n, 0, n, cols()); } /** \returns a block consisting of the bottom rows of *this. 
@@ -336,9 +342,9 @@ inline typename NRowsBlockXpr::Type bottomRows() /** This is the const version of bottomRows().*/ template -inline const typename NRowsBlockXpr::Type bottomRows() const +inline typename ConstNRowsBlockXpr::Type bottomRows() const { - return typename NRowsBlockXpr::Type(derived(), rows() - N, 0, N, cols()); + return typename ConstNRowsBlockXpr::Type(derived(), rows() - N, 0, N, cols()); } @@ -359,9 +365,9 @@ inline RowsBlockXpr middleRows(Index startRow, Index numRows) } /** This is the const version of middleRows(Index,Index).*/ -inline const RowsBlockXpr middleRows(Index startRow, Index numRows) const +inline ConstRowsBlockXpr middleRows(Index startRow, Index numRows) const { - return RowsBlockXpr(derived(), startRow, 0, numRows, cols()); + return ConstRowsBlockXpr(derived(), startRow, 0, numRows, cols()); } /** \returns a block consisting of a range of rows of *this. @@ -382,9 +388,9 @@ inline typename NRowsBlockXpr::Type middleRows(Index startRow) /** This is the const version of middleRows().*/ template -inline const typename NRowsBlockXpr::Type middleRows(Index startRow) const +inline typename ConstNRowsBlockXpr::Type middleRows(Index startRow) const { - return typename NRowsBlockXpr::Type(derived(), startRow, 0, N, cols()); + return typename ConstNRowsBlockXpr::Type(derived(), startRow, 0, N, cols()); } @@ -404,9 +410,9 @@ inline ColsBlockXpr leftCols(Index n) } /** This is the const version of leftCols(Index).*/ -inline const ColsBlockXpr leftCols(Index n) const +inline ConstColsBlockXpr leftCols(Index n) const { - return ColsBlockXpr(derived(), 0, 0, rows(), n); + return ConstColsBlockXpr(derived(), 0, 0, rows(), n); } /** \returns a block consisting of the left columns of *this. 
@@ -426,9 +432,9 @@ inline typename NColsBlockXpr::Type leftCols() /** This is the const version of leftCols().*/ template -inline const typename NColsBlockXpr::Type leftCols() const +inline typename ConstNColsBlockXpr::Type leftCols() const { - return typename NColsBlockXpr::Type(derived(), 0, 0, rows(), N); + return typename ConstNColsBlockXpr::Type(derived(), 0, 0, rows(), N); } @@ -448,9 +454,9 @@ inline ColsBlockXpr rightCols(Index n) } /** This is the const version of rightCols(Index).*/ -inline const ColsBlockXpr rightCols(Index n) const +inline ConstColsBlockXpr rightCols(Index n) const { - return ColsBlockXpr(derived(), 0, cols() - n, rows(), n); + return ConstColsBlockXpr(derived(), 0, cols() - n, rows(), n); } /** \returns a block consisting of the right columns of *this. @@ -470,9 +476,9 @@ inline typename NColsBlockXpr::Type rightCols() /** This is the const version of rightCols().*/ template -inline const typename NColsBlockXpr::Type rightCols() const +inline typename ConstNColsBlockXpr::Type rightCols() const { - return typename NColsBlockXpr::Type(derived(), 0, cols() - N, rows(), N); + return typename ConstNColsBlockXpr::Type(derived(), 0, cols() - N, rows(), N); } @@ -493,9 +499,9 @@ inline ColsBlockXpr middleCols(Index startCol, Index numCols) } /** This is the const version of middleCols(Index,Index).*/ -inline const ColsBlockXpr middleCols(Index startCol, Index numCols) const +inline ConstColsBlockXpr middleCols(Index startCol, Index numCols) const { - return ColsBlockXpr(derived(), 0, startCol, rows(), numCols); + return ConstColsBlockXpr(derived(), 0, startCol, rows(), numCols); } /** \returns a block consisting of a range of columns of *this. 
@@ -516,9 +522,9 @@ inline typename NColsBlockXpr::Type middleCols(Index startCol) /** This is the const version of middleCols().*/ template -inline const typename NColsBlockXpr::Type middleCols(Index startCol) const +inline typename ConstNColsBlockXpr::Type middleCols(Index startCol) const { - return typename NColsBlockXpr::Type(derived(), 0, startCol, rows(), N); + return typename ConstNColsBlockXpr::Type(derived(), 0, startCol, rows(), N); } @@ -547,9 +553,9 @@ inline Block block(Index startRow, Index startCol /** This is the const version of block<>(Index, Index). */ template -inline const Block block(Index startRow, Index startCol) const +inline const Block block(Index startRow, Index startCol) const { - return Block(derived(), startRow, startCol); + return Block(derived(), startRow, startCol); } /** \returns an expression of the \a i-th column of *this. Note that the numbering starts at 0. @@ -564,9 +570,9 @@ inline ColXpr col(Index i) } /** This is the const version of col(). */ -inline const ColXpr col(Index i) const +inline ConstColXpr col(Index i) const { - return ColXpr(derived(), i); + return ConstColXpr(derived(), i); } /** \returns an expression of the \a i-th row of *this. Note that the numbering starts at 0. @@ -581,9 +587,9 @@ inline RowXpr row(Index i) } /** This is the const version of row(). 
*/ -inline const RowXpr row(Index i) const +inline ConstRowXpr row(Index i) const { - return RowXpr(derived(), i); + return ConstRowXpr(derived(), i); } #endif // EIGEN_BLOCKMETHODS_H diff --git a/gtsam/3rdparty/Eigen/src/plugins/CommonCwiseBinaryOps.h b/gtsam/3rdparty/Eigen/src/plugins/CommonCwiseBinaryOps.h index bb0b17c09..8f7765e72 100644 --- a/gtsam/3rdparty/Eigen/src/plugins/CommonCwiseBinaryOps.h +++ b/gtsam/3rdparty/Eigen/src/plugins/CommonCwiseBinaryOps.h @@ -31,7 +31,7 @@ * * \sa class CwiseBinaryOp, operator-=() */ -EIGEN_MAKE_CWISE_BINARY_OP(operator-,ei_scalar_difference_op) +EIGEN_MAKE_CWISE_BINARY_OP(operator-,internal::scalar_difference_op) /** \returns an expression of the sum of \c *this and \a other * @@ -39,7 +39,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator-,ei_scalar_difference_op) * * \sa class CwiseBinaryOp, operator+=() */ -EIGEN_MAKE_CWISE_BINARY_OP(operator+,ei_scalar_sum_op) +EIGEN_MAKE_CWISE_BINARY_OP(operator+,internal::scalar_sum_op) /** \returns an expression of a custom coefficient-wise operator \a func of *this and \a other * @@ -53,9 +53,9 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator+,ei_scalar_sum_op) * \sa class CwiseBinaryOp, operator+(), operator-(), cwiseProduct() */ template -EIGEN_STRONG_INLINE const CwiseBinaryOp +EIGEN_STRONG_INLINE const CwiseBinaryOp binaryExpr(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other, const CustomBinaryOp& func = CustomBinaryOp()) const { - return CwiseBinaryOp(derived(), other.derived(), func); + return CwiseBinaryOp(derived(), other.derived(), func); } diff --git a/gtsam/3rdparty/Eigen/src/plugins/CommonCwiseUnaryOps.h b/gtsam/3rdparty/Eigen/src/plugins/CommonCwiseUnaryOps.h index c16b177d8..941d5153c 100644 --- a/gtsam/3rdparty/Eigen/src/plugins/CommonCwiseUnaryOps.h +++ b/gtsam/3rdparty/Eigen/src/plugins/CommonCwiseUnaryOps.h @@ -28,34 +28,34 @@ #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal Represents a scalar multiple of an expression */ -typedef CwiseUnaryOp, Derived> ScalarMultipleReturnType; +typedef 
CwiseUnaryOp, const Derived> ScalarMultipleReturnType; /** \internal Represents a quotient of an expression by a scalar*/ -typedef CwiseUnaryOp, Derived> ScalarQuotient1ReturnType; +typedef CwiseUnaryOp, const Derived> ScalarQuotient1ReturnType; /** \internal the return type of conjugate() */ -typedef typename ei_meta_if::IsComplex, - const CwiseUnaryOp, Derived>, +typedef typename internal::conditional::IsComplex, + const CwiseUnaryOp, const Derived>, const Derived& - >::ret ConjugateReturnType; + >::type ConjugateReturnType; /** \internal the return type of real() const */ -typedef typename ei_meta_if::IsComplex, - const CwiseUnaryOp, Derived>, +typedef typename internal::conditional::IsComplex, + const CwiseUnaryOp, const Derived>, const Derived& - >::ret RealReturnType; + >::type RealReturnType; /** \internal the return type of real() */ -typedef typename ei_meta_if::IsComplex, - CwiseUnaryView, Derived>, +typedef typename internal::conditional::IsComplex, + CwiseUnaryView, Derived>, Derived& - >::ret NonConstRealReturnType; + >::type NonConstRealReturnType; /** \internal the return type of imag() const */ -typedef CwiseUnaryOp, Derived> ImagReturnType; +typedef CwiseUnaryOp, const Derived> ImagReturnType; /** \internal the return type of imag() */ -typedef CwiseUnaryView, Derived> NonConstImagReturnType; +typedef CwiseUnaryView, Derived> NonConstImagReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN /** \returns an expression of the opposite of \c *this */ -inline const CwiseUnaryOp::Scalar>,Derived> +inline const CwiseUnaryOp::Scalar>, const Derived> operator-() const { return derived(); } @@ -63,8 +63,8 @@ operator-() const { return derived(); } inline const ScalarMultipleReturnType operator*(const Scalar& scalar) const { - return CwiseUnaryOp, Derived> - (derived(), ei_scalar_multiple_op(scalar)); + return CwiseUnaryOp, const Derived> + (derived(), internal::scalar_multiple_op(scalar)); } #ifdef EIGEN_PARSED_BY_DOXYGEN @@ -72,26 +72,26 @@ const 
ScalarMultipleReturnType operator*(const RealScalar& scalar) const; #endif /** \returns an expression of \c *this divided by the scalar value \a scalar */ -inline const CwiseUnaryOp::Scalar>, Derived> +inline const CwiseUnaryOp::Scalar>, const Derived> operator/(const Scalar& scalar) const { - return CwiseUnaryOp, Derived> - (derived(), ei_scalar_quotient1_op(scalar)); + return CwiseUnaryOp, const Derived> + (derived(), internal::scalar_quotient1_op(scalar)); } /** Overloaded for efficient real matrix times complex scalar value */ -inline const CwiseUnaryOp >, Derived> +inline const CwiseUnaryOp >, const Derived> operator*(const std::complex& scalar) const { - return CwiseUnaryOp >, Derived> - (*static_cast(this), ei_scalar_multiple2_op >(scalar)); + return CwiseUnaryOp >, const Derived> + (*static_cast(this), internal::scalar_multiple2_op >(scalar)); } inline friend const ScalarMultipleReturnType operator*(const Scalar& scalar, const StorageBaseType& matrix) { return matrix*scalar; } -inline friend const CwiseUnaryOp >, Derived> +inline friend const CwiseUnaryOp >, const Derived> operator*(const std::complex& scalar, const StorageBaseType& matrix) { return matrix*scalar; } @@ -103,7 +103,7 @@ operator*(const std::complex& scalar, const StorageBaseType& matrix) * \sa class CwiseUnaryOp */ template -typename ei_cast_return_type::Scalar, NewType>, Derived> >::type +typename internal::cast_return_type::Scalar, NewType>, const Derived> >::type cast() const { return derived(); @@ -130,10 +130,18 @@ real() const { return derived(); } inline const ImagReturnType imag() const { return derived(); } -/** \returns an expression of a custom coefficient-wise unary operator \a func of *this +/** \brief Apply a unary operator coefficient-wise + * \param[in] func Functor implementing the unary operator + * \tparam CustomUnaryOp Type of \a func + * \returns An expression of a custom coefficient-wise unary operator \a func of *this * - * The template parameter \a CustomUnaryOp is 
the type of the functor - * of the custom unary operator. + * The function \c ptr_fun() from the C++ standard library can be used to make functors out of normal functions. + * + * Example: + * \include class_CwiseUnaryOp_ptrfun.cpp + * Output: \verbinclude class_CwiseUnaryOp_ptrfun.out + * + * Genuine functors allow for more possibilities, for instance it may contain a state. * * Example: * \include class_CwiseUnaryOp.cpp @@ -142,10 +150,10 @@ imag() const { return derived(); } * \sa class CwiseUnaryOp, class CwiseBinaryOp */ template -inline const CwiseUnaryOp +inline const CwiseUnaryOp unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const { - return CwiseUnaryOp(derived(), func); + return CwiseUnaryOp(derived(), func); } /** \returns an expression of a custom coefficient-wise unary operator \a func of *this @@ -160,10 +168,10 @@ unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const * \sa class CwiseUnaryOp, class CwiseBinaryOp */ template -inline const CwiseUnaryView +inline const CwiseUnaryView unaryViewExpr(const CustomViewOp& func = CustomViewOp()) const { - return CwiseUnaryView(derived(), func); + return CwiseUnaryView(derived(), func); } /** \returns a non const expression of the real part of \c *this. 
diff --git a/gtsam/3rdparty/Eigen/src/plugins/MatrixCwiseBinaryOps.h b/gtsam/3rdparty/Eigen/src/plugins/MatrixCwiseBinaryOps.h index a1f673df5..35183f91f 100644 --- a/gtsam/3rdparty/Eigen/src/plugins/MatrixCwiseBinaryOps.h +++ b/gtsam/3rdparty/Eigen/src/plugins/MatrixCwiseBinaryOps.h @@ -52,10 +52,10 @@ cwiseProduct(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const * \sa cwiseNotEqual(), isApprox(), isMuchSmallerThan() */ template -inline const CwiseBinaryOp, Derived, OtherDerived> +inline const CwiseBinaryOp, const Derived, const OtherDerived> cwiseEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const { - return CwiseBinaryOp, Derived, OtherDerived>(derived(), other.derived()); + return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); } /** \returns an expression of the coefficient-wise != operator of *this and \a other @@ -71,10 +71,10 @@ cwiseEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const * \sa cwiseEqual(), isApprox(), isMuchSmallerThan() */ template -inline const CwiseBinaryOp, Derived, OtherDerived> +inline const CwiseBinaryOp, const Derived, const OtherDerived> cwiseNotEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const { - return CwiseBinaryOp, Derived, OtherDerived>(derived(), other.derived()); + return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); } /** \returns an expression of the coefficient-wise min of *this and \a other @@ -85,10 +85,10 @@ cwiseNotEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const * \sa class CwiseBinaryOp, max() */ template -EIGEN_STRONG_INLINE const CwiseBinaryOp, Derived, OtherDerived> +EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const OtherDerived> cwiseMin(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const { - return CwiseBinaryOp, Derived, OtherDerived>(derived(), other.derived()); + return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); } /** \returns an expression of the 
coefficient-wise max of *this and \a other @@ -99,10 +99,10 @@ cwiseMin(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const * \sa class CwiseBinaryOp, min() */ template -EIGEN_STRONG_INLINE const CwiseBinaryOp, Derived, OtherDerived> +EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const OtherDerived> cwiseMax(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const { - return CwiseBinaryOp, Derived, OtherDerived>(derived(), other.derived()); + return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); } /** \returns an expression of the coefficient-wise quotient of *this and \a other @@ -113,8 +113,8 @@ cwiseMax(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const * \sa class CwiseBinaryOp, cwiseProduct(), cwiseInverse() */ template -EIGEN_STRONG_INLINE const CwiseBinaryOp, Derived, OtherDerived> +EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const OtherDerived> cwiseQuotient(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const { - return CwiseBinaryOp, Derived, OtherDerived>(derived(), other.derived()); + return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); } diff --git a/gtsam/3rdparty/Eigen/src/plugins/MatrixCwiseUnaryOps.h b/gtsam/3rdparty/Eigen/src/plugins/MatrixCwiseUnaryOps.h index a33de85a5..a3d9a0e14 100644 --- a/gtsam/3rdparty/Eigen/src/plugins/MatrixCwiseUnaryOps.h +++ b/gtsam/3rdparty/Eigen/src/plugins/MatrixCwiseUnaryOps.h @@ -32,7 +32,7 @@ * * \sa cwiseAbs2() */ -EIGEN_STRONG_INLINE const CwiseUnaryOp,Derived> +EIGEN_STRONG_INLINE const CwiseUnaryOp, const Derived> cwiseAbs() const { return derived(); } /** \returns an expression of the coefficient-wise squared absolute value of \c *this @@ -42,7 +42,7 @@ cwiseAbs() const { return derived(); } * * \sa cwiseAbs() */ -EIGEN_STRONG_INLINE const CwiseUnaryOp,Derived> +EIGEN_STRONG_INLINE const CwiseUnaryOp, const Derived> cwiseAbs2() const { return derived(); } /** \returns an expression of the coefficient-wise square root of 
*this. @@ -52,7 +52,7 @@ cwiseAbs2() const { return derived(); } * * \sa cwisePow(), cwiseSquare() */ -inline const CwiseUnaryOp,Derived> +inline const CwiseUnaryOp, const Derived> cwiseSqrt() const { return derived(); } /** \returns an expression of the coefficient-wise inverse of *this. @@ -62,7 +62,7 @@ cwiseSqrt() const { return derived(); } * * \sa cwiseProduct() */ -inline const CwiseUnaryOp,Derived> +inline const CwiseUnaryOp, const Derived> cwiseInverse() const { return derived(); } /** \returns an expression of the coefficient-wise == operator of \c *this and a scalar \a s @@ -74,9 +74,9 @@ cwiseInverse() const { return derived(); } * * \sa cwiseEqual(const MatrixBase &) const */ -inline const CwiseUnaryOp >,Derived> +inline const CwiseUnaryOp >, const Derived> cwiseEqual(const Scalar& s) const { - return CwiseUnaryOp >,Derived> + return CwiseUnaryOp >,const Derived> (derived(), std::bind1st(std::equal_to(), s)); } diff --git a/gtsam/linear/HessianFactor.cpp b/gtsam/linear/HessianFactor.cpp index 3f6444990..8298b32ee 100644 --- a/gtsam/linear/HessianFactor.cpp +++ b/gtsam/linear/HessianFactor.cpp @@ -132,12 +132,13 @@ namespace gtsam { throw invalid_argument("Cannot construct HessianFactor from JacobianFactor with constrained noise model"); else { Vector invsigmas = jf.model_->invsigmas(); + typedef Eigen::Map ConstEigenMap; typedef Eigen::Map EigenMap; - typedef typeof(EigenMap(&jf.matrix_(0,0),0,0).block(0,0,0,0)) EigenBlock; - EigenBlock A(EigenMap(&jf.matrix_(0,0),jf.matrix_.size1(),jf.matrix_.size2()).block( + typedef typeof(ConstEigenMap(&jf.matrix_(0,0),0,0).block(0,0,0,0)) EigenBlock; + EigenBlock A(ConstEigenMap(&jf.matrix_(0,0),jf.matrix_.size1(),jf.matrix_.size2()).block( jf.Ab_.rowStart(),jf.Ab_.offset(0), jf.Ab_.full().size1(), jf.Ab_.full().size2())); - typedef typeof(Eigen::Map(&invsigmas(0),0).asDiagonal()) EigenDiagonal; - EigenDiagonal R(Eigen::Map(&invsigmas(0),jf.model_->dim()).asDiagonal()); + typedef 
typeof(Eigen::Map(&invsigmas(0),0).asDiagonal()) EigenDiagonal; + EigenDiagonal R(Eigen::Map(&invsigmas(0),jf.model_->dim()).asDiagonal()); info_.copyStructureFrom(jf.Ab_); EigenMap L(EigenMap(&matrix_(0,0), matrix_.size1(), matrix_.size2())); L.noalias() = A.transpose() * R * R * A; @@ -242,7 +243,7 @@ void HessianFactor::updateATA(const HessianFactor& update, const Scatter& scatte } Eigen::Map information(&matrix_(0,0), matrix_.size1(), matrix_.size2()); - Eigen::Map updateInform(&update.matrix_(0,0), update.matrix_.size1(), update.matrix_.size2()); + Eigen::Map updateInform(&update.matrix_(0,0), update.matrix_.size1(), update.matrix_.size2()); // Apply updates to the upper triangle tic(3, "update"); @@ -302,7 +303,7 @@ void HessianFactor::updateATA(const JacobianFactor& update, const Scatter& scatt } Eigen::Map information(&matrix_(0,0), matrix_.size1(), matrix_.size2()); - Eigen::Map updateAf(&update.matrix_(0,0), update.matrix_.size1(), update.matrix_.size2()); + Eigen::Map updateAf(&update.matrix_(0,0), update.matrix_.size1(), update.matrix_.size2()); Eigen::Block updateA(updateAf.block( update.Ab_.rowStart(),update.Ab_.offset(0), update.Ab_.full().size1(), update.Ab_.full().size2()));