From 22cce59f2c2b8c7f530120f0fb71aa117bc495ad Mon Sep 17 00:00:00 2001
From: Alex Cunningham
Date: Sun, 28 Aug 2011 21:41:51 +0000
Subject: [PATCH] Updating Eigen to version 3.0.2

---
 gtsam/3rdparty/Eigen/Eigen/Core | 7 -
 gtsam/3rdparty/Eigen/Eigen/src/Cholesky/LLT.h | 4 +-
 .../Eigen/Eigen/src/Core/BandMatrix.h | 13 +-
 .../Eigen/Eigen/src/Core/CwiseNullaryOp.h | 2 +-
 .../3rdparty/Eigen/Eigen/src/Core/Diagonal.h | 2 +-
 gtsam/3rdparty/Eigen/Eigen/src/Core/Dot.h | 8 +-
 .../3rdparty/Eigen/Eigen/src/Core/Functors.h | 8 +-
 gtsam/3rdparty/Eigen/Eigen/src/Core/Fuzzy.h | 4 +-
 .../Eigen/Eigen/src/Core/GenericPacketMath.h | 4 +-
 .../Eigen/Eigen/src/Core/MathFunctions.h | 8 +-
 .../Eigen/Eigen/src/Core/MatrixBase.h | 2 +-
 .../3rdparty/Eigen/Eigen/src/Core/NumTraits.h | 4 +-
 .../Eigen/Eigen/src/Core/PlainObjectBase.h | 8 +-
 .../Eigen/Eigen/src/Core/StableNorm.h | 10 +-
 .../3rdparty/Eigen/Eigen/src/Core/Transpose.h | 9 +-
 .../Eigen/Eigen/src/Core/TriangularMatrix.h | 17 +-
 .../Core/products/GeneralBlockPanelKernel.h | 2 +-
 .../src/Core/products/GeneralMatrixMatrix.h | 10 +-
 .../products/GeneralMatrixMatrixTriangular.h | 8 +-
 .../src/Core/products/GeneralMatrixVector.h | 4 +-
 .../Core/products/SelfadjointMatrixMatrix.h | 18 +-
 .../Core/products/SelfadjointMatrixVector.h | 2 +-
 .../Core/products/TriangularMatrixMatrix.h | 16 +-
 .../Core/products/TriangularMatrixVector.h | 4 +-
 .../Core/products/TriangularSolverMatrix.h | 8 +-
 .../Core/products/TriangularSolverVector.h | 4 +-
 .../Eigen/Eigen/src/Core/util/Macros.h | 4 +-
 .../Eigen/Eigen/src/Core/util/Memory.h | 14 +-
 .../Eigen/Eigen/src/Eigen2Support/Cwise.h | 8 +-
 .../Eigen/src/Eigen2Support/CwiseOperators.h | 18 -
 .../src/Eigen2Support/Geometry/AlignedBox.h | 30 +-
 .../Eigen/Eigen/src/Eigen2Support/SVD.h | 18 +-
 .../Eigen/src/Eigenvalues/ComplexSchur.h | 4 +-
 .../Eigen/Eigen/src/Eigenvalues/EigenSolver.h | 4 +-
 .../Eigen/Eigen/src/Eigenvalues/RealSchur.h | 4 +-
 .../src/Eigenvalues/SelfAdjointEigenSolver.h | 2 +-
 .../Eigen/Eigen/src/Geometry/AlignedBox.h | 14 +-
 .../Eigen/Eigen/src/Geometry/AngleAxis.h | 2 +-
 .../Eigen/Eigen/src/Geometry/Hyperplane.h | 2 +-
 .../Eigen/src/Geometry/ParametrizedLine.h | 6 +-
 gtsam/3rdparty/Eigen/Eigen/src/LU/FullPivLU.h | 4 +-
 .../Eigen/Eigen/src/LU/PartialPivLU.h | 8 +-
 .../Eigen/Eigen/src/LU/arch/Inverse_SSE.h | 8 +-
 .../Eigen/Eigen/src/QR/ColPivHouseholderQR.h | 4 +-
 .../Eigen/Eigen/src/QR/FullPivHouseholderQR.h | 14 +-
 .../Eigen/Eigen/src/QR/HouseholderQR.h | 18 +-
 .../3rdparty/Eigen/Eigen/src/SVD/JacobiSVD.h | 8 +-
 .../Eigen/Eigen/src/Sparse/AmbiVector.h | 2 +-
 .../Eigen/src/Sparse/CompressedStorage.h | 2 +-
 .../Eigen/src/Sparse/DynamicSparseMatrix.h | 2 +-
 .../Eigen/Eigen/src/Sparse/SparseFuzzy.h | 2 +-
 .../Eigen/Eigen/src/Sparse/SparseMatrix.h | 2 +-
 .../Eigen/Eigen/src/Sparse/SparseMatrixBase.h | 4 +-
 .../Eigen/src/Sparse/SparseSelfAdjointView.h | 6 +-
 .../Eigen/src/Sparse/SparseSparseProduct.h | 6 +-
 .../Eigen/Eigen/src/StlSupport/StdVector.h | 18 +-
 gtsam/3rdparty/Eigen/debug/gdb/printers.py | 4 +-
 .../Eigen/doc/C00_QuickStartGuide.dox | 2 +-
 .../Eigen/doc/C03_TutorialArrayClass.dox | 2 +-
 ...TutorialReductionsVisitorsBroadcasting.dox | 4 +-
 .../Eigen/doc/C08_TutorialGeometry.dox | 8 +-
 .../3rdparty/Eigen/doc/C09_TutorialSparse.dox | 16 +-
 gtsam/3rdparty/Eigen/test/adjoint.cpp | 4 +-
 gtsam/3rdparty/Eigen/test/bandmatrix.cpp | 2 +-
 gtsam/3rdparty/Eigen/test/cwiseop.cpp | 8 +
 gtsam/3rdparty/Eigen/test/main.h | 5 +-
 .../Eigen/unsupported/Eigen/MPRealSupport | 18 +-
 .../Eigen/unsupported/Eigen/OpenGLSupport | 46 +-
 .../unsupported/Eigen/src/BVH/BVAlgorithms.h | 8 +-
 .../Eigen/src/FFT/ei_kissfft_impl.h | 8 +
 .../IterativeSolvers/ConstrainedConjGrad.h | 4 +-
 .../IterativeSolvers/IterationController.h | 2 +-
 .../MatrixFunctions/MatrixFunctionAtomic.h | 4 +-
 .../HybridNonLinearSolver.h | 30 +-
 .../LevenbergMarquardt.h | 12 +-
 .../Eigen/src/NonLinearOptimization/dogleg.h | 2 +-
 .../Eigen/src/NonLinearOptimization/fdjac1.h | 4 +-
 .../Eigen/src/NonLinearOptimization/lmpar.h | 28 +-
 .../Eigen/src/NonLinearOptimization/r1updt.h | 7 +-
 .../Eigen/src/NumericalDiff/NumericalDiff.h | 2 +-
 .../Eigen/src/Skyline/SkylineStorage.h | 10 +-
 .../Eigen/src/SparseExtra/UmfPackSupport.h | 4 +-
 gtsam/3rdparty/Eigen/unsupported/test/BVH.cpp | 8 +-
 .../Eigen/unsupported/test/CMakeLists.txt | 2 +-
 .../3rdparty/Eigen/unsupported/test/FFTW.cpp | 2 +-
 .../Eigen/unsupported/test/mpreal/dlmalloc.c | 5703 +++++++++++++++
 .../Eigen/unsupported/test/mpreal/dlmalloc.h | 562 ++
 .../unsupported/test/{ => mpreal}/mpreal.cpp | 915 +--
 .../unsupported/test/{ => mpreal}/mpreal.h | 6337 +++++++++--------
 .../Eigen/unsupported/test/mpreal_support.cpp | 5 +-
 90 files changed, 10324 insertions(+), 3878 deletions(-)
 create mode 100755 gtsam/3rdparty/Eigen/unsupported/test/mpreal/dlmalloc.c
 create mode 100755 gtsam/3rdparty/Eigen/unsupported/test/mpreal/dlmalloc.h
 rename gtsam/3rdparty/Eigen/unsupported/test/{ => mpreal}/mpreal.cpp (66%)
 rename gtsam/3rdparty/Eigen/unsupported/test/{ => mpreal}/mpreal.h (91%)

diff --git a/gtsam/3rdparty/Eigen/Eigen/Core b/gtsam/3rdparty/Eigen/Eigen/Core
index 2ad4dee25..6e855427c 100644
--- a/gtsam/3rdparty/Eigen/Eigen/Core
+++ b/gtsam/3rdparty/Eigen/Eigen/Core
@@ -175,13 +175,6 @@
 #include
 #endif
-// this needs to be done after all possible windows C header includes and before any Eigen source includes
-// (system C++ includes are supposed to be able to deal with this already):
-// windows.h defines min and max macros which would make Eigen fail to compile.
-#if defined(min) || defined(max)
-#error The preprocessor symbols 'min' or 'max' are defined. If you are compiling on Windows, do #define NOMINMAX to prevent windows.h from defining these symbols.
-#endif - // defined in bits/termios.h #undef B0 diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Cholesky/LLT.h b/gtsam/3rdparty/Eigen/Eigen/src/Cholesky/LLT.h index a8fc525e8..a4ee5b11c 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Cholesky/LLT.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Cholesky/LLT.h @@ -233,7 +233,7 @@ template<> struct llt_inplace Index blockSize = size/8; blockSize = (blockSize/16)*16; - blockSize = std::min(std::max(blockSize,Index(8)), Index(128)); + blockSize = (std::min)((std::max)(blockSize,Index(8)), Index(128)); for (Index k=0; k struct llt_inplace // A00 | - | - // lu = A10 | A11 | - // A20 | A21 | A22 - Index bs = std::min(blockSize, size-k); + Index bs = (std::min)(blockSize, size-k); Index rs = size - k - bs; Block A11(m,k, k, bs,bs); Block A21(m,k+bs,k, rs,bs); diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/BandMatrix.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/BandMatrix.h index dda8efba3..2570d7b55 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/BandMatrix.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/BandMatrix.h @@ -87,7 +87,7 @@ class BandMatrixBase : public EigenBase if (i<=supers()) { start = supers()-i; - len = std::min(rows(),std::max(0,coeffs().rows() - (supers()-i))); + len = (std::min)(rows(),std::max(0,coeffs().rows() - (supers()-i))); } else if (i>=rows()-subs()) len = std::max(0,coeffs().rows() - (i + 1 - rows() + subs())); @@ -96,11 +96,11 @@ class BandMatrixBase : public EigenBase /** \returns a vector expression of the main diagonal */ inline Block diagonal() - { return Block(coeffs(),supers(),0,1,std::min(rows(),cols())); } + { return Block(coeffs(),supers(),0,1,(std::min)(rows(),cols())); } /** \returns a vector expression of the main diagonal (const version) */ inline const Block diagonal() const - { return Block(coeffs(),supers(),0,1,std::min(rows(),cols())); } + { return Block(coeffs(),supers(),0,1,(std::min)(rows(),cols())); } template struct DiagonalIntReturnType { enum { @@ -122,13 +122,13 @@ class BandMatrixBase : public EigenBase /** \returns a vector expression of the \a N -th sub or super diagonal */ template inline typename DiagonalIntReturnType::Type diagonal() { - return typename DiagonalIntReturnType::BuildType(coeffs(), supers()-N, std::max(0,N), 1, diagonalLength(N)); + return typename DiagonalIntReturnType::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N)); } /** \returns a vector expression of the \a N -th sub or super diagonal */ template inline const typename DiagonalIntReturnType::Type diagonal() const { - return typename DiagonalIntReturnType::BuildType(coeffs(), supers()-N, std::max(0,N), 1, diagonalLength(N)); + return typename DiagonalIntReturnType::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N)); } /** \returns a vector expression of the \a i -th sub or super diagonal */ @@ -166,7 +166,7 @@ class BandMatrixBase : public EigenBase protected: inline Index diagonalLength(Index i) const - { return i<0 ? std::min(cols(),rows()+i) : std::min(rows(),cols()-i); } + { return i<0 ? 
(std::min)(cols(),rows()+i) : (std::min)(rows(),cols()-i); } }; /** @@ -284,6 +284,7 @@ class BandMatrixWrapper : public BandMatrixBase static EIGEN_STRONG_INLINE Derived& run(Derived& m) { m.setZero(); - const Index size = std::min(m.rows(), m.cols()); + const Index size = (std::min)(m.rows(), m.cols()); for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1); return m; } diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Diagonal.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Diagonal.h index e807a49e4..61d3b063a 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Diagonal.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Diagonal.h @@ -87,7 +87,7 @@ template class Diagonal EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal) inline Index rows() const - { return m_index.value()<0 ? std::min(m_matrix.cols(),m_matrix.rows()+m_index.value()) : std::min(m_matrix.rows(),m_matrix.cols()-m_index.value()); } + { return m_index.value()<0 ? (std::min)(m_matrix.cols(),m_matrix.rows()+m_index.value()) : (std::min)(m_matrix.rows(),m_matrix.cols()-m_index.value()); } inline Index cols() const { return 1; } diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Dot.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Dot.h index 6e83191c5..42da78498 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Dot.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Dot.h @@ -116,7 +116,9 @@ MatrixBase::eigen2_dot(const MatrixBase& other) const //---------- implementation of L2 norm and related functions ---------- -/** \returns the squared \em l2 norm of *this, i.e., for vectors, the dot product of *this with itself. +/** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the Frobenius norm. + * In both cases, it consists in the sum of the square of all the matrix entries. + * For vectors, this is also equals to the dot product of \c *this with itself. * * \sa dot(), norm() */ @@ -126,7 +128,9 @@ EIGEN_STRONG_INLINE typename NumTraits::Scala return internal::real((*this).cwiseAbs2().sum()); } -/** \returns the \em l2 norm of *this, i.e., for vectors, the square root of the dot product of *this with itself. +/** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm. + * In both cases, it consists in the square root of the sum of the square of all the matrix entries. + * For vectors, this is also equals to the square root of the dot product of \c *this with itself. 
* * \sa dot(), squaredNorm() */ diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Functors.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Functors.h index e319c978e..54636e0d4 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Functors.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Functors.h @@ -116,7 +116,7 @@ struct functor_traits > { */ template struct scalar_min_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_min_op) - EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::min; return min(a, b); } + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::min; return (min)(a, b); } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const { return internal::pmin(a,b); } @@ -139,7 +139,7 @@ struct functor_traits > { */ template struct scalar_max_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_max_op) - EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::max; return max(a, b); } + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::max; return (max)(a, b); } template EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const { return internal::pmax(a,b); } @@ -167,8 +167,8 @@ template struct scalar_hypot_op { { using std::max; using std::min; - Scalar p = max(_x, _y); - Scalar q = min(_x, _y); + Scalar p = (max)(_x, _y); + Scalar q = (min)(_x, _y); Scalar qp = q/p; return p * sqrt(Scalar(1) + qp*qp); } diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Fuzzy.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Fuzzy.h index 6eaa26be8..1926d6ab4 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Fuzzy.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Fuzzy.h @@ -37,7 +37,7 @@ struct isApprox_selector using std::min; const typename internal::nested::type nested(x); const typename internal::nested::type otherNested(y); - return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * min(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum()); + return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * (min)(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum()); } }; @@ -94,7 +94,7 @@ struct isMuchSmallerThan_scalar_selector * * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$ * are considered to be approximately equal within precision \f$ p \f$ if - * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f] + * \f[ \Vert v - w \Vert \leqslant p\,\(min)(\Vert v\Vert, \Vert w\Vert). \f] * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka Frobenius norm * L2 norm). 
* diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/GenericPacketMath.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/GenericPacketMath.h index 1366a63c1..8ed835327 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/GenericPacketMath.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/GenericPacketMath.h @@ -134,12 +134,12 @@ pdiv(const Packet& a, /** \internal \returns the min of \a a and \a b (coeff-wise) */ template inline Packet pmin(const Packet& a, - const Packet& b) { using std::min; return min(a, b); } + const Packet& b) { using std::min; return (min)(a, b); } /** \internal \returns the max of \a a and \a b (coeff-wise) */ template inline Packet pmax(const Packet& a, - const Packet& b) { using std::max; return max(a, b); } + const Packet& b) { using std::max; return (max)(a, b); } /** \internal \returns the absolute value of \a a */ template inline Packet diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/MathFunctions.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/MathFunctions.h index c80d30e35..2b454db21 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/MathFunctions.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/MathFunctions.h @@ -378,8 +378,8 @@ struct hypot_impl using std::min; RealScalar _x = abs(x); RealScalar _y = abs(y); - RealScalar p = max(_x, _y); - RealScalar q = min(_x, _y); + RealScalar p = (max)(_x, _y); + RealScalar q = (min)(_x, _y); RealScalar qp = q/p; return p * sqrt(RealScalar(1) + qp*qp); } @@ -737,7 +737,7 @@ struct scalar_fuzzy_default_impl static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec) { using std::min; - return abs(x - y) <= min(abs(x), abs(y)) * prec; + return abs(x - y) <= (min)(abs(x), abs(y)) * prec; } static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar& prec) { @@ -776,7 +776,7 @@ struct scalar_fuzzy_default_impl static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec) { using std::min; - return abs2(x - y) <= min(abs2(x), abs2(y)) * prec * prec; + return abs2(x - y) <= (min)(abs2(x), abs2(y)) * prec * prec; } }; diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/MatrixBase.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/MatrixBase.h index f0c7fc7a1..db156f6e9 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/MatrixBase.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/MatrixBase.h @@ -111,7 +111,7 @@ template class MatrixBase /** \returns the size of the main diagonal, which is min(rows(),cols()). * \sa rows(), cols(), SizeAtCompileTime. */ - inline Index diagonalSize() const { return std::min(rows(),cols()); } + inline Index diagonalSize() const { return (std::min)(rows(),cols()); } /** \brief The plain matrix type corresponding to this expression. * diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/NumTraits.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/NumTraits.h index 5c7762dae..73ef05dfe 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/NumTraits.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/NumTraits.h @@ -87,8 +87,8 @@ template struct GenericNumTraits // make sure to override this for floating-point types return Real(0); } - inline static T highest() { return std::numeric_limits::max(); } - inline static T lowest() { return IsInteger ? std::numeric_limits::min() : (-std::numeric_limits::max()); } + inline static T highest() { return (std::numeric_limits::max)(); } + inline static T lowest() { return IsInteger ? 
(std::numeric_limits::min)() : (-(std::numeric_limits::max)()); } #ifdef EIGEN2_SUPPORT enum { diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/PlainObjectBase.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/PlainObjectBase.h index 5358cb572..ed34b0ed9 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/PlainObjectBase.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/PlainObjectBase.h @@ -647,8 +647,8 @@ struct internal::conservative_resize_like_impl { // The storage order does not allow us to use reallocation. typename Derived::PlainObject tmp(rows,cols); - const Index common_rows = std::min(rows, _this.rows()); - const Index common_cols = std::min(cols, _this.cols()); + const Index common_rows = (std::min)(rows, _this.rows()); + const Index common_cols = (std::min)(cols, _this.cols()); tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols); _this.derived().swap(tmp); } @@ -681,8 +681,8 @@ struct internal::conservative_resize_like_impl { // The storage order does not allow us to use reallocation. typename Derived::PlainObject tmp(other); - const Index common_rows = std::min(tmp.rows(), _this.rows()); - const Index common_cols = std::min(tmp.cols(), _this.cols()); + const Index common_rows = (std::min)(tmp.rows(), _this.rows()); + const Index common_cols = (std::min)(tmp.cols(), _this.cols()); tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols); _this.derived().swap(tmp); } diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/StableNorm.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/StableNorm.h index e65943ad7..f667272e4 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/StableNorm.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/StableNorm.h @@ -69,7 +69,7 @@ MatrixBase::stableNorm() const if (bi>0) internal::stable_norm_kernel(this->head(bi), ssq, scale, invScale); for (; bisegment(bi,min(blockSize, n - bi)).template forceAlignedAccessIf(), ssq, scale, invScale); + internal::stable_norm_kernel(this->segment(bi,(min)(blockSize, n - bi)).template forceAlignedAccessIf(), ssq, scale, invScale); return scale * internal::sqrt(ssq); } @@ -103,12 +103,12 @@ MatrixBase::blueNorm() const // For portability, the PORT subprograms "ilmaeh" and "rlmach" // are used. 
For any specific computer, each of the assignment // statements can be replaced - nbig = std::numeric_limits::max(); // largest integer + nbig = (std::numeric_limits::max)(); // largest integer ibeta = std::numeric_limits::radix; // base for floating-point numbers it = std::numeric_limits::digits; // number of base-beta digits in mantissa iemin = std::numeric_limits::min_exponent; // minimum exponent iemax = std::numeric_limits::max_exponent; // maximum exponent - rbig = std::numeric_limits::max(); // largest floating-point number + rbig = (std::numeric_limits::max)(); // largest floating-point number iexp = -((1-iemin)/2); b1 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // lower boundary of midrange @@ -167,8 +167,8 @@ MatrixBase::blueNorm() const } else return internal::sqrt(amed); - asml = min(abig, amed); - abig = max(abig, amed); + asml = (min)(abig, amed); + abig = (max)(abig, amed); if(asml <= abig*relerr) return abig; else diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpose.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpose.h index b521f9319..3f7c7df6e 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpose.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpose.h @@ -350,15 +350,14 @@ struct blas_traits > template struct check_transpose_aliasing_compile_time_selector { - enum { ret = blas_traits::IsTransposed != DestIsTransposed - }; + enum { ret = bool(blas_traits::IsTransposed) != DestIsTransposed }; }; template struct check_transpose_aliasing_compile_time_selector > { - enum { ret = blas_traits::IsTransposed != DestIsTransposed - || blas_traits::IsTransposed != DestIsTransposed + enum { ret = bool(blas_traits::IsTransposed) != DestIsTransposed + || bool(blas_traits::IsTransposed) != DestIsTransposed }; }; @@ -367,7 +366,7 @@ struct check_transpose_aliasing_run_time_selector { static bool run(const Scalar* dest, const OtherDerived& src) { - return (blas_traits::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(Scalar*)extract_data(src)); + return (bool(blas_traits::IsTransposed) != DestIsTransposed) && (dest!=0 && dest==(Scalar*)extract_data(src)); } }; diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/TriangularMatrix.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/TriangularMatrix.h index c04fa0c32..033e81036 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/TriangularMatrix.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/TriangularMatrix.h @@ -111,6 +111,7 @@ template class TriangularBase : public EigenBase EIGEN_ONLY_USED_FOR_DEBUG(col); eigen_assert(col>=0 && col=0 && row=row) || (mode==Lower && col<=row) || ((mode==StrictlyUpper || mode==UnitUpper) && col>row) @@ -491,7 +492,7 @@ struct triangular_assignment_selector(0); @@ -527,7 +528,7 @@ struct triangular_assignment_selector(0); @@ -563,7 +564,7 @@ struct triangular_assignment_selector::isUpperTriangular(RealScalar prec) const RealScalar maxAbsOnUpperPart = static_cast(-1); for(Index j = 0; j < cols(); ++j) { - Index maxi = std::min(j, rows()-1); + Index maxi = (std::min)(j, rows()-1); for(Index i = 0; i <= maxi; ++i) { RealScalar absValue = internal::abs(coeff(i,j)); @@ -827,7 +828,7 @@ bool MatrixBase::isLowerTriangular(RealScalar prec) const RealScalar threshold = maxAbsOnLowerPart * prec; for(Index j = 1; j < cols(); ++j) { - Index maxi = std::min(j, rows()-1); + Index maxi = (std::min)(j, rows()-1); for(Index i = 0; i < maxi; ++i) if(internal::abs(coeff(i, j)) > threshold) return false; } diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h 
b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h index 2116dcc74..6f3f27170 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h @@ -81,6 +81,7 @@ inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1=0, std::ptrdi template void computeProductBlockingSizes(std::ptrdiff_t& k, std::ptrdiff_t& m, std::ptrdiff_t& n) { + EIGEN_UNUSED_VARIABLE(n); // Explanations: // Let's recall the product algorithms form kc x nc horizontal panels B' on the rhs and // mc x kc blocks A' on the lhs. A' has to fit into L2 cache. Moreover, B' is processed @@ -102,7 +103,6 @@ void computeProductBlockingSizes(std::ptrdiff_t& k, std::ptrdiff_t& m, std::ptrd k = std::min(k, l1/kdiv); std::ptrdiff_t _m = k>0 ? l2/(4 * sizeof(LhsScalar) * k) : 0; if(_m diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h index d33f00b20..ae94a2795 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h @@ -78,7 +78,7 @@ static void run(Index rows, Index cols, Index depth, typedef gebp_traits Traits; Index kc = blocking.kc(); // cache block size along the K direction - Index mc = std::min(rows,blocking.mc()); // cache block size along the M direction + Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction //Index nc = blocking.nc(); // cache block size along the N direction gemm_pack_lhs pack_lhs; @@ -103,7 +103,7 @@ static void run(Index rows, Index cols, Index depth, // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs... for(Index k=0; k rows of B', and cols of the A' + const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A' // In order to reduce the chance that a thread has to wait for the other, // let's start by packing A'. @@ -140,7 +140,7 @@ static void run(Index rows, Index cols, Index depth, // Then keep going as usual with the remaining A' for(Index i=mc; i Pack rhs's panel into a sequential chunk of memory (L2 caching) @@ -187,7 +187,7 @@ static void run(Index rows, Index cols, Index depth, // (==GEPP_VAR1) for(Index i2=0; i2 processed with a special kernel // 3 - after the diagonal => processed with gebp or skipped if (UpLo==Lower) - gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, std::min(size,i2), alpha, + gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, (std::min)(size,i2), alpha, -1, -1, 0, 0, allocatedBlockB); sybb(res+resStride*i2 + i2, resStride, blockA, blockB + actual_kc*i2, actual_mc, actual_kc, alpha, allocatedBlockB); @@ -120,7 +120,7 @@ struct general_matrix_matrix_triangular_product(kc, mc, nc); // kc must smaller than mc - kc = std::min(kc,mc); + kc = (std::min)(kc,mc); std::size_t sizeW = kc*Traits::WorkSpaceFactor; std::size_t sizeB = sizeW + kc*cols; @@ -276,7 +276,7 @@ struct product_selfadjoint_matrix generic packed copy for(Index i2=0; i2() (blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc); @@ -352,14 +352,14 @@ struct product_selfadjoint_matrix GEPP for(Index i2=0; i20 : k2 GEPP { Index start = IsLower ? k2 : 0; - Index end = IsLower ? rows : std::min(actual_k2,rows); + Index end = IsLower ? 
rows : (std::min)(actual_k2,rows); for(Index i2=start; i2() (blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc); @@ -240,7 +240,7 @@ struct product_triangular_matrix_matrix0; IsLower ? k2+=kc : k2-=kc) { - Index actual_kc = std::min(IsLower ? depth-k2 : k2, kc); + Index actual_kc = (std::min)(IsLower ? depth-k2 : k2, kc); Index actual_k2 = IsLower ? k2 : k2-actual_kc; // align blocks with the end of the triangular part for trapezoidal rhs @@ -286,7 +286,7 @@ struct product_triangular_matrix_matrix=cols) ? 0 : actual_kc; @@ -327,7 +327,7 @@ struct product_triangular_matrix_matrix0; IsLower ? k2+=kc : k2-=kc) { - const Index actual_kc = std::min(IsLower ? size-k2 : k2, kc); + const Index actual_kc = (std::min)(IsLower ? size-k2 : k2, kc); // We have selected and packed a big horizontal panel R1 of rhs. Let B be the packed copy of this panel, // and R2 the remaining part of rhs. The corresponding vertical panel of lhs is split into @@ -164,7 +164,7 @@ struct triangular_solve_matrix0) { pack_lhs(blockA, &tri(i2, IsLower ? k2 : k2-kc), triStride, actual_kc, actual_mc); @@ -222,7 +222,7 @@ struct triangular_solve_matrix0 : k20; IsLower ? pi+=PanelWidth : pi-=PanelWidth) { - Index actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth); + Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth); Index r = IsLower ? pi : size - pi; // remaining size if (r > 0) @@ -114,7 +114,7 @@ struct triangular_solve_vector0; IsLower ? pi+=PanelWidth : pi-=PanelWidth) { - Index actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth); + Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth); Index startBlock = IsLower ? pi : pi-actualPanelWidth; Index endBlock = IsLower ? pi + actualPanelWidth : 0; diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Macros.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Macros.h index 4e56a006c..6c3f1e421 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Macros.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Macros.h @@ -28,7 +28,7 @@ #define EIGEN_WORLD_VERSION 3 #define EIGEN_MAJOR_VERSION 0 -#define EIGEN_MINOR_VERSION 1 +#define EIGEN_MINOR_VERSION 2 #define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \ (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \ @@ -399,7 +399,7 @@ #define EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR) \ template \ EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const OtherDerived> \ - METHOD(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const \ + (METHOD)(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const \ { \ return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); \ } diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Memory.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Memory.h index e84c664a9..a580b95ad 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Memory.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Memory.h @@ -156,7 +156,7 @@ inline void* generic_aligned_realloc(void* ptr, size_t size, size_t old_size) if (ptr != 0) { - std::memcpy(newptr, ptr, std::min(size,old_size)); + std::memcpy(newptr, ptr, (std::min)(size,old_size)); aligned_free(ptr); } @@ -494,12 +494,12 @@ template class aligned_stack_memory_handler aligned_stack_memory_handler(T* ptr, size_t size, bool dealloc) : m_ptr(ptr), m_size(size), m_deallocate(dealloc) { - if(NumTraits::RequireInitialization) + if(NumTraits::RequireInitialization && m_ptr) Eigen::internal::construct_elements_of_array(m_ptr, size); } 
~aligned_stack_memory_handler() { - if(NumTraits::RequireInitialization) + if(NumTraits::RequireInitialization && m_ptr) Eigen::internal::destruct_elements_of_array(m_ptr, m_size); if(m_deallocate) Eigen::internal::aligned_free(m_ptr); @@ -663,12 +663,12 @@ public: size_type max_size() const throw() { - return std::numeric_limits::max(); + return (std::numeric_limits::max)(); } - pointer allocate( size_type num, const_pointer* hint = 0 ) + pointer allocate( size_type num, const void* hint = 0 ) { - static_cast( hint ); // suppress unused variable warning + EIGEN_UNUSED_VARIABLE(hint); return static_cast( internal::aligned_malloc( num * sizeof(T) ) ); } @@ -903,7 +903,7 @@ inline int queryTopLevelCacheSize() { int l1, l2(-1), l3(-1); queryCacheSizes(l1,l2,l3); - return std::max(l2,l3); + return (std::max)(l2,l3); } } // end namespace internal diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Cwise.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Cwise.h index c619d389c..2dc83b6a7 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Cwise.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Cwise.h @@ -82,13 +82,17 @@ template class Cwise const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op) operator/(const MatrixBase &other) const; + /** \deprecated ArrayBase::min() */ template const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op) - min(const MatrixBase &other) const; + (min)(const MatrixBase &other) const + { return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op)(_expression(), other.derived()); } + /** \deprecated ArrayBase::max() */ template const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op) - max(const MatrixBase &other) const; + (max)(const MatrixBase &other) const + { return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op)(_expression(), other.derived()); } const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs_op) abs() const; const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs2_op) abs2() const; diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/CwiseOperators.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/CwiseOperators.h index 0c7e9db6d..9c28559c3 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/CwiseOperators.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/CwiseOperators.h @@ -96,24 +96,6 @@ inline ExpressionType& Cwise::operator/=(const MatrixBase -template -EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op) -Cwise::min(const MatrixBase &other) const -{ - return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op)(_expression(), other.derived()); -} - -/** \deprecated ArrayBase::max() */ -template -template -EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op) -Cwise::max(const MatrixBase &other) const -{ - return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op)(_expression(), other.derived()); -} - /*************************************************************************** * The following functions were defined in Array ***************************************************************************/ diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/AlignedBox.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/AlignedBox.h index 1c915be22..6ad846d35 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/AlignedBox.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/Geometry/AlignedBox.h @@ -51,14 +51,14 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim== { if 
(AmbientDimAtCompileTime!=Dynamic) setNull(); } /** Constructs a null box with \a _dim the dimension of the ambient space. */ - inline explicit AlignedBox(int _dim) : m_min(_dim), m_max(_dim) + inline explicit AlignedBox(int _dim) : m_(min)(_dim), m_(max)(_dim) { setNull(); } /** Constructs a box with extremities \a _min and \a _max. */ - inline AlignedBox(const VectorType& _min, const VectorType& _max) : m_min(_min), m_max(_max) {} + inline AlignedBox(const VectorType& _min, const VectorType& _max) : m_(min)(_min), m_(max)(_max) {} /** Constructs a box containing a single point \a p. */ - inline explicit AlignedBox(const VectorType& p) : m_min(p), m_max(p) {} + inline explicit AlignedBox(const VectorType& p) : m_(min)(p), m_(max)(p) {} ~AlignedBox() {} @@ -71,18 +71,18 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim== /** Makes \c *this a null/empty box. */ inline void setNull() { - m_min.setConstant( std::numeric_limits::max()); - m_max.setConstant(-std::numeric_limits::max()); + m_min.setConstant( std::numeric_limits::(max)()); + m_max.setConstant(-std::numeric_limits::(max)()); } /** \returns the minimal corner */ - inline const VectorType& min() const { return m_min; } + inline const VectorType& (min)() const { return m_min; } /** \returns a non const reference to the minimal corner */ - inline VectorType& min() { return m_min; } + inline VectorType& (min)() { return m_min; } /** \returns the maximal corner */ - inline const VectorType& max() const { return m_max; } + inline const VectorType& (max)() const { return m_max; } /** \returns a non const reference to the maximal corner */ - inline VectorType& max() { return m_max; } + inline VectorType& (max)() { return m_max; } /** \returns true if the point \a p is inside the box \c *this. */ inline bool contains(const VectorType& p) const @@ -90,19 +90,19 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim== /** \returns true if the box \a b is entirely inside the box \c *this. */ inline bool contains(const AlignedBox& b) const - { return (m_min.cwise()<=b.min()).all() && (b.max().cwise()<=m_max).all(); } + { return (m_min.cwise()<=b.(min)()).all() && (b.(max)().cwise()<=m_max).all(); } /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */ inline AlignedBox& extend(const VectorType& p) - { m_min = m_min.cwise().min(p); m_max = m_max.cwise().max(p); return *this; } + { m_min = m_min.cwise().(min)(p); m_max = m_max.cwise().(max)(p); return *this; } /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */ inline AlignedBox& extend(const AlignedBox& b) - { m_min = m_min.cwise().min(b.m_min); m_max = m_max.cwise().max(b.m_max); return *this; } + { m_min = m_min.cwise().(min)(b.m_min); m_max = m_max.cwise().(max)(b.m_max); return *this; } /** Clamps \c *this by the box \a b and returns a reference to \c *this. */ inline AlignedBox& clamp(const AlignedBox& b) - { m_min = m_min.cwise().max(b.m_min); m_max = m_max.cwise().min(b.m_max); return *this; } + { m_min = m_min.cwise().(max)(b.m_min); m_max = m_max.cwise().(min)(b.m_max); return *this; } /** Translate \c *this by the vector \a t and returns a reference to \c *this. 
*/ inline AlignedBox& translate(const VectorType& t) @@ -138,8 +138,8 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim== template inline explicit AlignedBox(const AlignedBox& other) { - m_min = other.min().template cast(); - m_max = other.max().template cast(); + m_min = other.(min)().template cast(); + m_max = other.(max)().template cast(); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/SVD.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/SVD.h index 528a0dcd0..16b4b488f 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/SVD.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigen2Support/SVD.h @@ -64,9 +64,9 @@ template class SVD SVD() {} // a user who relied on compiler-generated default compiler reported problems with MSVC in 2.0.7 SVD(const MatrixType& matrix) - : m_matU(matrix.rows(), std::min(matrix.rows(), matrix.cols())), + : m_matU(matrix.rows(), (std::min)(matrix.rows(), matrix.cols())), m_matV(matrix.cols(),matrix.cols()), - m_sigma(std::min(matrix.rows(),matrix.cols())) + m_sigma((std::min)(matrix.rows(),matrix.cols())) { compute(matrix); } @@ -108,13 +108,13 @@ void SVD::compute(const MatrixType& matrix) { const int m = matrix.rows(); const int n = matrix.cols(); - const int nu = std::min(m,n); + const int nu = (std::min)(m,n); ei_assert(m>=n && "In Eigen 2.0, SVD only works for MxN matrices with M>=N. Sorry!"); ei_assert(m>1 && "In Eigen 2.0, SVD doesn't work on 1x1 matrices"); m_matU.resize(m, nu); m_matU.setZero(); - m_sigma.resize(std::min(m,n)); + m_sigma.resize((std::min)(m,n)); m_matV.resize(n,n); RowVector e(n); @@ -126,9 +126,9 @@ void SVD::compute(const MatrixType& matrix) // Reduce A to bidiagonal form, storing the diagonal elements // in s and the super-diagonal elements in e. - int nct = std::min(m-1,n); - int nrt = std::max(0,std::min(n-2,m)); - for (k = 0; k < std::max(nct,nrt); ++k) + int nct = (std::min)(m-1,n); + int nrt = (std::max)(0,(std::min)(n-2,m)); + for (k = 0; k < (std::max)(nct,nrt); ++k) { if (k < nct) { @@ -193,7 +193,7 @@ void SVD::compute(const MatrixType& matrix) // Set up the final bidiagonal matrix or order p. - int p = std::min(n,m+1); + int p = (std::min)(n,m+1); if (nct < n) m_sigma[nct] = matA(nct,nct); if (m < p) @@ -380,7 +380,7 @@ void SVD::compute(const MatrixType& matrix) case 3: { // Calculate the shift. 
- Scalar scale = std::max(std::max(std::max(std::max( + Scalar scale = (std::max)((std::max)((std::max)((std::max)( ei_abs(m_sigma[p-1]),ei_abs(m_sigma[p-2])),ei_abs(e[p-2])), ei_abs(m_sigma[k])),ei_abs(e[k])); Scalar sp = m_sigma[p-1]/scale; diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/ComplexSchur.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/ComplexSchur.h index b1830f642..ec93af2e5 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/ComplexSchur.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/ComplexSchur.h @@ -423,7 +423,7 @@ void ComplexSchur::reduceToTriangularForm(bool computeU) JacobiRotation rot; rot.makeGivens(m_matT.coeff(il,il) - shift, m_matT.coeff(il+1,il)); m_matT.rightCols(m_matT.cols()-il).applyOnTheLeft(il, il+1, rot.adjoint()); - m_matT.topRows(std::min(il+2,iu)+1).applyOnTheRight(il, il+1, rot); + m_matT.topRows((std::min)(il+2,iu)+1).applyOnTheRight(il, il+1, rot); if(computeU) m_matU.applyOnTheRight(il, il+1, rot); for(Index i=il+1 ; i::reduceToTriangularForm(bool computeU) rot.makeGivens(m_matT.coeffRef(i,i-1), m_matT.coeffRef(i+1,i-1), &m_matT.coeffRef(i,i-1)); m_matT.coeffRef(i+1,i-1) = ComplexScalar(0); m_matT.rightCols(m_matT.cols()-i).applyOnTheLeft(i, i+1, rot.adjoint()); - m_matT.topRows(std::min(i+2,iu)+1).applyOnTheRight(i, i+1, rot); + m_matT.topRows((std::min)(i+2,iu)+1).applyOnTheRight(i, i+1, rot); if(computeU) m_matU.applyOnTheRight(i, i+1, rot); } } diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/EigenSolver.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/EigenSolver.h index 9b133a010..ac4c4242d 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/EigenSolver.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/EigenSolver.h @@ -435,7 +435,7 @@ void EigenSolver::doComputeEigenvectors() Scalar norm = 0.0; for (Index j = 0; j < size; ++j) { - norm += m_matT.row(j).segment(std::max(j-1,Index(0)), size-std::max(j-1,Index(0))).cwiseAbs().sum(); + norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum(); } // Backsubstitute to find vectors of upper triangular form @@ -564,7 +564,7 @@ void EigenSolver::doComputeEigenvectors() // Overflow control using std::max; - Scalar t = max(internal::abs(m_matT.coeff(i,n-1)),internal::abs(m_matT.coeff(i,n))); + Scalar t = (max)(internal::abs(m_matT.coeff(i,n-1)),internal::abs(m_matT.coeff(i,n))); if ((eps * t) * t > Scalar(1)) m_matT.block(i, n-1, size-i, 2) /= t; diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/RealSchur.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/RealSchur.h index c61ae444c..cc9af11c1 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/RealSchur.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/RealSchur.h @@ -290,7 +290,7 @@ inline typename MatrixType::Scalar RealSchur::computeNormOfT() // + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum(); Scalar norm = 0.0; for (Index j = 0; j < size; ++j) - norm += m_matT.row(j).segment(std::max(j-1,Index(0)), size-std::max(j-1,Index(0))).cwiseAbs().sum(); + norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum(); return norm; } @@ -442,7 +442,7 @@ inline void RealSchur::performFrancisQRStep(Index il, Index im, Inde // These Householder transformations form the O(n^3) part of the algorithm m_matT.block(k, k, 3, size-k).applyHouseholderOnTheLeft(ess, tau, workspace); - m_matT.block(0, k, std::min(iu,k+3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace); + m_matT.block(0, k, (std::min)(iu,k+3) + 1, 
3).applyHouseholderOnTheRight(ess, tau, workspace); if (computeU) m_matU.block(0, k, size, 3).applyHouseholderOnTheRight(ess, tau, workspace); } diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h index eeab325cc..965dda88b 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h @@ -387,7 +387,7 @@ SelfAdjointEigenSolver& SelfAdjointEigenSolver { m_eivalues.coeffRef(0,0) = internal::real(matrix.coeff(0,0)); if(computeEigenvectors) - m_eivec.setOnes(); + m_eivec.setOnes(n,n); m_info = Success; m_isInitialized = true; m_eigenvectorsOk = computeEigenvectors; diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/AlignedBox.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/AlignedBox.h index d81dcad9e..b51deb3f3 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/AlignedBox.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/AlignedBox.h @@ -111,13 +111,13 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) } /** \returns the minimal corner */ - inline const VectorType& min() const { return m_min; } + inline const VectorType& (min)() const { return m_min; } /** \returns a non const reference to the minimal corner */ - inline VectorType& min() { return m_min; } + inline VectorType& (min)() { return m_min; } /** \returns the maximal corner */ - inline const VectorType& max() const { return m_max; } + inline const VectorType& (max)() const { return m_max; } /** \returns a non const reference to the maximal corner */ - inline VectorType& max() { return m_max; } + inline VectorType& (max)() { return m_max; } /** \returns the center of the box */ inline const CwiseUnaryOp, @@ -196,7 +196,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) /** \returns true if the box \a b is entirely inside the box \c *this. */ inline bool contains(const AlignedBox& b) const - { return (m_min.array()<=b.min().array()).all() && (b.max().array()<=m_max.array()).all(); } + { return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); } /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. 
*/ template @@ -287,8 +287,8 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) template inline explicit AlignedBox(const AlignedBox& other) { - m_min = other.min().template cast(); - m_max = other.max().template cast(); + m_min = (other.min)().template cast(); + m_max = (other.max)().template cast(); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/AngleAxis.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/AngleAxis.h index 0a7046608..0ec4624cf 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/AngleAxis.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/AngleAxis.h @@ -182,7 +182,7 @@ AngleAxis& AngleAxis::operator=(const QuaternionBase - Scalar intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane); + Scalar intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const; /** \returns \c *this with scalar type casted to \a NewScalarType * @@ -159,7 +159,7 @@ inline ParametrizedLine<_Scalar, _AmbientDim,_Options>::ParametrizedLine(const H */ template template -inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) +inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const { return -(hyperplane.offset()+hyperplane.normal().dot(origin())) / hyperplane.normal().dot(direction()); diff --git a/gtsam/3rdparty/Eigen/Eigen/src/LU/FullPivLU.h b/gtsam/3rdparty/Eigen/Eigen/src/LU/FullPivLU.h index 339d7845c..633fb23fd 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/LU/FullPivLU.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/LU/FullPivLU.h @@ -533,7 +533,7 @@ template MatrixType FullPivLU::reconstructedMatrix() const { eigen_assert(m_isInitialized && "LU is not initialized."); - const Index smalldim = std::min(m_lu.rows(), m_lu.cols()); + const Index smalldim = (std::min)(m_lu.rows(), m_lu.cols()); // LU MatrixType res(m_lu.rows(),m_lu.cols()); // FIXME the .toDenseMatrix() should not be needed... 
@@ -695,7 +695,7 @@ struct solve_retval, Rhs> const Index rows = dec().rows(), cols = dec().cols(), nonzero_pivots = dec().nonzeroPivots(); eigen_assert(rhs().rows() == rows); - const Index smalldim = std::min(rows, cols); + const Index smalldim = (std::min)(rows, cols); if(nonzero_pivots == 0) { diff --git a/gtsam/3rdparty/Eigen/Eigen/src/LU/PartialPivLU.h b/gtsam/3rdparty/Eigen/Eigen/src/LU/PartialPivLU.h index 4cae510b0..09394b01f 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/LU/PartialPivLU.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/LU/PartialPivLU.h @@ -253,7 +253,7 @@ struct partial_lu_impl { const Index rows = lu.rows(); const Index cols = lu.cols(); - const Index size = std::min(rows,cols); + const Index size = (std::min)(rows,cols); nb_transpositions = 0; int first_zero_pivot = -1; for(Index k = 0; k < size; ++k) @@ -313,7 +313,7 @@ struct partial_lu_impl MapLU lu1(lu_data,StorageOrder==RowMajor?rows:luStride,StorageOrder==RowMajor?luStride:cols); MatrixType lu(lu1,0,0,rows,cols); - const Index size = std::min(rows,cols); + const Index size = (std::min)(rows,cols); // if the matrix is too small, no blocking: if(size<=16) @@ -327,14 +327,14 @@ struct partial_lu_impl { blockSize = size/8; blockSize = (blockSize/16)*16; - blockSize = std::min(std::max(blockSize,Index(8)), maxBlockSize); + blockSize = (std::min)((std::max)(blockSize,Index(8)), maxBlockSize); } nb_transpositions = 0; int first_zero_pivot = -1; for(Index k = 0; k < size; k+=blockSize) { - Index bs = std::min(size-k,blockSize); // actual size of the block + Index bs = (std::min)(size-k,blockSize); // actual size of the block Index trows = rows - k - bs; // trailing rows Index tsize = size - k - bs; // trailing size diff --git a/gtsam/3rdparty/Eigen/Eigen/src/LU/arch/Inverse_SSE.h b/gtsam/3rdparty/Eigen/Eigen/src/LU/arch/Inverse_SSE.h index 0fe9be388..176c349ce 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/LU/arch/Inverse_SSE.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/LU/arch/Inverse_SSE.h @@ -182,8 +182,8 @@ struct compute_inverse_size4 }; static void run(const MatrixType& matrix, ResultType& result) { - const EIGEN_ALIGN16 long long int _Sign_NP[2] = { 0x8000000000000000ll, 0x0000000000000000ll }; - const EIGEN_ALIGN16 long long int _Sign_PN[2] = { 0x0000000000000000ll, 0x8000000000000000ll }; + const __m128d _Sign_NP = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0)); + const __m128d _Sign_PN = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0)); // The inverse is calculated using "Divide and Conquer" technique. The // original matrix is divide into four 2x2 sub-matrices. 
Since each @@ -316,8 +316,8 @@ struct compute_inverse_size4 iB1 = _mm_sub_pd(_mm_mul_pd(C1, dB), iB1); iB2 = _mm_sub_pd(_mm_mul_pd(C2, dB), iB2); - d1 = _mm_xor_pd(rd, _mm_load_pd((double*)_Sign_PN)); - d2 = _mm_xor_pd(rd, _mm_load_pd((double*)_Sign_NP)); + d1 = _mm_xor_pd(rd, _Sign_PN); + d2 = _mm_xor_pd(rd, _Sign_NP); // iC = B*|C| - A*C#*D; dC = _mm_shuffle_pd(dC,dC,0); diff --git a/gtsam/3rdparty/Eigen/Eigen/src/QR/ColPivHouseholderQR.h b/gtsam/3rdparty/Eigen/Eigen/src/QR/ColPivHouseholderQR.h index 0cdcaa1cb..f04c6038d 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/QR/ColPivHouseholderQR.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/QR/ColPivHouseholderQR.h @@ -93,7 +93,7 @@ template class ColPivHouseholderQR */ ColPivHouseholderQR(Index rows, Index cols) : m_qr(rows, cols), - m_hCoeffs(std::min(rows,cols)), + m_hCoeffs((std::min)(rows,cols)), m_colsPermutation(cols), m_colsTranspositions(cols), m_temp(cols), @@ -103,7 +103,7 @@ template class ColPivHouseholderQR ColPivHouseholderQR(const MatrixType& matrix) : m_qr(matrix.rows(), matrix.cols()), - m_hCoeffs(std::min(matrix.rows(),matrix.cols())), + m_hCoeffs((std::min)(matrix.rows(),matrix.cols())), m_colsPermutation(matrix.cols()), m_colsTranspositions(matrix.cols()), m_temp(matrix.cols()), diff --git a/gtsam/3rdparty/Eigen/Eigen/src/QR/FullPivHouseholderQR.h b/gtsam/3rdparty/Eigen/Eigen/src/QR/FullPivHouseholderQR.h index 7f1d98c54..dde3013be 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/QR/FullPivHouseholderQR.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/QR/FullPivHouseholderQR.h @@ -93,21 +93,21 @@ template class FullPivHouseholderQR */ FullPivHouseholderQR(Index rows, Index cols) : m_qr(rows, cols), - m_hCoeffs(std::min(rows,cols)), + m_hCoeffs((std::min)(rows,cols)), m_rows_transpositions(rows), m_cols_transpositions(cols), m_cols_permutation(cols), - m_temp(std::min(rows,cols)), + m_temp((std::min)(rows,cols)), m_isInitialized(false), m_usePrescribedThreshold(false) {} FullPivHouseholderQR(const MatrixType& matrix) : m_qr(matrix.rows(), matrix.cols()), - m_hCoeffs(std::min(matrix.rows(), matrix.cols())), + m_hCoeffs((std::min)(matrix.rows(), matrix.cols())), m_rows_transpositions(matrix.rows()), m_cols_transpositions(matrix.cols()), m_cols_permutation(matrix.cols()), - m_temp(std::min(matrix.rows(), matrix.cols())), + m_temp((std::min)(matrix.rows(), matrix.cols())), m_isInitialized(false), m_usePrescribedThreshold(false) { @@ -379,7 +379,7 @@ FullPivHouseholderQR& FullPivHouseholderQR::compute(cons { Index rows = matrix.rows(); Index cols = matrix.cols(); - Index size = std::min(rows,cols); + Index size = (std::min)(rows,cols); m_qr = matrix; m_hCoeffs.resize(size); @@ -493,7 +493,7 @@ struct solve_retval, Rhs> RealScalar biggest_in_upper_part_of_c = c.topRows( dec().rank() ).cwiseAbs().maxCoeff(); RealScalar biggest_in_lower_part_of_c = c.bottomRows(rows-dec().rank()).cwiseAbs().maxCoeff(); // FIXME brain dead - const RealScalar m_precision = NumTraits::epsilon() * std::min(rows,cols); + const RealScalar m_precision = NumTraits::epsilon() * (std::min)(rows,cols); // this internal:: prefix is needed by at least gcc 3.4 and ICC if(!internal::isMuchSmallerThan(biggest_in_lower_part_of_c, biggest_in_upper_part_of_c, m_precision)) return; @@ -520,7 +520,7 @@ typename FullPivHouseholderQR::MatrixQType FullPivHouseholderQR temp(rows); for (Index k = size-1; k >= 0; k--) diff --git a/gtsam/3rdparty/Eigen/Eigen/src/QR/HouseholderQR.h b/gtsam/3rdparty/Eigen/Eigen/src/QR/HouseholderQR.h index 0d2b74893..9ee96de26 100644 --- 
a/gtsam/3rdparty/Eigen/Eigen/src/QR/HouseholderQR.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/QR/HouseholderQR.h @@ -88,13 +88,13 @@ template class HouseholderQR */ HouseholderQR(Index rows, Index cols) : m_qr(rows, cols), - m_hCoeffs(std::min(rows,cols)), + m_hCoeffs((std::min)(rows,cols)), m_temp(cols), m_isInitialized(false) {} HouseholderQR(const MatrixType& matrix) : m_qr(matrix.rows(), matrix.cols()), - m_hCoeffs(std::min(matrix.rows(),matrix.cols())), + m_hCoeffs((std::min)(matrix.rows(),matrix.cols())), m_temp(matrix.cols()), m_isInitialized(false) { @@ -210,7 +210,7 @@ void householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename typedef typename MatrixQR::RealScalar RealScalar; Index rows = mat.rows(); Index cols = mat.cols(); - Index size = std::min(rows,cols); + Index size = (std::min)(rows,cols); eigen_assert(hCoeffs.size() == size); @@ -250,7 +250,7 @@ void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs, Index rows = mat.rows(); Index cols = mat.cols(); - Index size = std::min(rows, cols); + Index size = (std::min)(rows, cols); typedef Matrix TempType; TempType tempVector; @@ -260,12 +260,12 @@ void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs, tempData = tempVector.data(); } - Index blockSize = std::min(maxBlockSize,size); + Index blockSize = (std::min)(maxBlockSize,size); - int k = 0; + Index k = 0; for (k = 0; k < size; k += blockSize) { - Index bs = std::min(size-k,blockSize); // actual size of the block + Index bs = (std::min)(size-k,blockSize); // actual size of the block Index tcols = cols - k - bs; // trailing columns Index brows = rows-k; // rows of the block @@ -299,7 +299,7 @@ struct solve_retval, Rhs> template void evalTo(Dest& dst) const { const Index rows = dec().rows(), cols = dec().cols(); - const Index rank = std::min(rows, cols); + const Index rank = (std::min)(rows, cols); eigen_assert(rhs().rows() == rows); typename Rhs::PlainObject c(rhs()); @@ -327,7 +327,7 @@ HouseholderQR& HouseholderQR::compute(const MatrixType& { Index rows = matrix.rows(); Index cols = matrix.cols(); - Index size = std::min(rows,cols); + Index size = (std::min)(rows,cols); m_qr = matrix; m_hCoeffs.resize(size); diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SVD/JacobiSVD.h b/gtsam/3rdparty/Eigen/Eigen/src/SVD/JacobiSVD.h index 5539a72dc..5f6139998 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/SVD/JacobiSVD.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SVD/JacobiSVD.h @@ -569,7 +569,7 @@ void JacobiSVD::allocate(Index rows, Index cols, u "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. " "Use the ColPivHouseholderQR preconditioner instead."); } - m_diagSize = std::min(m_rows, m_cols); + m_diagSize = (std::min)(m_rows, m_cols); m_singularValues.resize(m_diagSize); m_matrixU.resize(m_rows, m_computeFullU ? m_rows : m_computeThinU ? m_diagSize @@ -619,8 +619,8 @@ JacobiSVD::compute(const MatrixType& matrix, unsig // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't // keep us iterating forever. 
using std::max; - if(max(internal::abs(m_workMatrix.coeff(p,q)),internal::abs(m_workMatrix.coeff(q,p))) - > max(internal::abs(m_workMatrix.coeff(p,p)),internal::abs(m_workMatrix.coeff(q,q)))*precision) + if((max)(internal::abs(m_workMatrix.coeff(p,q)),internal::abs(m_workMatrix.coeff(q,p))) + > (max)(internal::abs(m_workMatrix.coeff(p,p)),internal::abs(m_workMatrix.coeff(q,q)))*precision) { finished = false; @@ -689,7 +689,7 @@ struct solve_retval, Rhs> // A = U S V^* // So A^{-1} = V S^{-1} U^* - Index diagSize = std::min(dec().rows(), dec().cols()); + Index diagSize = (std::min)(dec().rows(), dec().cols()); typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize); Index nonzeroSingVals = dec().nonzeroSingularValues(); diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/AmbiVector.h b/gtsam/3rdparty/Eigen/Eigen/src/Sparse/AmbiVector.h index 01c93fbd7..2ea8ba309 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/AmbiVector.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Sparse/AmbiVector.h @@ -97,7 +97,7 @@ class AmbiVector void reallocateSparse() { Index copyElements = m_allocatedElements; - m_allocatedElements = std::min(Index(m_allocatedElements*1.5),m_size); + m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size); Index allocSize = m_allocatedElements * sizeof(ListEl); allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0); Scalar* newBuffer = new Scalar[allocSize]; diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/CompressedStorage.h b/gtsam/3rdparty/Eigen/Eigen/src/Sparse/CompressedStorage.h index 1c36a2632..b3bde272e 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/CompressedStorage.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Sparse/CompressedStorage.h @@ -216,7 +216,7 @@ class CompressedStorage { Scalar* newValues = new Scalar[size]; Index* newIndices = new Index[size]; - size_t copySize = std::min(size, m_size); + size_t copySize = (std::min)(size, m_size); // copy memcpy(newValues, m_values, copySize * sizeof(Scalar)); memcpy(newIndices, m_indices, copySize * sizeof(Index)); diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/DynamicSparseMatrix.h b/gtsam/3rdparty/Eigen/Eigen/src/Sparse/DynamicSparseMatrix.h index abbc995cc..93e75f4c6 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/DynamicSparseMatrix.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Sparse/DynamicSparseMatrix.h @@ -141,7 +141,7 @@ class DynamicSparseMatrix { if (outerSize()>0) { - Index reserveSizePerVector = std::max(reserveSize/outerSize(),Index(4)); + Index reserveSizePerVector = (std::max)(reserveSize/outerSize(),Index(4)); for (Index j=0; j::type nested(derived()); // const typename internal::nested::type otherNested(other.derived()); // return (nested - otherNested).cwise().abs2().sum() -// <= prec * prec * std::min(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum()); +// <= prec * prec * (std::min)(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum()); // } #endif // EIGEN_SPARSE_FUZZY_H diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseMatrix.h b/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseMatrix.h index 9e7802736..0e175ec6e 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseMatrix.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseMatrix.h @@ -257,7 +257,7 @@ class SparseMatrix // furthermore we bound the realloc ratio to: // 1) reduce multiple minor realloc when the matrix is almost filled // 2) avoid to allocate too much memory when the matrix is almost empty - reallocRatio = std::min(std::max(reallocRatio,1.5f),8.f); + reallocRatio = 
(std::min)((std::max)(reallocRatio,1.5f),8.f); } } m_data.resize(m_data.size()+1,reallocRatio); diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseMatrixBase.h b/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseMatrixBase.h index 8695f7343..c01981bc9 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseMatrixBase.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseMatrixBase.h @@ -223,7 +223,7 @@ template class SparseMatrixBase : public EigenBase // thanks to shallow copies, we always eval to a tempary Derived temp(other.rows(), other.cols()); - temp.reserve(std::max(this->rows(),this->cols())*2); + temp.reserve((std::max)(this->rows(),this->cols())*2); for (Index j=0; j class SparseMatrixBase : public EigenBase // eval without temporary derived().resize(other.rows(), other.cols()); derived().setZero(); - derived().reserve(std::max(this->rows(),this->cols())*2); + derived().reserve((std::max)(this->rows(),this->cols())*2); for (Index j=0; jjp)) dest._valuePtr()[k] = conj(it.value()); diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseSparseProduct.h b/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseSparseProduct.h index cade6fd54..19abcd1f8 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseSparseProduct.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Sparse/SparseSparseProduct.h @@ -45,7 +45,7 @@ static void sparse_product_impl2(const Lhs& lhs, const Rhs& rhs, ResultType& res // estimate the number of non zero entries float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols())); float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols); - float ratioRes = std::min(ratioLhs * avgNnzPerRhsColumn, 1.f); + float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f); // int t200 = rows/(log2(200)*1.39); // int t = (rows*100)/139; @@ -131,7 +131,7 @@ static void sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res) // estimate the number of non zero entries float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols())); float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols); - float ratioRes = std::min(ratioLhs * avgNnzPerRhsColumn, 1.f); + float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f); // mimics a resizeByInnerOuter: if(ResultType::IsRowMajor) @@ -143,7 +143,7 @@ static void sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res) for (Index j=0; j >; -#else - #define EIGEN_EXPLICIT_STL_VECTOR_INSTANTIATION(...) -#endif - /** * This section contains a convenience MACRO which allows an easy specialization of * std::vector such that for data types with alignment issues the correct allocator * is used automatically. */ #define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...) 
\ -EIGEN_EXPLICIT_STL_VECTOR_INSTANTIATION(__VA_ARGS__) \ namespace std \ { \ - template \ - class vector<__VA_ARGS__, _Ay> \ + template<> \ + class vector<__VA_ARGS__, std::allocator<__VA_ARGS__> > \ : public vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \ { \ typedef vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > vector_base; \ public: \ typedef __VA_ARGS__ value_type; \ - typedef typename vector_base::allocator_type allocator_type; \ - typedef typename vector_base::size_type size_type; \ - typedef typename vector_base::iterator iterator; \ + typedef vector_base::allocator_type allocator_type; \ + typedef vector_base::size_type size_type; \ + typedef vector_base::iterator iterator; \ explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {} \ template \ vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : vector_base(first, last, a) {} \ diff --git a/gtsam/3rdparty/Eigen/debug/gdb/printers.py b/gtsam/3rdparty/Eigen/debug/gdb/printers.py index 8c991e956..02823b8a2 100644 --- a/gtsam/3rdparty/Eigen/debug/gdb/printers.py +++ b/gtsam/3rdparty/Eigen/debug/gdb/printers.py @@ -56,12 +56,12 @@ class EigenMatrixPrinter: template_params = m.split(',') template_params = map(lambda x:x.replace(" ", ""), template_params) - if template_params[1] == '-0x00000000000000001': + if template_params[1] == '-0x00000000000000001' or template_params[1] == '-0x000000001': self.rows = val['m_storage']['m_rows'] else: self.rows = int(template_params[1]) - if template_params[2] == '-0x00000000000000001': + if template_params[2] == '-0x00000000000000001' or template_params[2] == '-0x000000001': self.cols = val['m_storage']['m_cols'] else: self.cols = int(template_params[2]) diff --git a/gtsam/3rdparty/Eigen/doc/C00_QuickStartGuide.dox b/gtsam/3rdparty/Eigen/doc/C00_QuickStartGuide.dox index 62b7f6061..ad772b2e1 100644 --- a/gtsam/3rdparty/Eigen/doc/C00_QuickStartGuide.dox +++ b/gtsam/3rdparty/Eigen/doc/C00_QuickStartGuide.dox @@ -63,7 +63,7 @@ The output is as follows: The second example starts by declaring a 3-by-3 matrix \c m which is initialized using the \link DenseBase::Random(Index,Index) Random() \endlink method with random values between -1 and 1. The next line applies a linear mapping such that the values are between 10 and 110. The function call \link DenseBase::Constant(Index,Index,const Scalar&) MatrixXd::Constant\endlink(3,3,1.2) returns a 3-by-3 matrix expression having all coefficients equal to 1.2. The rest is standard arithmetics. -The next line of the \c main function introduces a new type: \c VectorXd. This represents a (column) vector of arbitrary size. Here, the vector \c v is created to contains \c 3 coefficients which are left unitialized. The one but last line uses the so-called comma-initializer, explained in \ref TutorialAdvancedInitialization, to set all coefficients of the vector \c v to be as follows: +The next line of the \c main function introduces a new type: \c VectorXd. This represents a (column) vector of arbitrary size. Here, the vector \c v is created to contain \c 3 coefficients which are left unitialized. 
The one but last line uses the so-called comma-initializer, explained in \ref TutorialAdvancedInitialization, to set all coefficients of the vector \c v to be as follows: \f[ v = diff --git a/gtsam/3rdparty/Eigen/doc/C03_TutorialArrayClass.dox b/gtsam/3rdparty/Eigen/doc/C03_TutorialArrayClass.dox index 7d9e35b45..a1d8d6985 100644 --- a/gtsam/3rdparty/Eigen/doc/C03_TutorialArrayClass.dox +++ b/gtsam/3rdparty/Eigen/doc/C03_TutorialArrayClass.dox @@ -155,7 +155,7 @@ this doesn't have any runtime cost (provided that you let your compiler optimize Both \link MatrixBase::array() .array() \endlink and \link ArrayBase::matrix() .matrix() \endlink can be used as rvalues and as lvalues. -Mixing matrices and arrays in an expression is forbidden with Eigen. For instance, you cannot add a amtrix and +Mixing matrices and arrays in an expression is forbidden with Eigen. For instance, you cannot add a matrix and array directly; the operands of a \c + operator should either both be matrices or both be arrays. However, it is easy to convert from one to the other with \link MatrixBase::array() .array() \endlink and \link ArrayBase::matrix() .matrix()\endlink. The exception to this rule is the assignment operator: it is diff --git a/gtsam/3rdparty/Eigen/doc/C07_TutorialReductionsVisitorsBroadcasting.dox b/gtsam/3rdparty/Eigen/doc/C07_TutorialReductionsVisitorsBroadcasting.dox index 44e963424..e58ff6e2c 100644 --- a/gtsam/3rdparty/Eigen/doc/C07_TutorialReductionsVisitorsBroadcasting.dox +++ b/gtsam/3rdparty/Eigen/doc/C07_TutorialReductionsVisitorsBroadcasting.dox @@ -93,7 +93,7 @@ Array. The arguments passed to a visitor are pointers to the variables where the row and column position are to be stored. These variables should be of type -\link DenseBase::Index Index \endlink (FIXME: link ok?), as shown below: +\link DenseBase::Index Index \endlink, as shown below: @@ -141,7 +141,7 @@ return a 'column-vector' \subsection TutorialReductionsVisitorsBroadcastingPartialReductionsCombined Combining partial reductions with other operations It is also possible to use the result of a partial reduction to do further processing. -Here is another example that aims to find the column whose sum of elements is the maximum +Here is another example that finds the column whose sum of elements is the maximum within a matrix. With column-wise partial reductions this can be coded as:
Example: | Output:
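The reductions/visitors hunks above describe visitors that return a coefficient's position through pointers of type Index, and partial reductions combined with a further operation (finding the column with the largest sum). A small self-contained sketch of both patterns, using a made-up 2x4 MatrixXf rather than the tutorial's own snippet:

\code
#include <iostream>
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXf mat(2, 4);
  mat << 1, 2, 6, 9,
         3, 1, 7, 2;

  // Visitor: maximum coefficient, with its position written through Index pointers.
  Eigen::MatrixXf::Index maxRow, maxCol;
  float maxValue = mat.maxCoeff(&maxRow, &maxCol);

  // Partial reduction combined with another reduction: index of the column
  // whose sum of elements is the largest.
  Eigen::MatrixXf::Index maxColIndex;
  float maxColSum = mat.colwise().sum().maxCoeff(&maxColIndex);

  std::cout << "max " << maxValue << " at (" << maxRow << "," << maxCol << ")\n"
            << "largest column sum " << maxColSum << " in column " << maxColIndex << "\n";
  return 0;
}
\endcode

With these values the visitor reports 9 at (0,3), while the combined reduction selects column 2, whose sum is 13.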
diff --git a/gtsam/3rdparty/Eigen/doc/C08_TutorialGeometry.dox b/gtsam/3rdparty/Eigen/doc/C08_TutorialGeometry.dox index 983152ca5..aea0b6642 100644 --- a/gtsam/3rdparty/Eigen/doc/C08_TutorialGeometry.dox +++ b/gtsam/3rdparty/Eigen/doc/C08_TutorialGeometry.dox @@ -6,7 +6,7 @@ namespace Eigen { \li \b Previous: \ref TutorialReductionsVisitorsBroadcasting \li \b Next: \ref TutorialSparse -In this tutorial, we will shortly introduce the many possibilities offered by the \ref Geometry_Module "geometry module", namely 2D and 3D rotations and projective or affine transformations. +In this tutorial, we will briefly introduce the many possibilities offered by the \ref Geometry_Module "geometry module", namely 2D and 3D rotations and projective or affine transformations. \b Table \b of \b contents - \ref TutorialGeoElementaryTransformations @@ -78,7 +78,7 @@ representations are rotation matrices, while for other usages Quaternion is the representation of choice as they are compact, fast and stable. Finally Rotation2D and AngleAxis are mainly convenient types to create other rotation objects. -Notes on Translation and Scaling\n Likewise AngleAxis, these classes were +Notes on Translation and Scaling\n Like AngleAxis, these classes were designed to simplify the creation/initialization of linear (Matrix) and affine (Transform) transformations. Nevertheless, unlike AngleAxis which is inefficient to use, these classes might still be interesting to write generic and efficient algorithms taking as input any @@ -186,7 +186,7 @@ matNxN = t.extractRotation(); While transformation objects can be created and updated concatenating elementary transformations, the Transform class also features a procedural API:
-procedurale API | equivalent natural API
+procedural API | equivalent natural API
Translation\code t.translate(Vector_(tx,ty,..)); t.pretranslate(Vector_(tx,ty,..)); @@ -234,7 +234,7 @@ t = Translation_(..) * t * RotationType(..) * Translation_(..) * Scaling_(..);
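The table above contrasts the procedural API of the Transform class (t.translate(...), t.rotate(...) and their pre- variants) with the equivalent operator-based API. A hedged sketch, with made-up translation and angle values, of the two styles producing the same affine transform:

\code
#include <Eigen/Geometry>

Eigen::Affine3f proceduralStyle()
{
  Eigen::Affine3f t = Eigen::Affine3f::Identity();
  t.translate(Eigen::Vector3f(1.f, 2.f, 3.f));                       // t = t * Translation
  t.rotate(Eigen::AngleAxisf(0.785398f, Eigen::Vector3f::UnitZ()));  // t = t * Rotation
  return t;
}

Eigen::Affine3f naturalStyle()
{
  // The same transform written as a product of elementary transformations.
  Eigen::Affine3f t = Eigen::Translation3f(1.f, 2.f, 3.f)
                    * Eigen::AngleAxisf(0.785398f, Eigen::Vector3f::UnitZ());
  return t;
}
\endcode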
Euler angles might be convenient to create rotation objects. -On the other hand, since there exist 24 differents convension,they are pretty confusing to use. This example shows how +On the other hand, since there exist 24 different conventions, they are pretty confusing to use. This example shows how to create a rotation matrix according to the 2-1-2 convention.\code Matrix3f m; m = AngleAxisf(angle1, Vector3f::UnitZ()) diff --git a/gtsam/3rdparty/Eigen/doc/C09_TutorialSparse.dox b/gtsam/3rdparty/Eigen/doc/C09_TutorialSparse.dox index da32e3c0e..047ba7af2 100644 --- a/gtsam/3rdparty/Eigen/doc/C09_TutorialSparse.dox +++ b/gtsam/3rdparty/Eigen/doc/C09_TutorialSparse.dox @@ -55,17 +55,17 @@ and its internal representation using the Compressed Column Storage format:
Outer indices: 0 2 4 5 6 \em 7
-As you can guess, here the storage order is even more important than with dense matrix. We will therefore often make a clear difference between the \em inner and \em outer dimensions. For instance, it is easy to loop over the coefficients of an \em inner \em vector (e.g., a column of a column-major matrix), but completely inefficient to do the same for an \em outer \em vector (e.g., a row of a col-major matrix). +As you might guess, here the storage order is even more important than with dense matrices. We will therefore often make a clear difference between the \em inner and \em outer dimensions. For instance, it is efficient to loop over the coefficients of an \em inner \em vector (e.g., a column of a column-major matrix), but completely inefficient to do the same for an \em outer \em vector (e.g., a row of a column-major matrix). The SparseVector class implements the same compressed storage scheme but, of course, without any outer index buffer. -Since all nonzero coefficients of such a matrix are sequentially stored in memory, random insertion of new nonzeros can be extremely costly. To overcome this limitation, Eigen's sparse module provides a DynamicSparseMatrix class which is basically implemented as an array of SparseVector. In other words, a DynamicSparseMatrix is a SparseMatrix where the values and inner-indices arrays have been splitted into multiple small and resizable arrays. Assuming the number of nonzeros per inner vector is relatively low, this slight modification allow for very fast random insertion at the cost of a slight memory overhead and a lost of compatibility with other sparse libraries used by some of our highlevel solvers. Note that the major memory overhead comes from the extra memory preallocated by each inner vector to avoid an expensive memory reallocation at every insertion. +Since all nonzero coefficients of such a matrix are sequentially stored in memory, inserting a new nonzero near the "beginning" of the matrix can be extremely costly. As described below (\ref TutorialSparseFilling), one strategy is to fill nonzero coefficients in order. In cases where this is not possible, Eigen's sparse module also provides a DynamicSparseMatrix class which allows efficient random insertion. DynamicSparseMatrix is essentially implemented as an array of SparseVector, where the values and inner-indices arrays have been split into multiple small and resizable arrays. Assuming the number of nonzeros per inner vector is relatively small, this modification allows for very fast random insertion at the cost of a slight memory overhead (due to extra memory preallocated by each inner vector to avoid an expensive memory reallocation at every insertion) and a loss of compatibility with other sparse libraries used by some of our high-level solvers. Once complete, a DynamicSparseMatrix can be converted to a SparseMatrix to permit usage of these sparse libraries. -To summarize, it is recommanded to use a SparseMatrix whenever this is possible, and reserve the use of DynamicSparseMatrix for matrix assembly purpose when a SparseMatrix is not flexible enough. The respective pro/cons of both representations are summarized in the following table: +To summarize, it is recommended to use SparseMatrix whenever possible, and reserve the use of DynamicSparseMatrix to assemble a sparse matrix in cases when a SparseMatrix is not flexible enough. 
The respective pros/cons of both representations are summarized in the following table: - + @@ -82,7 +82,7 @@ To summarize, it is recommanded to use a SparseMatrix whenever this is possible, \b Matrix \b and \b vector \b properties \n -Here mat and vec represents any sparse-matrix and sparse-vector types respectively. +Here mat and vec represent any sparse-matrix and sparse-vector type, respectively.
 | SparseMatrix | DynamicSparseMatrix
memory usage | *** | **
memory efficiency | *** | **
sorted insertion | *** | ***
random insertion \n in sorted inner vector | ** | **
sorted insertion \n in random inner vector | - | ***
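As the reworded paragraph above explains, DynamicSparseMatrix trades a little memory and library compatibility for cheap random-order insertion, and can be compressed into a SparseMatrix once assembly is finished. A minimal sketch of that workflow with made-up indices and values (not code from the patch; assumes rows > 3 and cols > 2):

\code
#include <Eigen/Sparse>

Eigen::SparseMatrix<double> assemble(int rows, int cols)
{
  Eigen::DynamicSparseMatrix<double> dyn(rows, cols);
  dyn.coeffRef(3, 1) += 2.0;   // random-order insertion is cheap here
  dyn.coeffRef(0, 2) += 1.5;
  dyn.coeffRef(3, 1) += 0.5;   // accumulating into an existing entry also works

  return Eigen::SparseMatrix<double>(dyn);   // compress into the CCS representation
}
\endcode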
- @@ -105,12 +105,12 @@ vec.nonZeros() \endcode \b Iterating \b over \b the \b nonzero \b coefficients \n -Iterating over the coefficients of a sparse matrix can be done only in the same order than the storage order. Here is an example: +Iterating over the coefficients of a sparse matrix can be done only in the same order as the storage order. Here is an example:
Standard \n dimensions\code @@ -96,7 +96,7 @@ mat.innerSize() mat.outerSize()\endcode
Number of non \n zero coefficiens\code +
Number of non \n zero coefficients\code mat.nonZeros() \endcode \code vec.nonZeros() \endcode
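The iteration hunk in this area keeps the rule that nonzero coefficients can only be traversed in storage order. Independent of the tutorial's own listing that follows, here is a self-contained sketch of the usual outer-loop plus InnerIterator idiom, assuming a column-major SparseMatrix<double>:

\code
#include <Eigen/Sparse>

// Sum all stored coefficients in storage order: the outer loop walks the
// outer vectors (columns here), the InnerIterator walks the nonzeros of
// each inner vector.
double sumNonZeros(const Eigen::SparseMatrix<double>& mat)
{
  double sum = 0.0;
  for (int k = 0; k < mat.outerSize(); ++k)
    for (Eigen::SparseMatrix<double>::InnerIterator it(mat, k); it; ++it)
      sum += it.value();   // it.row(), it.col() and it.index() are also available
  return sum;
}
\endcode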
\code SparseMatrixType mat(rows,cols); -for (int k=0; k\ void adjoint(const MatrixType& m) // check basic properties of dot, norm, norm2 typedef typename NumTraits::Real RealScalar; - RealScalar ref = NumTraits::IsInteger ? 0 : std::max((s1 * v1 + s2 * v2).norm(),v3.norm()); + RealScalar ref = NumTraits::IsInteger ? 0 : (std::max)((s1 * v1 + s2 * v2).norm(),v3.norm()); VERIFY(test_isApproxWithRef((s1 * v1 + s2 * v2).dot(v3), internal::conj(s1) * v1.dot(v3) + internal::conj(s2) * v2.dot(v3), ref)); VERIFY(test_isApproxWithRef(v3.dot(s1 * v1 + s2 * v2), s1*v3.dot(v1)+s2*v3.dot(v2), ref)); VERIFY_IS_APPROX(internal::conj(v1.dot(v2)), v2.dot(v1)); @@ -76,7 +76,7 @@ template void adjoint(const MatrixType& m) // check compatibility of dot and adjoint - ref = NumTraits::IsInteger ? 0 : std::max(std::max(v1.norm(),v2.norm()),std::max((square * v2).norm(),(square.adjoint() * v1).norm())); + ref = NumTraits::IsInteger ? 0 : (std::max)((std::max)(v1.norm(),v2.norm()),(std::max)((square * v2).norm(),(square.adjoint() * v1).norm())); VERIFY(test_isApproxWithRef(v1.dot(square * v2), (square.adjoint() * v1).dot(v2), ref)); // like in testBasicStuff, test operator() to check const-qualification diff --git a/gtsam/3rdparty/Eigen/test/bandmatrix.cpp b/gtsam/3rdparty/Eigen/test/bandmatrix.cpp index 84fc387e0..996886f43 100644 --- a/gtsam/3rdparty/Eigen/test/bandmatrix.cpp +++ b/gtsam/3rdparty/Eigen/test/bandmatrix.cpp @@ -61,7 +61,7 @@ template void bandmatrix(const MatrixType& _m) m.col(i).setConstant(static_cast(i+1)); dm1.col(i).setConstant(static_cast(i+1)); } - Index d = std::min(rows,cols); + Index d = (std::min)(rows,cols); Index a = std::max(0,cols-d-supers); Index b = std::max(0,rows-d-subs); if(a>0) dm1.block(0,d+supers,rows,a).setZero(); diff --git a/gtsam/3rdparty/Eigen/test/cwiseop.cpp b/gtsam/3rdparty/Eigen/test/cwiseop.cpp index c63ace1cc..07ef599be 100644 --- a/gtsam/3rdparty/Eigen/test/cwiseop.cpp +++ b/gtsam/3rdparty/Eigen/test/cwiseop.cpp @@ -28,6 +28,14 @@ #include "main.h" #include +#ifdef min +#undef min +#endif + +#ifdef max +#undef max +#endif + using namespace std; template struct AddIfNull { diff --git a/gtsam/3rdparty/Eigen/test/main.h b/gtsam/3rdparty/Eigen/test/main.h index aa133d5fa..a8c6303b0 100644 --- a/gtsam/3rdparty/Eigen/test/main.h +++ b/gtsam/3rdparty/Eigen/test/main.h @@ -23,6 +23,9 @@ // License and a copy of the GNU General Public License along with // Eigen. If not, see . +#define min(A,B) please_protect_your_min_with_parentheses +#define max(A,B) please_protect_your_max_with_parentheses + #include #include #include @@ -429,7 +432,7 @@ void createRandomPIMatrixOfRank(typename MatrixType::Index desired_rank, typenam MatrixBType b = MatrixBType::Random(cols,cols); // set the diagonal such that only desired_rank non-zero entries reamain - const Index diag_size = std::min(d.rows(),d.cols()); + const Index diag_size = (std::min)(d.rows(),d.cols()); if(diag_size != desired_rank) d.diagonal().segment(desired_rank, diag_size-desired_rank) = VectorType::Zero(diag_size-desired_rank); diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/MPRealSupport b/gtsam/3rdparty/Eigen/unsupported/Eigen/MPRealSupport index 6bded0c2c..10fa23b35 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/MPRealSupport +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/MPRealSupport @@ -34,7 +34,7 @@ #include namespace Eigen { - + /** \defgroup MPRealSupport_Module MPFRC++ Support module * * \code @@ -45,6 +45,8 @@ namespace Eigen { * via the MPFR C++ * library which itself is built upon MPFR/GMP. 
* + * You can find a copy of MPFR C++ that is known to be compatible in the unsupported/test/mpreal folder. + * * Here is an example: * \code @@ -129,18 +131,6 @@ int main() return a + (b-a) * random(); } - template<> struct conj_impl { inline static const mpfr::mpreal& run(const mpfr::mpreal& x) { return x; } }; - template<> struct real_impl { inline static const mpfr::mpreal& run(const mpfr::mpreal& x) { return x; } }; - template<> struct imag_impl { inline static const mpfr::mpreal run(const mpfr::mpreal&) { return mpfr::mpreal(0); } }; - template<> struct abs_impl { inline static const mpfr::mpreal run(const mpfr::mpreal& x) { return mpfr::fabs(x); } }; - template<> struct abs2_impl { inline static const mpfr::mpreal run(const mpfr::mpreal& x) { return x*x; } }; - template<> struct sqrt_impl { inline static const mpfr::mpreal run(const mpfr::mpreal& x) { return mpfr::sqrt(x); } }; - template<> struct exp_impl { inline static const mpfr::mpreal run(const mpfr::mpreal& x) { return mpfr::exp(x); } }; - template<> struct log_impl { inline static const mpfr::mpreal run(const mpfr::mpreal& x) { return mpfr::log(x); } }; - template<> struct sin_impl { inline static const mpfr::mpreal run(const mpfr::mpreal& x) { return mpfr::sin(x); } }; - template<> struct cos_impl { inline static const mpfr::mpreal run(const mpfr::mpreal& x) { return mpfr::cos(x); } }; - template<> struct pow_impl { inline static const mpfr::mpreal run(const mpfr::mpreal& x, const mpfr::mpreal& y) { return mpfr::pow(x, y); } }; - bool isMuchSmallerThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& prec) { return mpfr::abs(a) <= mpfr::abs(b) * prec; @@ -148,7 +138,7 @@ int main() inline bool isApprox(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& prec) { - return mpfr::abs(a - b) <= mpfr::min(mpfr::abs(a), mpfr::abs(b)) * prec; + return mpfr::abs(a - b) <= (mpfr::min)(mpfr::abs(a), mpfr::abs(b)) * prec; } inline bool isApproxOrLessThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& prec) diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/OpenGLSupport b/gtsam/3rdparty/Eigen/unsupported/Eigen/OpenGLSupport index 0f09cee14..e5938e21c 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/OpenGLSupport +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/OpenGLSupport @@ -188,11 +188,11 @@ template void glLoadMatrix(const Transform& t) template void glLoadMatrix(const Transform& t) { glLoadMatrix(t.matrix()); } template void glLoadMatrix(const Transform& t) { glLoadMatrix(Transform(t).matrix()); } -void glRotate(const Rotation2D& rot) +static void glRotate(const Rotation2D& rot) { glRotatef(rot.angle()*180.f/float(M_PI), 0.f, 0.f, 1.f); } -void glRotate(const Rotation2D& rot) +static void glRotate(const Rotation2D& rot) { glRotated(rot.angle()*180.0/M_PI, 0.0, 0.0, 1.0); } @@ -256,18 +256,18 @@ EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glGet,GLenum,_,double, 4,4,Doublev) #ifdef GL_VERSION_2_0 -void glUniform2fv_ei (GLint loc, const float* v) { glUniform2fv(loc,1,v); } -void glUniform2iv_ei (GLint loc, const int* v) { glUniform2iv(loc,1,v); } +static void glUniform2fv_ei (GLint loc, const float* v) { glUniform2fv(loc,1,v); } +static void glUniform2iv_ei (GLint loc, const int* v) { glUniform2iv(loc,1,v); } -void glUniform3fv_ei (GLint loc, const float* v) { glUniform3fv(loc,1,v); } -void glUniform3iv_ei (GLint loc, const int* v) { glUniform3iv(loc,1,v); } +static void glUniform3fv_ei (GLint loc, const float* v) { glUniform3fv(loc,1,v); } +static void glUniform3iv_ei (GLint loc, const int* 
v) { glUniform3iv(loc,1,v); } -void glUniform4fv_ei (GLint loc, const float* v) { glUniform4fv(loc,1,v); } -void glUniform4iv_ei (GLint loc, const int* v) { glUniform4iv(loc,1,v); } +static void glUniform4fv_ei (GLint loc, const float* v) { glUniform4fv(loc,1,v); } +static void glUniform4iv_ei (GLint loc, const int* v) { glUniform4iv(loc,1,v); } -void glUniformMatrix2fv_ei (GLint loc, const float* v) { glUniformMatrix2fv(loc,1,false,v); } -void glUniformMatrix3fv_ei (GLint loc, const float* v) { glUniformMatrix3fv(loc,1,false,v); } -void glUniformMatrix4fv_ei (GLint loc, const float* v) { glUniformMatrix4fv(loc,1,false,v); } +static void glUniformMatrix2fv_ei (GLint loc, const float* v) { glUniformMatrix2fv(loc,1,false,v); } +static void glUniformMatrix3fv_ei (GLint loc, const float* v) { glUniformMatrix3fv(loc,1,false,v); } +static void glUniformMatrix4fv_ei (GLint loc, const float* v) { glUniformMatrix4fv(loc,1,false,v); } EIGEN_GL_FUNC1_DECLARATION (glUniform,GLint,const) @@ -286,12 +286,12 @@ EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,4,Matrix #ifdef GL_VERSION_2_1 -void glUniformMatrix2x3fv_ei(GLint loc, const float* v) { glUniformMatrix2x3fv(loc,1,false,v); } -void glUniformMatrix3x2fv_ei(GLint loc, const float* v) { glUniformMatrix3x2fv(loc,1,false,v); } -void glUniformMatrix2x4fv_ei(GLint loc, const float* v) { glUniformMatrix2x4fv(loc,1,false,v); } -void glUniformMatrix4x2fv_ei(GLint loc, const float* v) { glUniformMatrix4x2fv(loc,1,false,v); } -void glUniformMatrix3x4fv_ei(GLint loc, const float* v) { glUniformMatrix3x4fv(loc,1,false,v); } -void glUniformMatrix4x3fv_ei(GLint loc, const float* v) { glUniformMatrix4x3fv(loc,1,false,v); } +static void glUniformMatrix2x3fv_ei(GLint loc, const float* v) { glUniformMatrix2x3fv(loc,1,false,v); } +static void glUniformMatrix3x2fv_ei(GLint loc, const float* v) { glUniformMatrix3x2fv(loc,1,false,v); } +static void glUniformMatrix2x4fv_ei(GLint loc, const float* v) { glUniformMatrix2x4fv(loc,1,false,v); } +static void glUniformMatrix4x2fv_ei(GLint loc, const float* v) { glUniformMatrix4x2fv(loc,1,false,v); } +static void glUniformMatrix3x4fv_ei(GLint loc, const float* v) { glUniformMatrix3x4fv(loc,1,false,v); } +static void glUniformMatrix4x3fv_ei(GLint loc, const float* v) { glUniformMatrix4x3fv(loc,1,false,v); } EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 2,3,Matrix2x3fv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 3,2,Matrix3x2fv_ei) @@ -304,9 +304,9 @@ EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,3,Matrix #ifdef GL_VERSION_3_0 -void glUniform2uiv_ei (GLint loc, const unsigned int* v) { glUniform2uiv(loc,1,v); } -void glUniform3uiv_ei (GLint loc, const unsigned int* v) { glUniform3uiv(loc,1,v); } -void glUniform4uiv_ei (GLint loc, const unsigned int* v) { glUniform4uiv(loc,1,v); } +static void glUniform2uiv_ei (GLint loc, const unsigned int* v) { glUniform2uiv(loc,1,v); } +static void glUniform3uiv_ei (GLint loc, const unsigned int* v) { glUniform3uiv(loc,1,v); } +static void glUniform4uiv_ei (GLint loc, const unsigned int* v) { glUniform4uiv(loc,1,v); } EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 2,2uiv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 3,3uiv_ei) @@ -315,9 +315,9 @@ EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 4,4uiv_ei) #endif #ifdef GL_ARB_gpu_shader_fp64 -void glUniform2dv_ei (GLint loc, const double* v) { glUniform2dv(loc,1,v); } -void glUniform3dv_ei (GLint 
loc, const double* v) { glUniform3dv(loc,1,v); } -void glUniform4dv_ei (GLint loc, const double* v) { glUniform4dv(loc,1,v); } +static void glUniform2dv_ei (GLint loc, const double* v) { glUniform2dv(loc,1,v); } +static void glUniform3dv_ei (GLint loc, const double* v) { glUniform3dv(loc,1,v); } +static void glUniform4dv_ei (GLint loc, const double* v) { glUniform4dv(loc,1,v); } EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 2,2dv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 3,3dv_ei) diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/BVH/BVAlgorithms.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/BVH/BVAlgorithms.h index 2412680e7..e6fdf4737 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/BVH/BVAlgorithms.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/BVH/BVAlgorithms.h @@ -178,7 +178,7 @@ typename Minimizer::Scalar minimize_helper(const BVH &tree, Minimizer &minimizer todo.pop(); for(; oBegin != oEnd; ++oBegin) //go through child objects - minimum = std::min(minimum, minimizer.minimumOnObject(*oBegin)); + minimum = (std::min)(minimum, minimizer.minimumOnObject(*oBegin)); for(; vBegin != vEnd; ++vBegin) { //go through child volumes Scalar val = minimizer.minimumOnVolume(tree.getVolume(*vBegin)); @@ -274,12 +274,12 @@ typename Minimizer::Scalar BVMinimize(const BVH1 &tree1, const BVH2 &tree2, Mini for(; oBegin1 != oEnd1; ++oBegin1) { //go through child objects of first tree for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree - minimum = std::min(minimum, minimizer.minimumOnObjectObject(*oBegin1, *oCur2)); + minimum = (std::min)(minimum, minimizer.minimumOnObjectObject(*oBegin1, *oCur2)); } for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree Helper2 helper(*oBegin1, minimizer); - minimum = std::min(minimum, internal::minimize_helper(tree2, helper, *vCur2, minimum)); + minimum = (std::min)(minimum, internal::minimize_helper(tree2, helper, *vCur2, minimum)); } } @@ -288,7 +288,7 @@ typename Minimizer::Scalar BVMinimize(const BVH1 &tree1, const BVH2 &tree2, Mini for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree Helper1 helper(*oCur2, minimizer); - minimum = std::min(minimum, internal::minimize_helper(tree1, helper, *vBegin1, minimum)); + minimum = (std::min)(minimum, internal::minimize_helper(tree1, helper, *vBegin1, minimum)); } for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/FFT/ei_kissfft_impl.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/FFT/ei_kissfft_impl.h index 667638f40..04b98b083 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/FFT/ei_kissfft_impl.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/FFT/ei_kissfft_impl.h @@ -294,11 +294,19 @@ struct kissfft_impl inline void fwd2( Complex * dst,const Complex *src,int n0,int n1) { + EIGEN_UNUSED_VARIABLE(dst); + EIGEN_UNUSED_VARIABLE(src); + EIGEN_UNUSED_VARIABLE(n0); + EIGEN_UNUSED_VARIABLE(n1); } inline void inv2( Complex * dst,const Complex *src,int n0,int n1) { + EIGEN_UNUSED_VARIABLE(dst); + EIGEN_UNUSED_VARIABLE(src); + EIGEN_UNUSED_VARIABLE(n0); + EIGEN_UNUSED_VARIABLE(n1); } // real-to-complex forward FFT diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h index f4851d316..4d8e183ee 100644 --- 
a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h @@ -172,7 +172,7 @@ void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x, if (iter.noiseLevel() > 0 && transition) std::cerr << "CCG: transition\n"; if (transition || iter.first()) gamma = 0.0; - else gamma = std::max(0.0, (rho - old_z.dot(z)) / rho_1); + else gamma = (std::max)(0.0, (rho - old_z.dot(z)) / rho_1); p = z + gamma*p; ++iter; @@ -185,7 +185,7 @@ void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x, { Scalar bb = C.row(i).dot(p) - f[i]; if (bb > 0.0) - lambda = std::min(lambda, (f.coeff(i)-C.row(i).dot(x)) / bb); + lambda = (std::min)(lambda, (f.coeff(i)-C.row(i).dot(x)) / bb); } } x += lambda * p; diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/IterationController.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/IterationController.h index 385d8c546..a65793cd5 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/IterationController.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/IterativeSolvers/IterationController.h @@ -141,7 +141,7 @@ class IterationController bool converged(double nr) { m_res = internal::abs(nr); - m_resminreach = std::min(m_resminreach, m_res); + m_resminreach = (std::min)(m_resminreach, m_res); return converged(); } template bool converged(const VectorType &v) diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h index 87dc64ce1..d08766921 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h @@ -127,10 +127,10 @@ bool MatrixFunctionAtomic::taylorConverged(Index s, const MatrixType for (Index r = 0; r < n; r++) { RealScalar mx = 0; for (Index i = 0; i < n; i++) - mx = std::max(mx, std::abs(m_f(m_Ashifted(i, i) + m_avgEival, static_cast(s+r)))); + mx = (std::max)(mx, std::abs(m_f(m_Ashifted(i, i) + m_avgEival, static_cast(s+r)))); if (r != 0) rfactorial *= RealScalar(r); - delta = std::max(delta, mx / rfactorial); + delta = (std::max)(delta, mx / rfactorial); } const RealScalar P_norm = P.cwiseAbs().rowwise().sum().maxCoeff(); if (m_mu * delta * P_norm < NumTraits::epsilon() * F_norm) diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h index a4863700e..37abb6117 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h @@ -37,7 +37,7 @@ namespace HybridNonLinearSolverSpace { TolTooSmall = 3, NotMakingProgressJacobian = 4, NotMakingProgressIterations = 5, - UserAksed = 6 + UserAsked = 6 }; } @@ -181,7 +181,7 @@ HybridNonLinearSolver::solveInit(FVectorType &x) /* and calculate its norm. */ nfev = 1; if ( functor(x, fvec) < 0) - return HybridNonLinearSolverSpace::UserAksed; + return HybridNonLinearSolverSpace::UserAsked; fnorm = fvec.stableNorm(); /* initialize iteration counter and monitors. */ @@ -207,7 +207,7 @@ HybridNonLinearSolver::solveOneStep(FVectorType &x) /* calculate the jacobian matrix. 
*/ if ( functor.df(x, fjac) < 0) - return HybridNonLinearSolverSpace::UserAksed; + return HybridNonLinearSolverSpace::UserAsked; ++njev; wa2 = fjac.colwise().blueNorm(); @@ -228,7 +228,6 @@ HybridNonLinearSolver::solveOneStep(FVectorType &x) } /* compute the qr factorization of the jacobian. */ - wa2 = fjac.colwise().blueNorm(); HouseholderQR qrfac(fjac); // no pivoting: /* copy the triangular factor of the qr factorization into r. */ @@ -255,11 +254,11 @@ HybridNonLinearSolver::solveOneStep(FVectorType &x) /* on the first iteration, adjust the initial step bound. */ if (iter == 1) - delta = std::min(delta,pnorm); + delta = (std::min)(delta,pnorm); /* evaluate the function at x + p and calculate its norm. */ if ( functor(wa2, wa4) < 0) - return HybridNonLinearSolverSpace::UserAksed; + return HybridNonLinearSolverSpace::UserAsked; ++nfev; fnorm1 = wa4.stableNorm(); @@ -289,7 +288,7 @@ HybridNonLinearSolver::solveOneStep(FVectorType &x) ncfail = 0; ++ncsuc; if (ratio >= Scalar(.5) || ncsuc > 1) - delta = std::max(delta, pnorm / Scalar(.5)); + delta = (std::max)(delta, pnorm / Scalar(.5)); if (internal::abs(ratio - 1.) <= Scalar(.1)) { delta = pnorm / Scalar(.5); } @@ -322,7 +321,7 @@ HybridNonLinearSolver::solveOneStep(FVectorType &x) /* tests for termination and stringent tolerances. */ if (nfev >= parameters.maxfev) return HybridNonLinearSolverSpace::TooManyFunctionEvaluation; - if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= NumTraits::epsilon() * xnorm) + if (Scalar(.1) * (std::max)(Scalar(.1) * delta, pnorm) <= NumTraits::epsilon() * xnorm) return HybridNonLinearSolverSpace::TolTooSmall; if (nslow2 == 5) return HybridNonLinearSolverSpace::NotMakingProgressJacobian; @@ -420,7 +419,7 @@ HybridNonLinearSolver::solveNumericalDiffInit(FVectorType & /* and calculate its norm. */ nfev = 1; if ( functor(x, fvec) < 0) - return HybridNonLinearSolverSpace::UserAksed; + return HybridNonLinearSolverSpace::UserAsked; fnorm = fvec.stableNorm(); /* initialize iteration counter and monitors. */ @@ -448,8 +447,8 @@ HybridNonLinearSolver::solveNumericalDiffOneStep(FVectorType /* calculate the jacobian matrix. */ if (internal::fdjac1(functor, x, fvec, fjac, parameters.nb_of_subdiagonals, parameters.nb_of_superdiagonals, parameters.epsfcn) <0) - return HybridNonLinearSolverSpace::UserAksed; - nfev += std::min(parameters.nb_of_subdiagonals+parameters.nb_of_superdiagonals+ 1, n); + return HybridNonLinearSolverSpace::UserAsked; + nfev += (std::min)(parameters.nb_of_subdiagonals+parameters.nb_of_superdiagonals+ 1, n); wa2 = fjac.colwise().blueNorm(); @@ -469,7 +468,6 @@ HybridNonLinearSolver::solveNumericalDiffOneStep(FVectorType } /* compute the qr factorization of the jacobian. */ - wa2 = fjac.colwise().blueNorm(); HouseholderQR qrfac(fjac); // no pivoting: /* copy the triangular factor of the qr factorization into r. */ @@ -496,11 +494,11 @@ HybridNonLinearSolver::solveNumericalDiffOneStep(FVectorType /* on the first iteration, adjust the initial step bound. */ if (iter == 1) - delta = std::min(delta,pnorm); + delta = (std::min)(delta,pnorm); /* evaluate the function at x + p and calculate its norm. 
*/ if ( functor(wa2, wa4) < 0) - return HybridNonLinearSolverSpace::UserAksed; + return HybridNonLinearSolverSpace::UserAsked; ++nfev; fnorm1 = wa4.stableNorm(); @@ -530,7 +528,7 @@ HybridNonLinearSolver::solveNumericalDiffOneStep(FVectorType ncfail = 0; ++ncsuc; if (ratio >= Scalar(.5) || ncsuc > 1) - delta = std::max(delta, pnorm / Scalar(.5)); + delta = (std::max)(delta, pnorm / Scalar(.5)); if (internal::abs(ratio - 1.) <= Scalar(.1)) { delta = pnorm / Scalar(.5); } @@ -563,7 +561,7 @@ HybridNonLinearSolver::solveNumericalDiffOneStep(FVectorType /* tests for termination and stringent tolerances. */ if (nfev >= parameters.maxfev) return HybridNonLinearSolverSpace::TooManyFunctionEvaluation; - if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= NumTraits::epsilon() * xnorm) + if (Scalar(.1) * (std::max)(Scalar(.1) * delta, pnorm) <= NumTraits::epsilon() * xnorm) return HybridNonLinearSolverSpace::TolTooSmall; if (nslow2 == 5) return HybridNonLinearSolverSpace::NotMakingProgressJacobian; diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h index c43df3f7a..0ae681b1c 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h @@ -263,7 +263,7 @@ LevenbergMarquardt::minimizeOneStep(FVectorType &x) if (fnorm != 0.) for (Index j = 0; j < n; ++j) if (wa2[permutation.indices()[j]] != 0.) - gnorm = std::max(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]])); + gnorm = (std::max)(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]])); /* test for convergence of the gradient norm. */ if (gnorm <= parameters.gtol) @@ -285,7 +285,7 @@ LevenbergMarquardt::minimizeOneStep(FVectorType &x) /* on the first iteration, adjust the initial step bound. */ if (iter == 1) - delta = std::min(delta,pnorm); + delta = (std::min)(delta,pnorm); /* evaluate the function at x + p and calculate its norm. */ if ( functor(wa2, wa4) < 0) @@ -321,7 +321,7 @@ LevenbergMarquardt::minimizeOneStep(FVectorType &x) if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1)) temp = Scalar(.1); /* Computing MIN */ - delta = temp * std::min(delta, pnorm / Scalar(.1)); + delta = temp * (std::min)(delta, pnorm / Scalar(.1)); par /= temp; } else if (!(par != 0. && ratio < Scalar(.75))) { delta = pnorm / Scalar(.5); @@ -510,7 +510,7 @@ LevenbergMarquardt::minimizeOptimumStorageOneStep(FVectorTyp if (fnorm != 0.) for (j = 0; j < n; ++j) if (wa2[permutation.indices()[j]] != 0.) - gnorm = std::max(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]])); + gnorm = (std::max)(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]])); /* test for convergence of the gradient norm. */ if (gnorm <= parameters.gtol) @@ -532,7 +532,7 @@ LevenbergMarquardt::minimizeOptimumStorageOneStep(FVectorTyp /* on the first iteration, adjust the initial step bound. */ if (iter == 1) - delta = std::min(delta,pnorm); + delta = (std::min)(delta,pnorm); /* evaluate the function at x + p and calculate its norm. 
*/ if ( functor(wa2, wa4) < 0) @@ -568,7 +568,7 @@ LevenbergMarquardt::minimizeOptimumStorageOneStep(FVectorTyp if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1)) temp = Scalar(.1); /* Computing MIN */ - delta = temp * std::min(delta, pnorm / Scalar(.1)); + delta = temp * (std::min)(delta, pnorm / Scalar(.1)); par /= temp; } else if (!(par != 0. && ratio < Scalar(.75))) { delta = pnorm / Scalar(.5); diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/dogleg.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/dogleg.h index fffd9e0be..cbdcf4b71 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/dogleg.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/dogleg.h @@ -93,7 +93,7 @@ algo_end: /* form appropriate convex combination of the gauss-newton */ /* direction and the scaled gradient direction. */ - temp = (1.-alpha) * std::min(sgnorm,delta); + temp = (1.-alpha) * (std::min)(sgnorm,delta); x = temp * wa1 + alpha * x; } diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h index 887b76fa1..0a26c2061 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h @@ -26,7 +26,7 @@ DenseIndex fdjac1( Matrix< Scalar, Dynamic, 1 > wa1(n); Matrix< Scalar, Dynamic, 1 > wa2(n); - eps = sqrt(std::max(epsfcn,epsmch)); + eps = sqrt((std::max)(epsfcn,epsmch)); msum = ml + mu + 1; if (msum >= n) { /* computation of dense approximate jacobian. */ @@ -61,7 +61,7 @@ DenseIndex fdjac1( if (h == 0.) h = eps; fjac.col(j).setZero(); start = std::max(0,j-mu); - length = std::min(n-1, j+ml) - start + 1; + length = (std::min)(n-1, j+ml) - start + 1; fjac.col(j).segment(start, length) = ( wa1.segment(start, length)-fvec.segment(start, length))/h; } } diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/lmpar.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/lmpar.h index a6bbc50ba..62f4aabc9 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/lmpar.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/lmpar.h @@ -91,12 +91,12 @@ void lmpar( gnorm = wa1.stableNorm(); paru = gnorm / delta; if (paru == 0.) - paru = dwarf / std::min(delta,Scalar(0.1)); + paru = dwarf / (std::min)(delta,Scalar(0.1)); /* if the input par lies outside of the interval (parl,paru), */ /* set par to the closer endpoint. */ - par = std::max(par,parl); - par = std::min(par,paru); + par = (std::max)(par,parl); + par = (std::min)(par,paru); if (par == 0.) par = gnorm / dxnorm; @@ -106,7 +106,7 @@ void lmpar( /* evaluate the function at the current value of par. */ if (par == 0.) - par = std::max(dwarf,Scalar(.001) * paru); /* Computing MAX */ + par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */ wa1 = sqrt(par)* diag; Matrix< Scalar, Dynamic, 1 > sdiag(n); @@ -139,13 +139,13 @@ void lmpar( /* depending on the sign of the function, update parl or paru. */ if (fp > 0.) - parl = std::max(parl,par); + parl = (std::max)(parl,par); if (fp < 0.) - paru = std::min(paru,par); + paru = (std::min)(paru,par); /* compute an improved estimate for par. */ /* Computing MAX */ - par = std::max(parl,par+parc); + par = (std::max)(parl,par+parc); /* end of an iteration. 
*/ } @@ -227,12 +227,12 @@ void lmpar2( gnorm = wa1.stableNorm(); paru = gnorm / delta; if (paru == 0.) - paru = dwarf / std::min(delta,Scalar(0.1)); + paru = dwarf / (std::min)(delta,Scalar(0.1)); /* if the input par lies outside of the interval (parl,paru), */ /* set par to the closer endpoint. */ - par = std::max(par,parl); - par = std::min(par,paru); + par = (std::max)(par,parl); + par = (std::min)(par,paru); if (par == 0.) par = gnorm / dxnorm; @@ -243,7 +243,7 @@ void lmpar2( /* evaluate the function at the current value of par. */ if (par == 0.) - par = std::max(dwarf,Scalar(.001) * paru); /* Computing MAX */ + par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */ wa1 = sqrt(par)* diag; Matrix< Scalar, Dynamic, 1 > sdiag(n); @@ -275,12 +275,12 @@ void lmpar2( /* depending on the sign of the function, update parl or paru. */ if (fp > 0.) - parl = std::max(parl,par); + parl = (std::max)(parl,par); if (fp < 0.) - paru = std::min(paru,par); + paru = (std::min)(paru,par); /* compute an improved estimate for par. */ - par = std::max(parl,par+parc); + par = (std::max)(parl,par+parc); } if (iter == 0) par = 0.; diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/r1updt.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/r1updt.h index 0b8ede119..528f8eef9 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/r1updt.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NonLinearOptimization/r1updt.h @@ -11,6 +11,7 @@ void r1updt( bool *sing) { typedef DenseIndex Index; + const JacobiRotation IdentityRotation = JacobiRotation(1,0); /* Local variables */ const Index m = s.rows(); @@ -49,7 +50,8 @@ void r1updt( w[i] = givens.s() * s(j,i) + givens.c() * w[i]; s(j,i) = temp; } - } + } else + v_givens[j] = IdentityRotation; } /* add the spike from the rank 1 update to w. */ @@ -73,7 +75,8 @@ void r1updt( /* store the information necessary to recover the */ /* givens rotation. */ w_givens[j] = givens; - } + } else + v_givens[j] = IdentityRotation; /* test for zero diagonal elements in the output s. */ if (s(j,j) == 0.) 
{ diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h index dbf27c481..52dc0ec01 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h @@ -80,7 +80,7 @@ public: Scalar h; int nfev=0; const typename InputType::Index n = _x.size(); - const Scalar eps = internal::sqrt((std::max(epsfcn,NumTraits::epsilon() ))); + const Scalar eps = internal::sqrt(((std::max)(epsfcn,NumTraits::epsilon() ))); ValueType val1, val2; InputType x = _x; // TODO : we should do this only if the size is not already known diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineStorage.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineStorage.h index 13f3e0ced..62806bfb6 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineStorage.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Skyline/SkylineStorage.h @@ -221,11 +221,11 @@ protected: Index* upperProfile = new Index[upperProfileSize]; Index* lowerProfile = new Index[lowerProfileSize]; - Index copyDiagSize = std::min(diagSize, m_diagSize); - Index copyUpperSize = std::min(upperSize, m_upperSize); - Index copyLowerSize = std::min(lowerSize, m_lowerSize); - Index copyUpperProfileSize = std::min(upperProfileSize, m_upperProfileSize); - Index copyLowerProfileSize = std::min(lowerProfileSize, m_lowerProfileSize); + Index copyDiagSize = (std::min)(diagSize, m_diagSize); + Index copyUpperSize = (std::min)(upperSize, m_upperSize); + Index copyLowerSize = (std::min)(lowerSize, m_lowerSize); + Index copyUpperProfileSize = (std::min)(upperProfileSize, m_upperProfileSize); + Index copyLowerProfileSize = (std::min)(lowerProfileSize, m_lowerProfileSize); // copy memcpy(diag, m_diag, copyDiagSize * sizeof (Scalar)); diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h index df6b3cdf9..beb18f6cd 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h @@ -295,10 +295,10 @@ void SparseLU::extractData() const umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar()); // allocate data - m_l.resize(rows,std::min(rows,cols)); + m_l.resize(rows,(std::min)(rows,cols)); m_l.resizeNonZeros(lnz); - m_u.resize(std::min(rows,cols),cols); + m_u.resize((std::min)(rows,cols),cols); m_u.resizeNonZeros(unz); m_p.resize(rows); diff --git a/gtsam/3rdparty/Eigen/unsupported/test/BVH.cpp b/gtsam/3rdparty/Eigen/unsupported/test/BVH.cpp index ba5871e66..d773afb77 100644 --- a/gtsam/3rdparty/Eigen/unsupported/test/BVH.cpp +++ b/gtsam/3rdparty/Eigen/unsupported/test/BVH.cpp @@ -143,10 +143,10 @@ struct TreeTest VectorType pt = VectorType::Random(); BallPointStuff i1(pt), i2(pt); - double m1 = std::numeric_limits::max(), m2 = m1; + double m1 = (std::numeric_limits::max)(), m2 = m1; for(int i = 0; i < (int)b.size(); ++i) - m1 = std::min(m1, i1.minimumOnObject(b[i])); + m1 = (std::min)(m1, i1.minimumOnObject(b[i])); m2 = BVMinimize(tree, i2); @@ -194,11 +194,11 @@ struct TreeTest BallPointStuff i1, i2; - double m1 = std::numeric_limits::max(), m2 = m1; + double m1 = (std::numeric_limits::max)(), m2 = m1; for(int i = 0; i < (int)b.size(); ++i) for(int j = 0; j < (int)v.size(); ++j) - m1 = std::min(m1, 
i1.minimumOnObjectObject(b[i], v[j])); + m1 = (std::min)(m1, i1.minimumOnObjectObject(b[i], v[j])); m2 = BVMinimize(tree, vTree, i2); diff --git a/gtsam/3rdparty/Eigen/unsupported/test/CMakeLists.txt b/gtsam/3rdparty/Eigen/unsupported/test/CMakeLists.txt index 660bfad41..f8c0ff486 100644 --- a/gtsam/3rdparty/Eigen/unsupported/test/CMakeLists.txt +++ b/gtsam/3rdparty/Eigen/unsupported/test/CMakeLists.txt @@ -80,7 +80,7 @@ ei_add_test(FFT) find_package(MPFR 2.3.0) find_package(GMP) if(MPFR_FOUND) - include_directories(${MPFR_INCLUDES}) + include_directories(${MPFR_INCLUDES} ./mpreal) ei_add_property(EIGEN_TESTED_BACKENDS "MPFR C++, ") set(EIGEN_MPFR_TEST_LIBRARIES ${MPFR_LIBRARIES} ${GMP_LIBRARIES}) ei_add_test(mpreal_support "" "${EIGEN_MPFR_TEST_LIBRARIES}" ) diff --git a/gtsam/3rdparty/Eigen/unsupported/test/FFTW.cpp b/gtsam/3rdparty/Eigen/unsupported/test/FFTW.cpp index d3676005a..c535fd219 100644 --- a/gtsam/3rdparty/Eigen/unsupported/test/FFTW.cpp +++ b/gtsam/3rdparty/Eigen/unsupported/test/FFTW.cpp @@ -70,7 +70,7 @@ complex promote(long double x) { return complex( x); { long double totalpower=0; long double difpower=0; - size_t n = min( buf1.size(),buf2.size() ); + size_t n = (min)( buf1.size(),buf2.size() ); for (size_t k=0;k