Updating Eigen to version 3.0.2

parent 16b699ba1e
commit 22cce59f2c
@@ -175,13 +175,6 @@
 #include <new>
 #endif
 
-// this needs to be done after all possible windows C header includes and before any Eigen source includes
-// (system C++ includes are supposed to be able to deal with this already):
-// windows.h defines min and max macros which would make Eigen fail to compile.
-#if defined(min) || defined(max)
-#error The preprocessor symbols 'min' or 'max' are defined. If you are compiling on Windows, do #define NOMINMAX to prevent windows.h from defining these symbols.
-#endif
-
 // defined in bits/termios.h
 #undef B0
 
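The hunk above drops the hard #error on Windows' min/max macros: as the rest of this commit shows, every min/max call site is parenthesized instead, so the function-like macros can never expand. A minimal sketch (illustrative, not part of the commit) of why the parentheses work:

// windows.h, unless NOMINMAX is defined, provides function-like macros:
//   #define min(a,b) (((a) < (b)) ? (a) : (b))
// A function-like macro only expands when its name is immediately
// followed by '(' -- wrapping the callee in parentheses breaks that.
#include <algorithm>

int smaller(int a, int b)
{
  return (std::min)(a, b); // `min` is followed by ')': no macro expansion
}
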
@@ -233,7 +233,7 @@ template<> struct llt_inplace<Lower>
 
     Index blockSize = size/8;
     blockSize = (blockSize/16)*16;
-    blockSize = std::min(std::max(blockSize,Index(8)), Index(128));
+    blockSize = (std::min)((std::max)(blockSize,Index(8)), Index(128));
 
     for (Index k=0; k<size; k+=blockSize)
     {

@@ -241,7 +241,7 @@ template<> struct llt_inplace<Lower>
       // A00 | - | -
       // lu = A10 | A11 | -
       // A20 | A21 | A22
-      Index bs = std::min(blockSize, size-k);
+      Index bs = (std::min)(blockSize, size-k);
       Index rs = size - k - bs;
       Block<MatrixType,Dynamic,Dynamic> A11(m,k, k, bs,bs);
       Block<MatrixType,Dynamic,Dynamic> A21(m,k+bs,k, rs,bs);
 
@@ -87,7 +87,7 @@ class BandMatrixBase : public EigenBase<Derived>
       if (i<=supers())
       {
         start = supers()-i;
-        len = std::min(rows(),std::max<Index>(0,coeffs().rows() - (supers()-i)));
+        len = (std::min)(rows(),std::max<Index>(0,coeffs().rows() - (supers()-i)));
       }
       else if (i>=rows()-subs())
         len = std::max<Index>(0,coeffs().rows() - (i + 1 - rows() + subs()));

@@ -96,11 +96,11 @@ class BandMatrixBase : public EigenBase<Derived>
 
     /** \returns a vector expression of the main diagonal */
     inline Block<CoefficientsType,1,SizeAtCompileTime> diagonal()
-    { return Block<CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,std::min(rows(),cols())); }
+    { return Block<CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,(std::min)(rows(),cols())); }
 
     /** \returns a vector expression of the main diagonal (const version) */
     inline const Block<const CoefficientsType,1,SizeAtCompileTime> diagonal() const
-    { return Block<const CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,std::min(rows(),cols())); }
+    { return Block<const CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,(std::min)(rows(),cols())); }
 
     template<int Index> struct DiagonalIntReturnType {
       enum {

@@ -122,13 +122,13 @@ class BandMatrixBase : public EigenBase<Derived>
     /** \returns a vector expression of the \a N -th sub or super diagonal */
     template<int N> inline typename DiagonalIntReturnType<N>::Type diagonal()
     {
-      return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, std::max(0,N), 1, diagonalLength(N));
+      return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N));
     }
 
     /** \returns a vector expression of the \a N -th sub or super diagonal */
     template<int N> inline const typename DiagonalIntReturnType<N>::Type diagonal() const
     {
-      return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, std::max(0,N), 1, diagonalLength(N));
+      return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N));
     }
 
     /** \returns a vector expression of the \a i -th sub or super diagonal */

@@ -166,7 +166,7 @@ class BandMatrixBase : public EigenBase<Derived>
   protected:
 
     inline Index diagonalLength(Index i) const
-    { return i<0 ? std::min(cols(),rows()+i) : std::min(rows(),cols()-i); }
+    { return i<0 ? (std::min)(cols(),rows()+i) : (std::min)(rows(),cols()-i); }
 };
 
 /**

@@ -284,6 +284,7 @@ class BandMatrixWrapper : public BandMatrixBase<BandMatrixWrapper<_CoefficientsT
       : m_coeffs(coeffs),
         m_rows(rows), m_supers(supers), m_subs(subs)
     {
+      EIGEN_UNUSED_VARIABLE(cols);
       //internal::assert(coeffs.cols()==cols() && (supers()+subs()+1)==coeffs.rows());
     }
 
@@ -742,7 +742,7 @@ struct setIdentity_impl<Derived, true>
   static EIGEN_STRONG_INLINE Derived& run(Derived& m)
   {
     m.setZero();
-    const Index size = std::min(m.rows(), m.cols());
+    const Index size = (std::min)(m.rows(), m.cols());
     for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1);
     return m;
   }
 
@@ -87,7 +87,7 @@ template<typename MatrixType, int DiagIndex> class Diagonal
     EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)
 
     inline Index rows() const
-    { return m_index.value()<0 ? std::min(m_matrix.cols(),m_matrix.rows()+m_index.value()) : std::min(m_matrix.rows(),m_matrix.cols()-m_index.value()); }
+    { return m_index.value()<0 ? (std::min)(m_matrix.cols(),m_matrix.rows()+m_index.value()) : (std::min)(m_matrix.rows(),m_matrix.cols()-m_index.value()); }
 
     inline Index cols() const { return 1; }
 
@@ -116,7 +116,9 @@ MatrixBase<Derived>::eigen2_dot(const MatrixBase<OtherDerived>& other) const
 
 //---------- implementation of L2 norm and related functions ----------
 
-/** \returns the squared \em l2 norm of *this, i.e., for vectors, the dot product of *this with itself.
+/** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the Frobenius norm.
+  * In both cases, it consists in the sum of the square of all the matrix entries.
+  * For vectors, this is also equals to the dot product of \c *this with itself.
   *
   * \sa dot(), norm()
   */

@@ -126,7 +128,9 @@ EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scala
   return internal::real((*this).cwiseAbs2().sum());
 }
 
-/** \returns the \em l2 norm of *this, i.e., for vectors, the square root of the dot product of *this with itself.
+/** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm.
+  * In both cases, it consists in the square root of the sum of the square of all the matrix entries.
+  * For vectors, this is also equals to the square root of the dot product of \c *this with itself.
   *
   * \sa dot(), squaredNorm()
   */
 
@@ -116,7 +116,7 @@ struct functor_traits<scalar_conj_product_op<LhsScalar,RhsScalar> > {
   */
 template<typename Scalar> struct scalar_min_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_min_op)
-  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::min; return min(a, b); }
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::min; return (min)(a, b); }
   template<typename Packet>
   EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
   { return internal::pmin(a,b); }

@@ -139,7 +139,7 @@ struct functor_traits<scalar_min_op<Scalar> > {
   */
 template<typename Scalar> struct scalar_max_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_max_op)
-  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::max; return max(a, b); }
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::max; return (max)(a, b); }
   template<typename Packet>
   EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
   { return internal::pmax(a,b); }
 
@@ -167,8 +167,8 @@ template<typename Scalar> struct scalar_hypot_op {
   {
     using std::max;
     using std::min;
-    Scalar p = max(_x, _y);
-    Scalar q = min(_x, _y);
+    Scalar p = (max)(_x, _y);
+    Scalar q = (min)(_x, _y);
     Scalar qp = q/p;
     return p * sqrt(Scalar(1) + qp*qp);
   }
 
@@ -37,7 +37,7 @@ struct isApprox_selector
     using std::min;
     const typename internal::nested<Derived,2>::type nested(x);
     const typename internal::nested<OtherDerived,2>::type otherNested(y);
-    return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * min(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum());
+    return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * (min)(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum());
   }
 };
 
@@ -94,7 +94,7 @@ struct isMuchSmallerThan_scalar_selector<Derived, true>
   *
   * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$
   * are considered to be approximately equal within precision \f$ p \f$ if
-  * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f]
+  * \f[ \Vert v - w \Vert \leqslant p\,\(min)(\Vert v\Vert, \Vert w\Vert). \f]
   * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka Frobenius norm
   * L2 norm).
   *
@@ -134,12 +134,12 @@ pdiv(const Packet& a,
 /** \internal \returns the min of \a a and \a b (coeff-wise) */
 template<typename Packet> inline Packet
 pmin(const Packet& a,
-     const Packet& b) { using std::min; return min(a, b); }
+     const Packet& b) { using std::min; return (min)(a, b); }
 
 /** \internal \returns the max of \a a and \a b (coeff-wise) */
 template<typename Packet> inline Packet
 pmax(const Packet& a,
-     const Packet& b) { using std::max; return max(a, b); }
+     const Packet& b) { using std::max; return (max)(a, b); }
 
 /** \internal \returns the absolute value of \a a */
 template<typename Packet> inline Packet
 
@@ -378,8 +378,8 @@ struct hypot_impl
     using std::min;
     RealScalar _x = abs(x);
     RealScalar _y = abs(y);
-    RealScalar p = max(_x, _y);
-    RealScalar q = min(_x, _y);
+    RealScalar p = (max)(_x, _y);
+    RealScalar q = (min)(_x, _y);
     RealScalar qp = q/p;
     return p * sqrt(RealScalar(1) + qp*qp);
   }
 
@@ -737,7 +737,7 @@ struct scalar_fuzzy_default_impl<Scalar, false, false>
   static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
   {
     using std::min;
-    return abs(x - y) <= min(abs(x), abs(y)) * prec;
+    return abs(x - y) <= (min)(abs(x), abs(y)) * prec;
   }
   static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar& prec)
   {

@@ -776,7 +776,7 @@ struct scalar_fuzzy_default_impl<Scalar, true, false>
   static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
   {
     using std::min;
-    return abs2(x - y) <= min(abs2(x), abs2(y)) * prec * prec;
+    return abs2(x - y) <= (min)(abs2(x), abs2(y)) * prec * prec;
   }
 };
 
@@ -111,7 +111,7 @@ template<typename Derived> class MatrixBase
 
     /** \returns the size of the main diagonal, which is min(rows(),cols()).
       * \sa rows(), cols(), SizeAtCompileTime. */
-    inline Index diagonalSize() const { return std::min(rows(),cols()); }
+    inline Index diagonalSize() const { return (std::min)(rows(),cols()); }
 
     /** \brief The plain matrix type corresponding to this expression.
       *
@@ -87,8 +87,8 @@ template<typename T> struct GenericNumTraits
     // make sure to override this for floating-point types
     return Real(0);
   }
-  inline static T highest() { return std::numeric_limits<T>::max(); }
-  inline static T lowest()  { return IsInteger ? std::numeric_limits<T>::min() : (-std::numeric_limits<T>::max()); }
+  inline static T highest() { return (std::numeric_limits<T>::max)(); }
+  inline static T lowest()  { return IsInteger ? (std::numeric_limits<T>::min)() : (-(std::numeric_limits<T>::max)()); }
 
 #ifdef EIGEN2_SUPPORT
   enum {
 
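std::numeric_limits needs the same protection even though min/max are static members there: with the windows.h macros active, the zero-argument call would be parsed as a macro invocation with the wrong arity. A hedged sketch (not from the commit):

#include <limits>

template<typename T>
T highest_value()
{
  // `max()` after `::` would still trigger the function-like max(a,b)
  // macro; parenthesizing the callee suppresses the expansion.
  return (std::numeric_limits<T>::max)();
}
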
@@ -647,8 +647,8 @@ struct internal::conservative_resize_like_impl
     {
       // The storage order does not allow us to use reallocation.
       typename Derived::PlainObject tmp(rows,cols);
-      const Index common_rows = std::min(rows, _this.rows());
-      const Index common_cols = std::min(cols, _this.cols());
+      const Index common_rows = (std::min)(rows, _this.rows());
+      const Index common_cols = (std::min)(cols, _this.cols());
       tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
       _this.derived().swap(tmp);
     }

@@ -681,8 +681,8 @@ struct internal::conservative_resize_like_impl
     {
       // The storage order does not allow us to use reallocation.
       typename Derived::PlainObject tmp(other);
-      const Index common_rows = std::min(tmp.rows(), _this.rows());
-      const Index common_cols = std::min(tmp.cols(), _this.cols());
+      const Index common_rows = (std::min)(tmp.rows(), _this.rows());
+      const Index common_cols = (std::min)(tmp.cols(), _this.cols());
       tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
       _this.derived().swap(tmp);
     }
 
@@ -69,7 +69,7 @@ MatrixBase<Derived>::stableNorm() const
     if (bi>0)
       internal::stable_norm_kernel(this->head(bi), ssq, scale, invScale);
     for (; bi<n; bi+=blockSize)
-      internal::stable_norm_kernel(this->segment(bi,min(blockSize, n - bi)).template forceAlignedAccessIf<Alignment>(), ssq, scale, invScale);
+      internal::stable_norm_kernel(this->segment(bi,(min)(blockSize, n - bi)).template forceAlignedAccessIf<Alignment>(), ssq, scale, invScale);
   return scale * internal::sqrt(ssq);
 }
 
@@ -103,12 +103,12 @@ MatrixBase<Derived>::blueNorm() const
   // For portability, the PORT subprograms "ilmaeh" and "rlmach"
   // are used. For any specific computer, each of the assignment
   // statements can be replaced
-  nbig = std::numeric_limits<Index>::max(); // largest integer
+  nbig = (std::numeric_limits<Index>::max)(); // largest integer
   ibeta = std::numeric_limits<RealScalar>::radix; // base for floating-point numbers
   it = std::numeric_limits<RealScalar>::digits; // number of base-beta digits in mantissa
   iemin = std::numeric_limits<RealScalar>::min_exponent; // minimum exponent
   iemax = std::numeric_limits<RealScalar>::max_exponent; // maximum exponent
-  rbig = std::numeric_limits<RealScalar>::max(); // largest floating-point number
+  rbig = (std::numeric_limits<RealScalar>::max)(); // largest floating-point number
 
   iexp = -((1-iemin)/2);
   b1 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // lower boundary of midrange

@@ -167,8 +167,8 @@ MatrixBase<Derived>::blueNorm() const
   }
   else
     return internal::sqrt(amed);
-  asml = min(abig, amed);
-  abig = max(abig, amed);
+  asml = (min)(abig, amed);
+  abig = (max)(abig, amed);
   if(asml <= abig*relerr)
     return abig;
   else
 
@@ -350,15 +350,14 @@ struct blas_traits<SelfCwiseBinaryOp<BinOp,NestedXpr,Rhs> >
 template<bool DestIsTransposed, typename OtherDerived>
 struct check_transpose_aliasing_compile_time_selector
 {
-  enum { ret = blas_traits<OtherDerived>::IsTransposed != DestIsTransposed
-  };
+  enum { ret = bool(blas_traits<OtherDerived>::IsTransposed) != DestIsTransposed };
 };
 
 template<bool DestIsTransposed, typename BinOp, typename DerivedA, typename DerivedB>
 struct check_transpose_aliasing_compile_time_selector<DestIsTransposed,CwiseBinaryOp<BinOp,DerivedA,DerivedB> >
 {
-  enum { ret = blas_traits<DerivedA>::IsTransposed != DestIsTransposed
-            || blas_traits<DerivedB>::IsTransposed != DestIsTransposed
+  enum { ret = bool(blas_traits<DerivedA>::IsTransposed) != DestIsTransposed
+            || bool(blas_traits<DerivedB>::IsTransposed) != DestIsTransposed
        };
 };
 

@@ -367,7 +366,7 @@ struct check_transpose_aliasing_run_time_selector
 {
   static bool run(const Scalar* dest, const OtherDerived& src)
   {
-    return (blas_traits<OtherDerived>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(Scalar*)extract_data(src));
+    return (bool(blas_traits<OtherDerived>::IsTransposed) != DestIsTransposed) && (dest!=0 && dest==(Scalar*)extract_data(src));
   }
 };
 
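The two hunks above are not about the min/max macros: IsTransposed is an anonymous-enum constant, and comparing it directly against a bool mixes int and bool, which draws conversion warnings on some compilers (notably MSVC). A hedged sketch with hypothetical stand-ins for the traits involved:

// Hypothetical stand-ins, not Eigen's real traits:
enum { IsTransposed = 1 };           // enum constant, promotes to int
const bool DestIsTransposed = false; // bool non-type parameter in Eigen

// bool(...) makes both operands the same type before the comparison:
enum { ret = bool(IsTransposed) != DestIsTransposed };
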
@@ -111,6 +111,7 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
       EIGEN_ONLY_USED_FOR_DEBUG(col);
       eigen_assert(col>=0 && col<cols() && row>=0 && row<rows());
       const int mode = int(Mode) & ~SelfAdjoint;
+      EIGEN_ONLY_USED_FOR_DEBUG(mode);
       eigen_assert((mode==Upper && col>=row)
                 || (mode==Lower && col<=row)
                 || ((mode==StrictlyUpper || mode==UnitUpper) && col>row)
 
@@ -491,7 +492,7 @@ struct triangular_assignment_selector<Derived1, Derived2, Upper, Dynamic, ClearO
   {
     for(Index j = 0; j < dst.cols(); ++j)
     {
-      Index maxi = std::min(j, dst.rows()-1);
+      Index maxi = (std::min)(j, dst.rows()-1);
       for(Index i = 0; i <= maxi; ++i)
         dst.copyCoeff(i, j, src);
       if (ClearOpposite)

@@ -511,7 +512,7 @@ struct triangular_assignment_selector<Derived1, Derived2, Lower, Dynamic, ClearO
   {
       for(Index i = j; i < dst.rows(); ++i)
        dst.copyCoeff(i, j, src);
-      Index maxi = std::min(j, dst.rows());
+      Index maxi = (std::min)(j, dst.rows());
      if (ClearOpposite)
       for(Index i = 0; i < maxi; ++i)
         dst.coeffRef(i, j) = static_cast<typename Derived1::Scalar>(0);

@@ -527,7 +528,7 @@ struct triangular_assignment_selector<Derived1, Derived2, StrictlyUpper, Dynamic
   {
     for(Index j = 0; j < dst.cols(); ++j)
     {
-      Index maxi = std::min(j, dst.rows());
+      Index maxi = (std::min)(j, dst.rows());
       for(Index i = 0; i < maxi; ++i)
         dst.copyCoeff(i, j, src);
       if (ClearOpposite)

@@ -547,7 +548,7 @@ struct triangular_assignment_selector<Derived1, Derived2, StrictlyLower, Dynamic
   {
       for(Index i = j+1; i < dst.rows(); ++i)
        dst.copyCoeff(i, j, src);
-      Index maxi = std::min(j, dst.rows()-1);
+      Index maxi = (std::min)(j, dst.rows()-1);
      if (ClearOpposite)
       for(Index i = 0; i <= maxi; ++i)
         dst.coeffRef(i, j) = static_cast<typename Derived1::Scalar>(0);

@@ -563,7 +564,7 @@ struct triangular_assignment_selector<Derived1, Derived2, UnitUpper, Dynamic, Cl
   {
     for(Index j = 0; j < dst.cols(); ++j)
     {
-      Index maxi = std::min(j, dst.rows());
+      Index maxi = (std::min)(j, dst.rows());
       for(Index i = 0; i < maxi; ++i)
         dst.copyCoeff(i, j, src);
       if (ClearOpposite)

@@ -583,7 +584,7 @@ struct triangular_assignment_selector<Derived1, Derived2, UnitLower, Dynamic, Cl
   {
     for(Index j = 0; j < dst.cols(); ++j)
     {
-      Index maxi = std::min(j, dst.rows());
+      Index maxi = (std::min)(j, dst.rows());
       for(Index i = maxi+1; i < dst.rows(); ++i)
         dst.copyCoeff(i, j, src);
       if (ClearOpposite)
 
@@ -795,7 +796,7 @@ bool MatrixBase<Derived>::isUpperTriangular(RealScalar prec) const
   RealScalar maxAbsOnUpperPart = static_cast<RealScalar>(-1);
   for(Index j = 0; j < cols(); ++j)
   {
-    Index maxi = std::min(j, rows()-1);
+    Index maxi = (std::min)(j, rows()-1);
     for(Index i = 0; i <= maxi; ++i)
     {
       RealScalar absValue = internal::abs(coeff(i,j));

@@ -827,7 +828,7 @@ bool MatrixBase<Derived>::isLowerTriangular(RealScalar prec) const
   RealScalar threshold = maxAbsOnLowerPart * prec;
   for(Index j = 1; j < cols(); ++j)
   {
-    Index maxi = std::min(j, rows()-1);
+    Index maxi = (std::min)(j, rows()-1);
     for(Index i = 0; i < maxi; ++i)
       if(internal::abs(coeff(i, j)) > threshold) return false;
   }
 
@@ -81,6 +81,7 @@ inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1=0, std::ptrdi
 template<typename LhsScalar, typename RhsScalar, int KcFactor>
 void computeProductBlockingSizes(std::ptrdiff_t& k, std::ptrdiff_t& m, std::ptrdiff_t& n)
 {
+  EIGEN_UNUSED_VARIABLE(n);
   // Explanations:
   // Let's recall the product algorithms form kc x nc horizontal panels B' on the rhs and
   // mc x kc blocks A' on the lhs. A' has to fit into L2 cache. Moreover, B' is processed

@@ -102,7 +103,6 @@ void computeProductBlockingSizes(std::ptrdiff_t& k, std::ptrdiff_t& m, std::ptrd
   k = std::min<std::ptrdiff_t>(k, l1/kdiv);
   std::ptrdiff_t _m = k>0 ? l2/(4 * sizeof(LhsScalar) * k) : 0;
   if(_m<m) m = _m & mr_mask;
-  n = n;
 }
 
 template<typename LhsScalar, typename RhsScalar>
 
@@ -78,7 +78,7 @@ static void run(Index rows, Index cols, Index depth,
   typedef gebp_traits<LhsScalar,RhsScalar> Traits;
 
   Index kc = blocking.kc(); // cache block size along the K direction
-  Index mc = std::min(rows,blocking.mc()); // cache block size along the M direction
+  Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
   //Index nc = blocking.nc(); // cache block size along the N direction
 
   gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;

@@ -103,7 +103,7 @@ static void run(Index rows, Index cols, Index depth,
     // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
     for(Index k=0; k<depth; k+=kc)
     {
-      const Index actual_kc = std::min(k+kc,depth)-k; // => rows of B', and cols of the A'
+      const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'
 
       // In order to reduce the chance that a thread has to wait for the other,
       // let's start by packing A'.
 
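Most of the remaining product-kernel hunks apply this same one-line change inside the cache-blocking loops. The recurring idiom computes the size of the current block, clipping the final, possibly partial one; a standalone sketch under assumed names (Index stands in for Eigen's index type):

#include <algorithm>

typedef long Index; // stand-in for Eigen's Index

void process_blocks(Index depth, Index kc)
{
  for (Index k = 0; k < depth; k += kc)
  {
    // block starting at k has size kc, except for the last one
    const Index actual_kc = (std::min)(k + kc, depth) - k;
    // ... pack and process the range [k, k + actual_kc) ...
  }
}
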
@@ -140,7 +140,7 @@ static void run(Index rows, Index cols, Index depth,
       // Then keep going as usual with the remaining A'
       for(Index i=mc; i<rows; i+=mc)
       {
-        const Index actual_mc = std::min(i+mc,rows)-i;
+        const Index actual_mc = (std::min)(i+mc,rows)-i;
 
         // pack A_i,k to A'
         pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);

@@ -174,7 +174,7 @@ static void run(Index rows, Index cols, Index depth,
     // (==GEMM_VAR1)
     for(Index k2=0; k2<depth; k2+=kc)
     {
-      const Index actual_kc = std::min(k2+kc,depth)-k2;
+      const Index actual_kc = (std::min)(k2+kc,depth)-k2;
 
       // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
       // => Pack rhs's panel into a sequential chunk of memory (L2 caching)

@@ -187,7 +187,7 @@ static void run(Index rows, Index cols, Index depth,
       // (==GEPP_VAR1)
       for(Index i2=0; i2<rows; i2+=mc)
       {
-        const Index actual_mc = std::min(i2+mc,rows)-i2;
+        const Index actual_mc = (std::min)(i2+mc,rows)-i2;
 
         // We pack the lhs's block into a sequential chunk of memory (L1 caching)
         // Note that this block will be read a very high number of times, which is equal to the number of
 
@@ -96,14 +96,14 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
 
   for(Index k2=0; k2<depth; k2+=kc)
   {
-    const Index actual_kc = std::min(k2+kc,depth)-k2;
+    const Index actual_kc = (std::min)(k2+kc,depth)-k2;
 
     // note that the actual rhs is the transpose/adjoint of mat
     pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, size);
 
     for(Index i2=0; i2<size; i2+=mc)
     {
-      const Index actual_mc = std::min(i2+mc,size)-i2;
+      const Index actual_mc = (std::min)(i2+mc,size)-i2;
 
       pack_lhs(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);
 

@@ -112,7 +112,7 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
       // 2 - the actual_mc x actual_mc symmetric block => processed with a special kernel
       // 3 - after the diagonal => processed with gebp or skipped
       if (UpLo==Lower)
-        gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, std::min(size,i2), alpha,
+        gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, (std::min)(size,i2), alpha,
              -1, -1, 0, 0, allocatedBlockB);
 
       sybb(res+resStride*i2 + i2, resStride, blockA, blockB + actual_kc*i2, actual_mc, actual_kc, alpha, allocatedBlockB);

@@ -120,7 +120,7 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
       if (UpLo==Upper)
       {
         Index j2 = i2+actual_mc;
-        gebp(res+resStride*j2+i2, resStride, blockA, blockB+actual_kc*j2, actual_mc, actual_kc, std::max(Index(0), size-j2), alpha,
+        gebp(res+resStride*j2+i2, resStride, blockA, blockB+actual_kc*j2, actual_mc, actual_kc, (std::max)(Index(0), size-j2), alpha,
              -1, -1, 0, 0, allocatedBlockB);
       }
     }
 
@@ -134,7 +134,7 @@ EIGEN_DONT_INLINE static void run(
   }
   else
   {
-    skipColumns = std::min(skipColumns,cols);
+    skipColumns = (std::min)(skipColumns,cols);
     // note that the skiped columns are processed later.
   }
 

@@ -386,7 +386,7 @@ EIGEN_DONT_INLINE static void run(
   }
   else
   {
-    skipRows = std::min(skipRows,Index(rows));
+    skipRows = (std::min)(skipRows,Index(rows));
     // note that the skiped columns are processed later.
   }
   eigen_internal_assert( alignmentPattern==NoneAligned
 
@@ -114,7 +114,7 @@ struct symm_pack_rhs
     }
 
     // second part: diagonal block
-    for(Index j2=k2; j2<std::min(k2+rows,packet_cols); j2+=nr)
+    for(Index j2=k2; j2<(std::min)(k2+rows,packet_cols); j2+=nr)
     {
       // again we can split vertically in three different parts (transpose, symmetric, normal)
       // transpose

@@ -179,7 +179,7 @@ struct symm_pack_rhs
     for(Index j2=packet_cols; j2<cols; ++j2)
     {
       // transpose
-      Index half = std::min(end_k,j2);
+      Index half = (std::min)(end_k,j2);
       for(Index k=k2; k<half; k++)
       {
         blockB[count] = conj(rhs(j2,k));
 
@@ -261,7 +261,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs
     Index nc = cols; // cache block size along the N direction
     computeProductBlockingSizes<Scalar,Scalar>(kc, mc, nc);
     // kc must smaller than mc
-    kc = std::min(kc,mc);
+    kc = (std::min)(kc,mc);
 
     std::size_t sizeW = kc*Traits::WorkSpaceFactor;
     std::size_t sizeB = sizeW + kc*cols;

@@ -276,7 +276,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs
 
     for(Index k2=0; k2<size; k2+=kc)
     {
-      const Index actual_kc = std::min(k2+kc,size)-k2;
+      const Index actual_kc = (std::min)(k2+kc,size)-k2;
 
       // we have selected one row panel of rhs and one column panel of lhs
       // pack rhs's panel into a sequential chunk of memory

@@ -289,7 +289,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs
       // 3 - the panel below the diagonal block => generic packed copy
       for(Index i2=0; i2<k2; i2+=mc)
       {
-        const Index actual_mc = std::min(i2+mc,k2)-i2;
+        const Index actual_mc = (std::min)(i2+mc,k2)-i2;
         // transposed packed copy
         pack_lhs_transposed(blockA, &lhs(k2, i2), lhsStride, actual_kc, actual_mc);
 

@@ -297,7 +297,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs
       }
       // the block diagonal
       {
-        const Index actual_mc = std::min(k2+kc,size)-k2;
+        const Index actual_mc = (std::min)(k2+kc,size)-k2;
         // symmetric packed copy
         pack_lhs(blockA, &lhs(k2,k2), lhsStride, actual_kc, actual_mc);
 

@@ -306,7 +306,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs
 
       for(Index i2=k2+kc; i2<size; i2+=mc)
      {
-        const Index actual_mc = std::min(i2+mc,size)-i2;
+        const Index actual_mc = (std::min)(i2+mc,size)-i2;
         gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder,false>()
           (blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);
 

@@ -352,14 +352,14 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLh
 
     for(Index k2=0; k2<size; k2+=kc)
     {
-      const Index actual_kc = std::min(k2+kc,size)-k2;
+      const Index actual_kc = (std::min)(k2+kc,size)-k2;
 
       pack_rhs(blockB, _rhs, rhsStride, actual_kc, cols, k2);
 
       // => GEPP
       for(Index i2=0; i2<rows; i2+=mc)
       {
-        const Index actual_mc = std::min(i2+mc,rows)-i2;
+        const Index actual_mc = (std::min)(i2+mc,rows)-i2;
         pack_lhs(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);
 
         gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha);
 
@@ -70,7 +70,7 @@ static EIGEN_DONT_INLINE void product_selfadjoint_vector(
       rhs[i] = *it;
   }
 
-  Index bound = std::max(Index(0),size-8) & 0xfffffffe;
+  Index bound = (std::max)(Index(0),size-8) & 0xfffffffe;
   if (FirstTriangular)
     bound = size - bound;
 
@@ -112,7 +112,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
     Scalar alpha)
   {
     // strip zeros
-    Index diagSize = std::min(_rows,_depth);
+    Index diagSize = (std::min)(_rows,_depth);
     Index rows = IsLower ? _rows : diagSize;
     Index depth = IsLower ? diagSize : _depth;
     Index cols = _cols;

@@ -145,7 +145,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
         IsLower ? k2>0 : k2<depth;
         IsLower ? k2-=kc : k2+=kc)
     {
-      Index actual_kc = std::min(IsLower ? k2 : depth-k2, kc);
+      Index actual_kc = (std::min)(IsLower ? k2 : depth-k2, kc);
       Index actual_k2 = IsLower ? k2-actual_kc : k2;
 
      // align blocks with the end of the triangular part for trapezoidal lhs

@@ -203,10 +203,10 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
       // the part below (lower case) or above (upper case) the diagonal => GEPP
       {
         Index start = IsLower ? k2 : 0;
-        Index end = IsLower ? rows : std::min(actual_k2,rows);
+        Index end = IsLower ? rows : (std::min)(actual_k2,rows);
         for(Index i2=start; i2<end; i2+=mc)
         {
-          const Index actual_mc = std::min(i2+mc,end)-i2;
+          const Index actual_mc = (std::min)(i2+mc,end)-i2;
           gemm_pack_lhs<Scalar, Index, Traits::mr,Traits::LhsProgress, LhsStorageOrder,false>()
             (blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc);
 
@@ -240,7 +240,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
     Scalar alpha)
   {
     // strip zeros
-    Index diagSize = std::min(_cols,_depth);
+    Index diagSize = (std::min)(_cols,_depth);
     Index rows = _rows;
     Index depth = IsLower ? _depth : diagSize;
     Index cols = IsLower ? diagSize : _cols;

@@ -275,7 +275,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
         IsLower ? k2<depth : k2>0;
         IsLower ? k2+=kc : k2-=kc)
     {
-      Index actual_kc = std::min(IsLower ? depth-k2 : k2, kc);
+      Index actual_kc = (std::min)(IsLower ? depth-k2 : k2, kc);
       Index actual_k2 = IsLower ? k2 : k2-actual_kc;
 
       // align blocks with the end of the triangular part for trapezoidal rhs

@@ -286,7 +286,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
       }
 
       // remaining size
-      Index rs = IsLower ? std::min(cols,actual_k2) : cols - k2;
+      Index rs = IsLower ? (std::min)(cols,actual_k2) : cols - k2;
       // size of the triangular part
       Index ts = (IsLower && actual_k2>=cols) ? 0 : actual_kc;
 

@@ -327,7 +327,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
 
       for (Index i2=0; i2<rows; i2+=mc)
       {
-        const Index actual_mc = std::min(mc,rows-i2);
+        const Index actual_mc = (std::min)(mc,rows-i2);
         pack_lhs(blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc);
 
         // triangular kernel
 
@@ -56,7 +56,7 @@ struct product_triangular_matrix_vector<Index,Mode,LhsScalar,ConjLhs,RhsScalar,C
 
     for (Index pi=0; pi<cols; pi+=PanelWidth)
     {
-      Index actualPanelWidth = std::min(PanelWidth, cols-pi);
+      Index actualPanelWidth = (std::min)(PanelWidth, cols-pi);
       for (Index k=0; k<actualPanelWidth; ++k)
       {
         Index i = pi + k;

@@ -107,7 +107,7 @@ struct product_triangular_matrix_vector<Index,Mode,LhsScalar,ConjLhs,RhsScalar,C
 
     for (Index pi=0; pi<cols; pi+=PanelWidth)
     {
-      Index actualPanelWidth = std::min(PanelWidth, cols-pi);
+      Index actualPanelWidth = (std::min)(PanelWidth, cols-pi);
       for (Index k=0; k<actualPanelWidth; ++k)
       {
         Index i = pi + k;
 
@@ -85,7 +85,7 @@ struct triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageO
         IsLower ? k2<size : k2>0;
         IsLower ? k2+=kc : k2-=kc)
     {
-      const Index actual_kc = std::min(IsLower ? size-k2 : k2, kc);
+      const Index actual_kc = (std::min)(IsLower ? size-k2 : k2, kc);
 
       // We have selected and packed a big horizontal panel R1 of rhs. Let B be the packed copy of this panel,
       // and R2 the remaining part of rhs. The corresponding vertical panel of lhs is split into

@@ -164,7 +164,7 @@ struct triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageO
       Index end = IsLower ? size : k2-kc;
       for(Index i2=start; i2<end; i2+=mc)
      {
-        const Index actual_mc = std::min(mc,end-i2);
+        const Index actual_mc = (std::min)(mc,end-i2);
         if (actual_mc>0)
         {
           pack_lhs(blockA, &tri(i2, IsLower ? k2 : k2-kc), triStride, actual_kc, actual_mc);

@@ -222,7 +222,7 @@ struct triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorage
         IsLower ? k2>0 : k2<size;
         IsLower ? k2-=kc : k2+=kc)
     {
-      const Index actual_kc = std::min(IsLower ? k2 : size-k2, kc);
+      const Index actual_kc = (std::min)(IsLower ? k2 : size-k2, kc);
       Index actual_k2 = IsLower ? k2-actual_kc : k2 ;
 
       Index startPanel = IsLower ? 0 : k2+actual_kc;

@@ -251,7 +251,7 @@ struct triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorage
 
       for(Index i2=0; i2<rows; i2+=mc)
       {
-        const Index actual_mc = std::min(mc,rows-i2);
+        const Index actual_mc = (std::min)(mc,rows-i2);
 
         // triangular solver kernel
         {
 
@@ -60,7 +60,7 @@ struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Con
         IsLower ? pi<size : pi>0;
         IsLower ? pi+=PanelWidth : pi-=PanelWidth)
     {
-      Index actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth);
+      Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth);
 
       Index r = IsLower ? pi : size - pi; // remaining size
       if (r > 0)

@@ -114,7 +114,7 @@ struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Con
         IsLower ? pi<size : pi>0;
         IsLower ? pi+=PanelWidth : pi-=PanelWidth)
     {
-      Index actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth);
+      Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth);
       Index startBlock = IsLower ? pi : pi-actualPanelWidth;
       Index endBlock = IsLower ? pi + actualPanelWidth : 0;
 
@@ -28,7 +28,7 @@
 
 #define EIGEN_WORLD_VERSION 3
 #define EIGEN_MAJOR_VERSION 0
-#define EIGEN_MINOR_VERSION 1
+#define EIGEN_MINOR_VERSION 2
 
 #define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
                                       (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
 
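Client code can detect the new patch level with the macro defined right below the version numbers; an illustrative sketch (not part of the commit):

#include <Eigen/Core>

#if EIGEN_VERSION_AT_LEAST(3,0,2)
// min/max call sites are parenthesized in this release, so defining
// NOMINMAX is no longer mandatory when including windows.h first.
#endif
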
@@ -399,7 +399,7 @@
 #define EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR) \
   template<typename OtherDerived> \
   EIGEN_STRONG_INLINE const CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived> \
-  METHOD(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
+  (METHOD)(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
   { \
     return CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived>(derived(), other.derived()); \
   }
 
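Parenthesizing METHOD matters because this macro is instantiated with min and max as method names, and C++ allows parentheses around a declarator: a function can be declared with a protected name too. A hedged sketch of that declaration-side trick (hypothetical types; the same pattern appears in the AlignedBox hunks below):

template<typename V>
struct Box
{
  V m_min;
  // declares Box::min(); the parenthesized declarator keeps a
  // function-like `min` macro from firing at the declaration site
  const V& (min)() const { return m_min; }
};

// call sites parenthesize the postfix expression for the same reason:
//   (box.min)();
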
@@ -156,7 +156,7 @@ inline void* generic_aligned_realloc(void* ptr, size_t size, size_t old_size)
 
   if (ptr != 0)
   {
-    std::memcpy(newptr, ptr, std::min(size,old_size));
+    std::memcpy(newptr, ptr, (std::min)(size,old_size));
     aligned_free(ptr);
   }
 
@@ -494,12 +494,12 @@ template<typename T> class aligned_stack_memory_handler
     aligned_stack_memory_handler(T* ptr, size_t size, bool dealloc)
       : m_ptr(ptr), m_size(size), m_deallocate(dealloc)
     {
-      if(NumTraits<T>::RequireInitialization)
+      if(NumTraits<T>::RequireInitialization && m_ptr)
         Eigen::internal::construct_elements_of_array(m_ptr, size);
     }
     ~aligned_stack_memory_handler()
     {
-      if(NumTraits<T>::RequireInitialization)
+      if(NumTraits<T>::RequireInitialization && m_ptr)
         Eigen::internal::destruct_elements_of_array<T>(m_ptr, m_size);
       if(m_deallocate)
         Eigen::internal::aligned_free(m_ptr);
 
@@ -663,12 +663,12 @@ public:
 
     size_type max_size() const throw()
     {
-      return std::numeric_limits<size_type>::max();
+      return (std::numeric_limits<size_type>::max)();
     }
 
-    pointer allocate( size_type num, const_pointer* hint = 0 )
+    pointer allocate( size_type num, const void* hint = 0 )
     {
-      static_cast<void>( hint ); // suppress unused variable warning
+      EIGEN_UNUSED_VARIABLE(hint);
       return static_cast<pointer>( internal::aligned_malloc( num * sizeof(T) ) );
     }
 
@@ -903,7 +903,7 @@ inline int queryTopLevelCacheSize()
 {
   int l1, l2(-1), l3(-1);
   queryCacheSizes(l1,l2,l3);
-  return std::max(l2,l3);
+  return (std::max)(l2,l3);
 }
 
 } // end namespace internal
 
@@ -82,13 +82,17 @@ template<typename ExpressionType> class Cwise
     const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)
     operator/(const MatrixBase<OtherDerived> &other) const;
 
     /** \deprecated ArrayBase::min() */
     template<typename OtherDerived>
     const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op)
-    min(const MatrixBase<OtherDerived> &other) const;
+    (min)(const MatrixBase<OtherDerived> &other) const
+    { return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op)(_expression(), other.derived()); }
 
     /** \deprecated ArrayBase::max() */
     template<typename OtherDerived>
     const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op)
-    max(const MatrixBase<OtherDerived> &other) const;
+    (max)(const MatrixBase<OtherDerived> &other) const
+    { return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op)(_expression(), other.derived()); }
 
     const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs_op) abs() const;
     const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs2_op) abs2() const;
 
@@ -96,24 +96,6 @@ inline ExpressionType& Cwise<ExpressionType>::operator/=(const MatrixBase<OtherD
   return m_matrix.const_cast_derived() = *this / other;
 }
 
-/** \deprecated ArrayBase::min() */
-template<typename ExpressionType>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op)
-Cwise<ExpressionType>::min(const MatrixBase<OtherDerived> &other) const
-{
-  return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op)(_expression(), other.derived());
-}
-
-/** \deprecated ArrayBase::max() */
-template<typename ExpressionType>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op)
-Cwise<ExpressionType>::max(const MatrixBase<OtherDerived> &other) const
-{
-  return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op)(_expression(), other.derived());
-}
-
 /***************************************************************************
 * The following functions were defined in Array
 ***************************************************************************/
 
@@ -51,14 +51,14 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
   { if (AmbientDimAtCompileTime!=Dynamic) setNull(); }
 
   /** Constructs a null box with \a _dim the dimension of the ambient space. */
   inline explicit AlignedBox(int _dim) : m_min(_dim), m_max(_dim)
   { setNull(); }
 
   /** Constructs a box with extremities \a _min and \a _max. */
   inline AlignedBox(const VectorType& _min, const VectorType& _max) : m_min(_min), m_max(_max) {}
 
   /** Constructs a box containing a single point \a p. */
   inline explicit AlignedBox(const VectorType& p) : m_min(p), m_max(p) {}
 
   ~AlignedBox() {}
 
@@ -71,18 +71,18 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
   /** Makes \c *this a null/empty box. */
   inline void setNull()
   {
-    m_min.setConstant( std::numeric_limits<Scalar>::max());
-    m_max.setConstant(-std::numeric_limits<Scalar>::max());
+    m_min.setConstant( (std::numeric_limits<Scalar>::max)());
+    m_max.setConstant(-(std::numeric_limits<Scalar>::max)());
   }
 
   /** \returns the minimal corner */
-  inline const VectorType& min() const { return m_min; }
+  inline const VectorType& (min)() const { return m_min; }
   /** \returns a non const reference to the minimal corner */
-  inline VectorType& min() { return m_min; }
+  inline VectorType& (min)() { return m_min; }
   /** \returns the maximal corner */
-  inline const VectorType& max() const { return m_max; }
+  inline const VectorType& (max)() const { return m_max; }
   /** \returns a non const reference to the maximal corner */
-  inline VectorType& max() { return m_max; }
+  inline VectorType& (max)() { return m_max; }
 
   /** \returns true if the point \a p is inside the box \c *this. */
   inline bool contains(const VectorType& p) const
 
@@ -90,19 +90,19 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
 
   /** \returns true if the box \a b is entirely inside the box \c *this. */
   inline bool contains(const AlignedBox& b) const
-  { return (m_min.cwise()<=b.min()).all() && (b.max().cwise()<=m_max).all(); }
+  { return (m_min.cwise()<=(b.min)()).all() && ((b.max)().cwise()<=m_max).all(); }
 
   /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
   inline AlignedBox& extend(const VectorType& p)
-  { m_min = m_min.cwise().min(p); m_max = m_max.cwise().max(p); return *this; }
+  { m_min = (m_min.cwise().min)(p); m_max = (m_max.cwise().max)(p); return *this; }
 
   /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */
   inline AlignedBox& extend(const AlignedBox& b)
-  { m_min = m_min.cwise().min(b.m_min); m_max = m_max.cwise().max(b.m_max); return *this; }
+  { m_min = (m_min.cwise().min)(b.m_min); m_max = (m_max.cwise().max)(b.m_max); return *this; }
 
   /** Clamps \c *this by the box \a b and returns a reference to \c *this. */
   inline AlignedBox& clamp(const AlignedBox& b)
-  { m_min = m_min.cwise().max(b.m_min); m_max = m_max.cwise().min(b.m_max); return *this; }
+  { m_min = (m_min.cwise().max)(b.m_min); m_max = (m_max.cwise().min)(b.m_max); return *this; }
 
   /** Translate \c *this by the vector \a t and returns a reference to \c *this. */
   inline AlignedBox& translate(const VectorType& t)
 
@@ -138,8 +138,8 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
   template<typename OtherScalarType>
   inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other)
   {
-    m_min = other.min().template cast<Scalar>();
-    m_max = other.max().template cast<Scalar>();
+    m_min = (other.min)().template cast<Scalar>();
+    m_max = (other.max)().template cast<Scalar>();
   }
 
   /** \returns \c true if \c *this is approximately equal to \a other, within the precision
 
@@ -64,9 +64,9 @@ template<typename MatrixType> class SVD
     SVD() {} // a user who relied on compiler-generated default compiler reported problems with MSVC in 2.0.7
 
     SVD(const MatrixType& matrix)
-      : m_matU(matrix.rows(), std::min(matrix.rows(), matrix.cols())),
+      : m_matU(matrix.rows(), (std::min)(matrix.rows(), matrix.cols())),
         m_matV(matrix.cols(),matrix.cols()),
-        m_sigma(std::min(matrix.rows(),matrix.cols()))
+        m_sigma((std::min)(matrix.rows(),matrix.cols()))
     {
       compute(matrix);
     }
 
@@ -108,13 +108,13 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
 {
   const int m = matrix.rows();
   const int n = matrix.cols();
-  const int nu = std::min(m,n);
+  const int nu = (std::min)(m,n);
   ei_assert(m>=n && "In Eigen 2.0, SVD only works for MxN matrices with M>=N. Sorry!");
   ei_assert(m>1 && "In Eigen 2.0, SVD doesn't work on 1x1 matrices");
 
   m_matU.resize(m, nu);
   m_matU.setZero();
-  m_sigma.resize(std::min(m,n));
+  m_sigma.resize((std::min)(m,n));
   m_matV.resize(n,n);
 
   RowVector e(n);
 
@@ -126,9 +126,9 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
 
   // Reduce A to bidiagonal form, storing the diagonal elements
   // in s and the super-diagonal elements in e.
-  int nct = std::min(m-1,n);
-  int nrt = std::max(0,std::min(n-2,m));
-  for (k = 0; k < std::max(nct,nrt); ++k)
+  int nct = (std::min)(m-1,n);
+  int nrt = (std::max)(0,(std::min)(n-2,m));
+  for (k = 0; k < (std::max)(nct,nrt); ++k)
   {
     if (k < nct)
     {
 
@@ -193,7 +193,7 @@
 
 
   // Set up the final bidiagonal matrix or order p.
-  int p = std::min(n,m+1);
+  int p = (std::min)(n,m+1);
   if (nct < n)
     m_sigma[nct] = matA(nct,nct);
   if (m < p)
 
@@ -380,7 +380,7 @@
       case 3:
       {
         // Calculate the shift.
-        Scalar scale = std::max(std::max(std::max(std::max(
+        Scalar scale = (std::max)((std::max)((std::max)((std::max)(
                        ei_abs(m_sigma[p-1]),ei_abs(m_sigma[p-2])),ei_abs(e[p-2])),
                        ei_abs(m_sigma[k])),ei_abs(e[k]));
         Scalar sp = m_sigma[p-1]/scale;
 
@@ -423,7 +423,7 @@ void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
     JacobiRotation<ComplexScalar> rot;
     rot.makeGivens(m_matT.coeff(il,il) - shift, m_matT.coeff(il+1,il));
     m_matT.rightCols(m_matT.cols()-il).applyOnTheLeft(il, il+1, rot.adjoint());
-    m_matT.topRows(std::min(il+2,iu)+1).applyOnTheRight(il, il+1, rot);
+    m_matT.topRows((std::min)(il+2,iu)+1).applyOnTheRight(il, il+1, rot);
     if(computeU) m_matU.applyOnTheRight(il, il+1, rot);
 
     for(Index i=il+1 ; i<iu ; i++)

@@ -431,7 +431,7 @@
       rot.makeGivens(m_matT.coeffRef(i,i-1), m_matT.coeffRef(i+1,i-1), &m_matT.coeffRef(i,i-1));
       m_matT.coeffRef(i+1,i-1) = ComplexScalar(0);
       m_matT.rightCols(m_matT.cols()-i).applyOnTheLeft(i, i+1, rot.adjoint());
-      m_matT.topRows(std::min(i+2,iu)+1).applyOnTheRight(i, i+1, rot);
+      m_matT.topRows((std::min)(i+2,iu)+1).applyOnTheRight(i, i+1, rot);
       if(computeU) m_matU.applyOnTheRight(i, i+1, rot);
     }
   }
 
@@ -435,7 +435,7 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
   Scalar norm = 0.0;
   for (Index j = 0; j < size; ++j)
   {
-    norm += m_matT.row(j).segment(std::max(j-1,Index(0)), size-std::max(j-1,Index(0))).cwiseAbs().sum();
+    norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum();
   }
 
   // Backsubstitute to find vectors of upper triangular form

@@ -564,7 +564,7 @@
 
           // Overflow control
           using std::max;
-          Scalar t = max(internal::abs(m_matT.coeff(i,n-1)),internal::abs(m_matT.coeff(i,n)));
+          Scalar t = (max)(internal::abs(m_matT.coeff(i,n-1)),internal::abs(m_matT.coeff(i,n)));
           if ((eps * t) * t > Scalar(1))
             m_matT.block(i, n-1, size-i, 2) /= t;
 
@@ -290,7 +290,7 @@ inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
   //   + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum();
   Scalar norm = 0.0;
   for (Index j = 0; j < size; ++j)
-    norm += m_matT.row(j).segment(std::max(j-1,Index(0)), size-std::max(j-1,Index(0))).cwiseAbs().sum();
+    norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum();
   return norm;
 }
 

@@ -442,7 +442,7 @@ inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Inde
 
     // These Householder transformations form the O(n^3) part of the algorithm
     m_matT.block(k, k, 3, size-k).applyHouseholderOnTheLeft(ess, tau, workspace);
-    m_matT.block(0, k, std::min(iu,k+3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace);
+    m_matT.block(0, k, (std::min)(iu,k+3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace);
     if (computeU)
       m_matU.block(0, k, size, 3).applyHouseholderOnTheRight(ess, tau, workspace);
   }
 
@@ -387,7 +387,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
   {
     m_eivalues.coeffRef(0,0) = internal::real(matrix.coeff(0,0));
     if(computeEigenvectors)
-      m_eivec.setOnes();
+      m_eivec.setOnes(n,n);
     m_info = Success;
     m_isInitialized = true;
     m_eigenvectorsOk = computeEigenvectors;
 
@@ -111,13 +111,13 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
   }
 
   /** \returns the minimal corner */
-  inline const VectorType& min() const { return m_min; }
+  inline const VectorType& (min)() const { return m_min; }
   /** \returns a non const reference to the minimal corner */
-  inline VectorType& min() { return m_min; }
+  inline VectorType& (min)() { return m_min; }
   /** \returns the maximal corner */
-  inline const VectorType& max() const { return m_max; }
+  inline const VectorType& (max)() const { return m_max; }
   /** \returns a non const reference to the maximal corner */
-  inline VectorType& max() { return m_max; }
+  inline VectorType& (max)() { return m_max; }
 
   /** \returns the center of the box */
   inline const CwiseUnaryOp<internal::scalar_quotient1_op<Scalar>,

@@ -196,7 +196,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
 
   /** \returns true if the box \a b is entirely inside the box \c *this. */
   inline bool contains(const AlignedBox& b) const
-  { return (m_min.array()<=b.min().array()).all() && (b.max().array()<=m_max.array()).all(); }
+  { return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); }
 
   /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
   template<typename Derived>

@@ -287,8 +287,8 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
   template<typename OtherScalarType>
   inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other)
   {
-    m_min = other.min().template cast<Scalar>();
-    m_max = other.max().template cast<Scalar>();
+    m_min = (other.min)().template cast<Scalar>();
+    m_max = (other.max)().template cast<Scalar>();
   }
 
   /** \returns \c true if \c *this is approximately equal to \a other, within the precision
 
@@ -182,7 +182,7 @@ AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived
   }
   else
   {
-    m_angle = Scalar(2)*acos(min(max(Scalar(-1),q.w()),Scalar(1)));
+    m_angle = Scalar(2)*acos((min)((max)(Scalar(-1),q.w()),Scalar(1)));
     m_axis = q.vec() / internal::sqrt(n2);
   }
   return *this;
 
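Besides the macro protection, the nested (min)/(max) pair in this hunk clamps q.w() into [-1,1] before acos, guarding against rounding noise pushing the argument out of the function's domain. A standalone equivalent, illustrative only:

#include <algorithm>
#include <cmath>

double angle_from_quaternion_w(double w)
{
  // clamp w into [-1,1], then recover the rotation angle
  return 2.0 * std::acos(std::min(std::max(-1.0, w), 1.0));
}
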
@@ -189,7 +189,7 @@ public:
   *
   * \note If \a other is approximately parallel to *this, this method will return any point on *this.
   */
-  VectorType intersection(const Hyperplane& other)
+  VectorType intersection(const Hyperplane& other) const
   {
     EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
     Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0);
 
@ -34,7 +34,7 @@
|
|||
*
|
||||
* A parametrized line is defined by an origin point \f$ \mathbf{o} \f$ and a unit
|
||||
* direction vector \f$ \mathbf{d} \f$ such that the line corresponds to
|
||||
* the set \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$, \f$ l \in \mathbf{R} \f$.
|
||||
* the set \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$, \f$ t \in \mathbf{R} \f$.
|
||||
*
|
||||
* \param _Scalar the scalar type, i.e., the type of the coefficients
|
||||
* \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
|
||||
|
|
@ -107,7 +107,7 @@ public:
|
|||
{ return origin() + direction().dot(p-origin()) * direction(); }
|
||||
|
||||
template <int OtherOptions>
|
||||
Scalar intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane);
|
||||
Scalar intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;
|
||||
|
||||
/** \returns \c *this with scalar type casted to \a NewScalarType
|
||||
*
|
||||
|
|
@ -159,7 +159,7 @@ inline ParametrizedLine<_Scalar, _AmbientDim,_Options>::ParametrizedLine(const H
|
|||
*/
|
||||
template <typename _Scalar, int _AmbientDim, int _Options>
|
||||
template <int OtherOptions>
|
||||
inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane)
|
||||
inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const
|
||||
{
|
||||
return -(hyperplane.offset()+hyperplane.normal().dot(origin()))
|
||||
/ hyperplane.normal().dot(direction());
|
||||
|
|
|
|||
|
|
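The `const` added to the two `intersection` overloads above is what lets them be called through const objects and references. A reduced sketch of why that matters, with simplified stand-in types rather than Eigen's actual classes:

struct Plane { double offset; };

struct Line {
  double o, d;
  // const-qualified, mirroring the patched ParametrizedLine::intersection
  double intersection(const Plane& p) const { return -(p.offset + o) / d; }
};

double firstHit(const Line& line, const Plane& plane) // line is const here
{
  return line.intersection(plane); // OK only because intersection() is const
}

int main()
{
  Line l = { 0.0, 1.0 };
  Plane p = { -2.0 };
  return firstHit(l, p) == 2.0 ? 0 : 1; // 0
}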
@@ -533,7 +533,7 @@ template<typename MatrixType>
MatrixType FullPivLU<MatrixType>::reconstructedMatrix() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
const Index smalldim = std::min(m_lu.rows(), m_lu.cols());
const Index smalldim = (std::min)(m_lu.rows(), m_lu.cols());
// LU
MatrixType res(m_lu.rows(),m_lu.cols());
// FIXME the .toDenseMatrix() should not be needed...

@@ -695,7 +695,7 @@ struct solve_retval<FullPivLU<_MatrixType>, Rhs>
const Index rows = dec().rows(), cols = dec().cols(),
nonzero_pivots = dec().nonzeroPivots();
eigen_assert(rhs().rows() == rows);
const Index smalldim = std::min(rows, cols);
const Index smalldim = (std::min)(rows, cols);

if(nonzero_pivots == 0)
{

@@ -253,7 +253,7 @@ struct partial_lu_impl
{
const Index rows = lu.rows();
const Index cols = lu.cols();
const Index size = std::min(rows,cols);
const Index size = (std::min)(rows,cols);
nb_transpositions = 0;
int first_zero_pivot = -1;
for(Index k = 0; k < size; ++k)

@@ -313,7 +313,7 @@ struct partial_lu_impl
MapLU lu1(lu_data,StorageOrder==RowMajor?rows:luStride,StorageOrder==RowMajor?luStride:cols);
MatrixType lu(lu1,0,0,rows,cols);

const Index size = std::min(rows,cols);
const Index size = (std::min)(rows,cols);

// if the matrix is too small, no blocking:
if(size<=16)

@@ -327,14 +327,14 @@ struct partial_lu_impl
{
blockSize = size/8;
blockSize = (blockSize/16)*16;
blockSize = std::min(std::max(blockSize,Index(8)), maxBlockSize);
blockSize = (std::min)((std::max)(blockSize,Index(8)), maxBlockSize);
}

nb_transpositions = 0;
int first_zero_pivot = -1;
for(Index k = 0; k < size; k+=blockSize)
{
Index bs = std::min(size-k,blockSize); // actual size of the block
Index bs = (std::min)(size-k,blockSize); // actual size of the block
Index trows = rows - k - bs; // trailing rows
Index tsize = size - k - bs; // trailing size
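The block-size computation touched above follows a simple heuristic: take roughly an eighth of the problem size, round down to a multiple of 16, then clamp into [8, maxBlockSize] with macro-safe min/max calls. A sketch of that logic in isolation (illustrative values, not Eigen API):

#include <algorithm>
#include <iostream>

typedef long Index;

Index pickBlockSize(Index size, Index maxBlockSize)
{
  Index blockSize = size / 8;
  blockSize = (blockSize / 16) * 16;  // round down to a multiple of 16
  return (std::min)((std::max)(blockSize, Index(8)), maxBlockSize);
}

int main()
{
  std::cout << pickBlockSize(1000, 256) << '\n'; // 112
  return 0;
}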
@@ -182,8 +182,8 @@ struct compute_inverse_size4<Architecture::SSE, double, MatrixType, ResultType>
};
static void run(const MatrixType& matrix, ResultType& result)
{
const EIGEN_ALIGN16 long long int _Sign_NP[2] = { 0x8000000000000000ll, 0x0000000000000000ll };
const EIGEN_ALIGN16 long long int _Sign_PN[2] = { 0x0000000000000000ll, 0x8000000000000000ll };
const __m128d _Sign_NP = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
const __m128d _Sign_PN = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));

// The inverse is calculated using "Divide and Conquer" technique. The
// original matrix is divide into four 2x2 sub-matrices. Since each

@@ -316,8 +316,8 @@ struct compute_inverse_size4<Architecture::SSE, double, MatrixType, ResultType>
iB1 = _mm_sub_pd(_mm_mul_pd(C1, dB), iB1);
iB2 = _mm_sub_pd(_mm_mul_pd(C2, dB), iB2);

d1 = _mm_xor_pd(rd, _mm_load_pd((double*)_Sign_PN));
d2 = _mm_xor_pd(rd, _mm_load_pd((double*)_Sign_NP));
d1 = _mm_xor_pd(rd, _Sign_PN);
d2 = _mm_xor_pd(rd, _Sign_NP);

// iC = B*|C| - A*C#*D;
dC = _mm_shuffle_pd(dC,dC,0);
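The two hunks above replace aligned integer arrays (loaded through `_mm_load_pd` and a type-punning cast) with masks built directly in a register via `_mm_castsi128_pd`, avoiding the memory round-trip. A standalone SSE2 sketch of the same mask construction and the XOR sign-flip it feeds:

#include <emmintrin.h>
#include <cstdio>

int main()
{
  // _mm_set_epi32 takes arguments from most- to least-significant, so this
  // puts the sign bit in the high 64-bit lane: mask = { 0.0, -0.0 }.
  const __m128d signHi = _mm_castsi128_pd(_mm_set_epi32(0x80000000, 0x0, 0x0, 0x0));
  __m128d v = _mm_set_pd(3.0, 2.0);           // v = { 2.0 (low), 3.0 (high) }
  __m128d r = _mm_xor_pd(v, signHi);          // flips the sign of the high lane only
  double out[2];
  _mm_storeu_pd(out, r);
  std::printf("%g %g\n", out[0], out[1]);     // prints: 2 -3
  return 0;
}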
@@ -93,7 +93,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
*/
ColPivHouseholderQR(Index rows, Index cols)
: m_qr(rows, cols),
m_hCoeffs(std::min(rows,cols)),
m_hCoeffs((std::min)(rows,cols)),
m_colsPermutation(cols),
m_colsTranspositions(cols),
m_temp(cols),

@@ -103,7 +103,7 @@ template<typename _MatrixType> class ColPivHouseholderQR

ColPivHouseholderQR(const MatrixType& matrix)
: m_qr(matrix.rows(), matrix.cols()),
m_hCoeffs(std::min(matrix.rows(),matrix.cols())),
m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
m_colsPermutation(matrix.cols()),
m_colsTranspositions(matrix.cols()),
m_temp(matrix.cols()),

@@ -93,21 +93,21 @@ template<typename _MatrixType> class FullPivHouseholderQR
*/
FullPivHouseholderQR(Index rows, Index cols)
: m_qr(rows, cols),
m_hCoeffs(std::min(rows,cols)),
m_hCoeffs((std::min)(rows,cols)),
m_rows_transpositions(rows),
m_cols_transpositions(cols),
m_cols_permutation(cols),
m_temp(std::min(rows,cols)),
m_temp((std::min)(rows,cols)),
m_isInitialized(false),
m_usePrescribedThreshold(false) {}

FullPivHouseholderQR(const MatrixType& matrix)
: m_qr(matrix.rows(), matrix.cols()),
m_hCoeffs(std::min(matrix.rows(), matrix.cols())),
m_hCoeffs((std::min)(matrix.rows(), matrix.cols())),
m_rows_transpositions(matrix.rows()),
m_cols_transpositions(matrix.cols()),
m_cols_permutation(matrix.cols()),
m_temp(std::min(matrix.rows(), matrix.cols())),
m_temp((std::min)(matrix.rows(), matrix.cols())),
m_isInitialized(false),
m_usePrescribedThreshold(false)
{
@@ -379,7 +379,7 @@ FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(cons
{
Index rows = matrix.rows();
Index cols = matrix.cols();
Index size = std::min(rows,cols);
Index size = (std::min)(rows,cols);

m_qr = matrix;
m_hCoeffs.resize(size);

@@ -493,7 +493,7 @@ struct solve_retval<FullPivHouseholderQR<_MatrixType>, Rhs>
RealScalar biggest_in_upper_part_of_c = c.topRows( dec().rank() ).cwiseAbs().maxCoeff();
RealScalar biggest_in_lower_part_of_c = c.bottomRows(rows-dec().rank()).cwiseAbs().maxCoeff();
// FIXME brain dead
const RealScalar m_precision = NumTraits<Scalar>::epsilon() * std::min(rows,cols);
const RealScalar m_precision = NumTraits<Scalar>::epsilon() * (std::min)(rows,cols);
// this internal:: prefix is needed by at least gcc 3.4 and ICC
if(!internal::isMuchSmallerThan(biggest_in_lower_part_of_c, biggest_in_upper_part_of_c, m_precision))
return;

@@ -520,7 +520,7 @@ typename FullPivHouseholderQR<MatrixType>::MatrixQType FullPivHouseholderQR<Matr
// and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...]
Index rows = m_qr.rows();
Index cols = m_qr.cols();
Index size = std::min(rows,cols);
Index size = (std::min)(rows,cols);
MatrixQType res = MatrixQType::Identity(rows, rows);
Matrix<Scalar,1,MatrixType::RowsAtCompileTime> temp(rows);
for (Index k = size-1; k >= 0; k--)

@@ -88,13 +88,13 @@ template<typename _MatrixType> class HouseholderQR
*/
HouseholderQR(Index rows, Index cols)
: m_qr(rows, cols),
m_hCoeffs(std::min(rows,cols)),
m_hCoeffs((std::min)(rows,cols)),
m_temp(cols),
m_isInitialized(false) {}

HouseholderQR(const MatrixType& matrix)
: m_qr(matrix.rows(), matrix.cols()),
m_hCoeffs(std::min(matrix.rows(),matrix.cols())),
m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
m_temp(matrix.cols()),
m_isInitialized(false)
{

@@ -210,7 +210,7 @@ void householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename
typedef typename MatrixQR::RealScalar RealScalar;
Index rows = mat.rows();
Index cols = mat.cols();
Index size = std::min(rows,cols);
Index size = (std::min)(rows,cols);

eigen_assert(hCoeffs.size() == size);
@@ -250,7 +250,7 @@ void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs,

Index rows = mat.rows();
Index cols = mat.cols();
Index size = std::min(rows, cols);
Index size = (std::min)(rows, cols);

typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixQR::MaxColsAtCompileTime,1> TempType;
TempType tempVector;

@@ -260,12 +260,12 @@ void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs,
tempData = tempVector.data();
}

Index blockSize = std::min(maxBlockSize,size);
Index blockSize = (std::min)(maxBlockSize,size);

int k = 0;
Index k = 0;
for (k = 0; k < size; k += blockSize)
{
Index bs = std::min(size-k,blockSize); // actual size of the block
Index bs = (std::min)(size-k,blockSize); // actual size of the block
Index tcols = cols - k - bs; // trailing columns
Index brows = rows-k; // rows of the block

@@ -299,7 +299,7 @@ struct solve_retval<HouseholderQR<_MatrixType>, Rhs>
template<typename Dest> void evalTo(Dest& dst) const
{
const Index rows = dec().rows(), cols = dec().cols();
const Index rank = std::min(rows, cols);
const Index rank = (std::min)(rows, cols);
eigen_assert(rhs().rows() == rows);

typename Rhs::PlainObject c(rhs());

@@ -327,7 +327,7 @@ HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType&
{
Index rows = matrix.rows();
Index cols = matrix.cols();
Index size = std::min(rows,cols);
Index size = (std::min)(rows,cols);

m_qr = matrix;
m_hCoeffs.resize(size);
@@ -569,7 +569,7 @@ void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, u
"JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
"Use the ColPivHouseholderQR preconditioner instead.");
}
m_diagSize = std::min(m_rows, m_cols);
m_diagSize = (std::min)(m_rows, m_cols);
m_singularValues.resize(m_diagSize);
m_matrixU.resize(m_rows, m_computeFullU ? m_rows
: m_computeThinU ? m_diagSize

@@ -619,8 +619,8 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
// notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
// keep us iterating forever.
using std::max;
if(max(internal::abs(m_workMatrix.coeff(p,q)),internal::abs(m_workMatrix.coeff(q,p)))
> max(internal::abs(m_workMatrix.coeff(p,p)),internal::abs(m_workMatrix.coeff(q,q)))*precision)
if((max)(internal::abs(m_workMatrix.coeff(p,q)),internal::abs(m_workMatrix.coeff(q,p)))
> (max)(internal::abs(m_workMatrix.coeff(p,p)),internal::abs(m_workMatrix.coeff(q,q)))*precision)
{
finished = false;
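The comment in the JacobiSVD hunk above relies on a basic IEEE-754 property: every ordered comparison involving a NaN is false, so phrasing the sweep test as "off-diagonal still large" guarantees termination even on degenerate input. A tiny illustration of that idiom in isolation:

#include <cmath>
#include <iostream>

int main()
{
  double offDiag = std::nan("");
  double threshold = 1e-12;
  // (offDiag > threshold) is false for NaN, so a sweep loop conditioned on it
  // stops rather than iterating forever.
  std::cout << std::boolalpha << (offDiag > threshold) << '\n'; // false
  return 0;
}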
@@ -689,7 +689,7 @@ struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
// A = U S V^*
// So A^{-1} = V S^{-1} U^*

Index diagSize = std::min(dec().rows(), dec().cols());
Index diagSize = (std::min)(dec().rows(), dec().cols());
typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize);

Index nonzeroSingVals = dec().nonzeroSingularValues();

@@ -97,7 +97,7 @@ class AmbiVector
void reallocateSparse()
{
Index copyElements = m_allocatedElements;
m_allocatedElements = std::min(Index(m_allocatedElements*1.5),m_size);
m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size);
Index allocSize = m_allocatedElements * sizeof(ListEl);
allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0);
Scalar* newBuffer = new Scalar[allocSize];
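The `reallocateSparse` hunk shows a capped geometric growth policy: capacity grows by roughly 1.5x per reallocation, but never past the logical size, so the final allocation cannot overshoot. A sketch of the policy with illustrative constants (not Eigen's internals):

#include <algorithm>
#include <iostream>

typedef long Index;

Index grow(Index allocated, Index size)
{
  return (std::min)(Index(allocated * 1.5), size);
}

int main()
{
  Index cap = 16;
  const Index size = 100;
  while (cap < size)
  {
    cap = grow(cap, size);
    std::cout << cap << ' '; // 24 36 54 81 100
  }
  return 0;
}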
@@ -216,7 +216,7 @@ class CompressedStorage
{
Scalar* newValues = new Scalar[size];
Index* newIndices = new Index[size];
size_t copySize = std::min(size, m_size);
size_t copySize = (std::min)(size, m_size);
// copy
memcpy(newValues, m_values, copySize * sizeof(Scalar));
memcpy(newIndices, m_indices, copySize * sizeof(Index));

@@ -141,7 +141,7 @@ class DynamicSparseMatrix
{
if (outerSize()>0)
{
Index reserveSizePerVector = std::max(reserveSize/outerSize(),Index(4));
Index reserveSizePerVector = (std::max)(reserveSize/outerSize(),Index(4));
for (Index j=0; j<outerSize(); ++j)
{
m_data[j].reserve(reserveSizePerVector);

@@ -35,7 +35,7 @@
// const typename internal::nested<Derived,2>::type nested(derived());
// const typename internal::nested<OtherDerived,2>::type otherNested(other.derived());
// return (nested - otherNested).cwise().abs2().sum()
// <= prec * prec * std::min(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum());
// <= prec * prec * (std::min)(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum());
// }

#endif // EIGEN_SPARSE_FUZZY_H

@@ -257,7 +257,7 @@ class SparseMatrix
// furthermore we bound the realloc ratio to:
// 1) reduce multiple minor realloc when the matrix is almost filled
// 2) avoid to allocate too much memory when the matrix is almost empty
reallocRatio = std::min(std::max(reallocRatio,1.5f),8.f);
reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
}
}
m_data.resize(m_data.size()+1,reallocRatio);

@@ -223,7 +223,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
// thanks to shallow copies, we always eval to a tempary
Derived temp(other.rows(), other.cols());

temp.reserve(std::max(this->rows(),this->cols())*2);
temp.reserve((std::max)(this->rows(),this->cols())*2);
for (Index j=0; j<outerSize; ++j)
{
temp.startVec(j);

@@ -253,7 +253,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
// eval without temporary
derived().resize(other.rows(), other.cols());
derived().setZero();
derived().reserve(std::max(this->rows(),this->cols())*2);
derived().reserve((std::max)(this->rows(),this->cols())*2);
for (Index j=0; j<outerSize; ++j)
{
derived().startVec(j);
@@ -383,7 +383,7 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
continue;

Index ip = perm ? perm[i] : i;
count[DstUpLo==Lower ? std::min(ip,jp) : std::max(ip,jp)]++;
count[DstUpLo==Lower ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
}
}
dest._outerIndexPtr()[0] = 0;

@@ -403,8 +403,8 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
continue;

Index ip = perm? perm[i] : i;
Index k = count[DstUpLo==Lower ? std::min(ip,jp) : std::max(ip,jp)]++;
dest._innerIndexPtr()[k] = DstUpLo==Lower ? std::max(ip,jp) : std::min(ip,jp);
Index k = count[DstUpLo==Lower ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
dest._innerIndexPtr()[k] = DstUpLo==Lower ? (std::max)(ip,jp) : (std::min)(ip,jp);

if((DstUpLo==Lower && ip<jp) || (DstUpLo==Upper && ip>jp))
dest._valuePtr()[k] = conj(it.value());

@@ -45,7 +45,7 @@ static void sparse_product_impl2(const Lhs& lhs, const Rhs& rhs, ResultType& res
// estimate the number of non zero entries
float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols()));
float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols);
float ratioRes = std::min(ratioLhs * avgNnzPerRhsColumn, 1.f);
float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f);

// int t200 = rows/(log2(200)*1.39);
// int t = (rows*100)/139;

@@ -131,7 +131,7 @@ static void sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
// estimate the number of non zero entries
float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols()));
float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols);
float ratioRes = std::min(ratioLhs * avgNnzPerRhsColumn, 1.f);
float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f);

// mimics a resizeByInnerOuter:
if(ResultType::IsRowMajor)

@@ -143,7 +143,7 @@ static void sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
for (Index j=0; j<cols; ++j)
{
// let's do a more accurate determination of the nnz ratio for the current column j of res
//float ratioColRes = std::min(ratioLhs * rhs.innerNonZeros(j), 1.f);
//float ratioColRes = (std::min)(ratioLhs * rhs.innerNonZeros(j), 1.f);
// FIXME find a nice way to get the number of nonzeros of a sub matrix (here an inner vector)
float ratioColRes = ratioRes;
tempVector.init(ratioColRes);
@@ -28,32 +28,24 @@

#include "Eigen/src/StlSupport/details.h"

// Define the explicit instantiation (e.g. necessary for the Intel compiler)
#if defined(__INTEL_COMPILER) || defined(__GNUC__)
#define EIGEN_EXPLICIT_STL_VECTOR_INSTANTIATION(...) template class std::vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> >;
#else
#define EIGEN_EXPLICIT_STL_VECTOR_INSTANTIATION(...)
#endif

/**
* This section contains a convenience MACRO which allows an easy specialization of
* std::vector such that for data types with alignment issues the correct allocator
* is used automatically.
*/
#define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...) \
EIGEN_EXPLICIT_STL_VECTOR_INSTANTIATION(__VA_ARGS__) \
namespace std \
{ \
template<typename _Ay> \
class vector<__VA_ARGS__, _Ay> \
template<> \
class vector<__VA_ARGS__, std::allocator<__VA_ARGS__> > \
: public vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \
{ \
typedef vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > vector_base; \
public: \
typedef __VA_ARGS__ value_type; \
typedef typename vector_base::allocator_type allocator_type; \
typedef typename vector_base::size_type size_type; \
typedef typename vector_base::iterator iterator; \
typedef vector_base::allocator_type allocator_type; \
typedef vector_base::size_type size_type; \
typedef vector_base::iterator iterator; \
explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {} \
template<typename InputIterator> \
vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : vector_base(first, last, a) {} \
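The hunk above trades a partial specialization of std::vector (over the allocator parameter) for a full specialization on std::allocator<T>; in a full specialization the base class is a concrete type, so its nested typedefs no longer need `typename`. A reduced sketch of the resulting pattern, with a hypothetical payload type and allocator stub instead of Eigen's macro:

#include <vector>

struct Payload { float data[4]; };

// stand-in for EIGEN_ALIGNED_ALLOCATOR; a real one would over-align its storage
template<typename T> struct aligned_allocator_stub : std::allocator<T> {};

namespace std
{
template<>
class vector<Payload, std::allocator<Payload> >
  : public vector<Payload, aligned_allocator_stub<Payload> >
{
  typedef vector<Payload, aligned_allocator_stub<Payload> > vector_base;
public:
  typedef vector_base::size_type size_type; // concrete base: no 'typename' needed
  vector() : vector_base() {}
};
}

int main()
{
  std::vector<Payload> v;   // transparently picks up the specialization above
  v.resize(8);
  return (int)v.size() - 8; // 0
}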
@@ -56,12 +56,12 @@ class EigenMatrixPrinter:
template_params = m.split(',')
template_params = map(lambda x:x.replace(" ", ""), template_params)

if template_params[1] == '-0x00000000000000001':
if template_params[1] == '-0x00000000000000001' or template_params[1] == '-0x000000001':
self.rows = val['m_storage']['m_rows']
else:
self.rows = int(template_params[1])

if template_params[2] == '-0x00000000000000001':
if template_params[2] == '-0x00000000000000001' or template_params[2] == '-0x000000001':
self.cols = val['m_storage']['m_cols']
else:
self.cols = int(template_params[2])

@@ -63,7 +63,7 @@ The output is as follows:

The second example starts by declaring a 3-by-3 matrix \c m which is initialized using the \link DenseBase::Random(Index,Index) Random() \endlink method with random values between -1 and 1. The next line applies a linear mapping such that the values are between 10 and 110. The function call \link DenseBase::Constant(Index,Index,const Scalar&) MatrixXd::Constant\endlink(3,3,1.2) returns a 3-by-3 matrix expression having all coefficients equal to 1.2. The rest is standard arithmetics.

The next line of the \c main function introduces a new type: \c VectorXd. This represents a (column) vector of arbitrary size. Here, the vector \c v is created to contains \c 3 coefficients which are left unitialized. The one but last line uses the so-called comma-initializer, explained in \ref TutorialAdvancedInitialization, to set all coefficients of the vector \c v to be as follows:
The next line of the \c main function introduces a new type: \c VectorXd. This represents a (column) vector of arbitrary size. Here, the vector \c v is created to contain \c 3 coefficients which are left uninitialized. The one but last line uses the so-called comma-initializer, explained in \ref TutorialAdvancedInitialization, to set all coefficients of the vector \c v to be as follows:

\f[
v =
@@ -155,7 +155,7 @@ this doesn't have any runtime cost (provided that you let your compiler optimize
Both \link MatrixBase::array() .array() \endlink and \link ArrayBase::matrix() .matrix() \endlink
can be used as rvalues and as lvalues.

Mixing matrices and arrays in an expression is forbidden with Eigen. For instance, you cannot add a amtrix and
Mixing matrices and arrays in an expression is forbidden with Eigen. For instance, you cannot add a matrix and
array directly; the operands of a \c + operator should either both be matrices or both be arrays. However,
it is easy to convert from one to the other with \link MatrixBase::array() .array() \endlink and
\link ArrayBase::matrix() .matrix()\endlink. The exception to this rule is the assignment operator: it is

@@ -93,7 +93,7 @@ Array.

The arguments passed to a visitor are pointers to the variables where the
row and column position are to be stored. These variables should be of type
\link DenseBase::Index Index \endlink (FIXME: link ok?), as shown below:
\link DenseBase::Index Index \endlink, as shown below:

<table class="example">
<tr><th>Example:</th><th>Output:</th></tr>

@@ -141,7 +141,7 @@ return a 'column-vector'</b>

\subsection TutorialReductionsVisitorsBroadcastingPartialReductionsCombined Combining partial reductions with other operations
It is also possible to use the result of a partial reduction to do further processing.
Here is another example that aims to find the column whose sum of elements is the maximum
Here is another example that finds the column whose sum of elements is the maximum
within a matrix. With column-wise partial reductions this can be coded as:

<table class="example">

@@ -6,7 +6,7 @@ namespace Eigen {
\li \b Previous: \ref TutorialReductionsVisitorsBroadcasting
\li \b Next: \ref TutorialSparse

In this tutorial, we will shortly introduce the many possibilities offered by the \ref Geometry_Module "geometry module", namely 2D and 3D rotations and projective or affine transformations.
In this tutorial, we will briefly introduce the many possibilities offered by the \ref Geometry_Module "geometry module", namely 2D and 3D rotations and projective or affine transformations.

\b Table \b of \b contents
- \ref TutorialGeoElementaryTransformations

@@ -78,7 +78,7 @@ representations are rotation matrices, while for other usages Quaternion is the
representation of choice as they are compact, fast and stable. Finally Rotation2D and
AngleAxis are mainly convenient types to create other rotation objects.

<strong>Notes on Translation and Scaling</strong>\n Likewise AngleAxis, these classes were
<strong>Notes on Translation and Scaling</strong>\n Like AngleAxis, these classes were
designed to simplify the creation/initialization of linear (Matrix) and affine (Transform)
transformations. Nevertheless, unlike AngleAxis which is inefficient to use, these classes
might still be interesting to write generic and efficient algorithms taking as input any

@@ -186,7 +186,7 @@ matNxN = t.extractRotation();
While transformation objects can be created and updated concatenating elementary transformations,
the Transform class also features a procedural API:
<table class="manual">
<tr><th></th><th>procedurale API</th><th>equivalent natural API </th></tr>
<tr><th></th><th>procedural API</th><th>equivalent natural API </th></tr>
<tr><td>Translation</td><td>\code
t.translate(Vector_(tx,ty,..));
t.pretranslate(Vector_(tx,ty,..));

@@ -234,7 +234,7 @@ t = Translation_(..) * t * RotationType(..) * Translation_(..) * Scaling_(..);
<table class="manual">
<tr><td style="max-width:30em;">
Euler angles might be convenient to create rotation objects.
On the other hand, since there exist 24 differents convension,they are pretty confusing to use. This example shows how
On the other hand, since there exist 24 different conventions, they are pretty confusing to use. This example shows how
to create a rotation matrix according to the 2-1-2 convention.</td><td>\code
Matrix3f m;
m = AngleAxisf(angle1, Vector3f::UnitZ())
@@ -55,17 +55,17 @@ and its internal representation using the Compressed Column Storage format:
</table>
Outer indices:<table class="manual"><tr><td>0</td><td>2</td><td>4</td><td>5</td><td>6</td><td>\em 7 </td></tr></table>

As you can guess, here the storage order is even more important than with dense matrix. We will therefore often make a clear difference between the \em inner and \em outer dimensions. For instance, it is easy to loop over the coefficients of an \em inner \em vector (e.g., a column of a column-major matrix), but completely inefficient to do the same for an \em outer \em vector (e.g., a row of a col-major matrix).
As you might guess, here the storage order is even more important than with dense matrices. We will therefore often make a clear difference between the \em inner and \em outer dimensions. For instance, it is efficient to loop over the coefficients of an \em inner \em vector (e.g., a column of a column-major matrix), but completely inefficient to do the same for an \em outer \em vector (e.g., a row of a column-major matrix).

The SparseVector class implements the same compressed storage scheme but, of course, without any outer index buffer.

Since all nonzero coefficients of such a matrix are sequentially stored in memory, random insertion of new nonzeros can be extremely costly. To overcome this limitation, Eigen's sparse module provides a DynamicSparseMatrix class which is basically implemented as an array of SparseVector. In other words, a DynamicSparseMatrix is a SparseMatrix where the values and inner-indices arrays have been splitted into multiple small and resizable arrays. Assuming the number of nonzeros per inner vector is relatively low, this slight modification allow for very fast random insertion at the cost of a slight memory overhead and a lost of compatibility with other sparse libraries used by some of our highlevel solvers. Note that the major memory overhead comes from the extra memory preallocated by each inner vector to avoid an expensive memory reallocation at every insertion.
Since all nonzero coefficients of such a matrix are sequentially stored in memory, inserting a new nonzero near the "beginning" of the matrix can be extremely costly. As described below (\ref TutorialSparseFilling), one strategy is to fill nonzero coefficients in order. In cases where this is not possible, Eigen's sparse module also provides a DynamicSparseMatrix class which allows efficient random insertion. DynamicSparseMatrix is essentially implemented as an array of SparseVector, where the values and inner-indices arrays have been split into multiple small and resizable arrays. Assuming the number of nonzeros per inner vector is relatively small, this modification allows for very fast random insertion at the cost of a slight memory overhead (due to extra memory preallocated by each inner vector to avoid an expensive memory reallocation at every insertion) and a loss of compatibility with other sparse libraries used by some of our high-level solvers. Once complete, a DynamicSparseMatrix can be converted to a SparseMatrix to permit usage of these sparse libraries.

To summarize, it is recommanded to use a SparseMatrix whenever this is possible, and reserve the use of DynamicSparseMatrix for matrix assembly purpose when a SparseMatrix is not flexible enough. The respective pro/cons of both representations are summarized in the following table:
To summarize, it is recommended to use SparseMatrix whenever possible, and reserve the use of DynamicSparseMatrix to assemble a sparse matrix in cases when a SparseMatrix is not flexible enough. The respective pros/cons of both representations are summarized in the following table:

<table class="manual">
<tr><td></td> <td>SparseMatrix</td><td>DynamicSparseMatrix</td></tr>
<tr><td>memory usage</td><td>***</td><td>**</td></tr>
<tr><td>memory efficiency</td><td>***</td><td>**</td></tr>
<tr><td>sorted insertion</td><td>***</td><td>***</td></tr>
<tr><td>random insertion \n in sorted inner vector</td><td>**</td><td>**</td></tr>
<tr><td>sorted insertion \n in random inner vector</td><td>-</td><td>***</td></tr>

@@ -82,7 +82,7 @@ To summarize, it is recommanded to use a SparseMatrix whenever this is possible,

\b Matrix \b and \b vector \b properties \n

Here mat and vec represents any sparse-matrix and sparse-vector types respectively.
Here mat and vec represent any sparse-matrix and sparse-vector type, respectively.

<table class="manual">
<tr><td>Standard \n dimensions</td><td>\code

@@ -96,7 +96,7 @@ mat.innerSize()
mat.outerSize()\endcode</td>
<td></td>
</tr>
<tr><td>Number of non \n zero coefficiens</td><td>\code
<tr><td>Number of non \n zero coefficients</td><td>\code
mat.nonZeros() \endcode</td>
<td>\code
vec.nonZeros() \endcode</td></tr>

@@ -105,12 +105,12 @@ vec.nonZeros() \endcode</td></tr>

\b Iterating \b over \b the \b nonzero \b coefficients \n

Iterating over the coefficients of a sparse matrix can be done only in the same order than the storage order. Here is an example:
Iterating over the coefficients of a sparse matrix can be done only in the same order as the storage order. Here is an example:
<table class="manual">
<tr><td>
\code
SparseMatrixType mat(rows,cols);
for (int k=0; k\<m1.outerSize(); ++k)
for (int k=0; k<m1.outerSize(); ++k)
for (SparseMatrixType::InnerIterator it(mat,k); it; ++it)
{
it.value();
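For completeness, here is a self-contained version of the iteration snippet shown in the documentation hunk above, assuming the Eigen 3.0-era SparseMatrix interface (insert()/finalize()) used elsewhere in these docs:

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> mat(4,4); // column-major: inner = row, outer = column
  mat.insert(1,0) = 3.0;                // filled in storage order (column by column)
  mat.insert(2,2) = 7.0;
  mat.finalize();

  for (int k=0; k<mat.outerSize(); ++k)
    for (Eigen::SparseMatrix<double>::InnerIterator it(mat,k); it; ++it)
      std::cout << "(" << it.row() << "," << it.col() << ") = " << it.value() << '\n';
  return 0;
}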
@@ -65,7 +65,7 @@ template<typename MatrixType> void adjoint(const MatrixType& m)
// check basic properties of dot, norm, norm2
typedef typename NumTraits<Scalar>::Real RealScalar;

RealScalar ref = NumTraits<Scalar>::IsInteger ? 0 : std::max((s1 * v1 + s2 * v2).norm(),v3.norm());
RealScalar ref = NumTraits<Scalar>::IsInteger ? 0 : (std::max)((s1 * v1 + s2 * v2).norm(),v3.norm());
VERIFY(test_isApproxWithRef((s1 * v1 + s2 * v2).dot(v3), internal::conj(s1) * v1.dot(v3) + internal::conj(s2) * v2.dot(v3), ref));
VERIFY(test_isApproxWithRef(v3.dot(s1 * v1 + s2 * v2), s1*v3.dot(v1)+s2*v3.dot(v2), ref));
VERIFY_IS_APPROX(internal::conj(v1.dot(v2)), v2.dot(v1));

@@ -76,7 +76,7 @@ template<typename MatrixType> void adjoint(const MatrixType& m)

// check compatibility of dot and adjoint

ref = NumTraits<Scalar>::IsInteger ? 0 : std::max(std::max(v1.norm(),v2.norm()),std::max((square * v2).norm(),(square.adjoint() * v1).norm()));
ref = NumTraits<Scalar>::IsInteger ? 0 : (std::max)((std::max)(v1.norm(),v2.norm()),(std::max)((square * v2).norm(),(square.adjoint() * v1).norm()));
VERIFY(test_isApproxWithRef(v1.dot(square * v2), (square.adjoint() * v1).dot(v2), ref));

// like in testBasicStuff, test operator() to check const-qualification

@@ -61,7 +61,7 @@ template<typename MatrixType> void bandmatrix(const MatrixType& _m)
m.col(i).setConstant(static_cast<RealScalar>(i+1));
dm1.col(i).setConstant(static_cast<RealScalar>(i+1));
}
Index d = std::min(rows,cols);
Index d = (std::min)(rows,cols);
Index a = std::max<Index>(0,cols-d-supers);
Index b = std::max<Index>(0,rows-d-subs);
if(a>0) dm1.block(0,d+supers,rows,a).setZero();
@@ -28,6 +28,14 @@
#include "main.h"
#include <functional>

#ifdef min
#undef min
#endif

#ifdef max
#undef max
#endif

using namespace std;

template<typename Scalar> struct AddIfNull {

@@ -23,6 +23,9 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.

#define min(A,B) please_protect_your_min_with_parentheses
#define max(A,B) please_protect_your_max_with_parentheses

#include <cstdlib>
#include <cerrno>
#include <ctime>
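The two macros added to the test harness above are deliberately "poisoned": they expand to an undeclared identifier, so any unparenthesized min/max call anywhere in the test sources fails to compile, which is how unprotected calls get caught. A minimal illustration of the trap:

#include <algorithm>

#define min(A,B) please_protect_your_min_with_parentheses

int main()
{
  int a = 1, b = 2;
  // int bad = std::min(a, b);   // would expand the macro -> compile error
  int good = (std::min)(a, b);   // parentheses block the macro expansion
  return good - 1;               // 0
}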
@@ -429,7 +432,7 @@ void createRandomPIMatrixOfRank(typename MatrixType::Index desired_rank, typenam
MatrixBType b = MatrixBType::Random(cols,cols);

// set the diagonal such that only desired_rank non-zero entries reamain
const Index diag_size = std::min(d.rows(),d.cols());
const Index diag_size = (std::min)(d.rows(),d.cols());
if(diag_size != desired_rank)
d.diagonal().segment(desired_rank, diag_size-desired_rank) = VectorType::Zero(diag_size-desired_rank);

@@ -34,7 +34,7 @@
#include <Eigen/Core>

namespace Eigen {

/** \defgroup MPRealSupport_Module MPFRC++ Support module
*
* \code

@@ -45,6 +45,8 @@ namespace Eigen {
* via the <a href="http://www.holoborodko.com/pavel/?page_id=12">MPFR C++</a>
* library which itself is built upon <a href="http://www.mpfr.org/">MPFR</a>/<a href="http://gmplib.org/">GMP</a>.
*
* You can find a copy of MPFR C++ that is known to be compatible in the unsupported/test/mpreal folder.
*
* Here is an example:
*
\code

@@ -129,18 +131,6 @@ int main()
return a + (b-a) * random<mpfr::mpreal>();
}

template<> struct conj_impl<mpfr::mpreal> { inline static const mpfr::mpreal& run(const mpfr::mpreal& x) { return x; } };
template<> struct real_impl<mpfr::mpreal> { inline static const mpfr::mpreal& run(const mpfr::mpreal& x) { return x; } };
template<> struct imag_impl<mpfr::mpreal> { inline static const mpfr::mpreal run(const mpfr::mpreal&) { return mpfr::mpreal(0); } };
template<> struct abs_impl<mpfr::mpreal> { inline static const mpfr::mpreal run(const mpfr::mpreal& x) { return mpfr::fabs(x); } };
template<> struct abs2_impl<mpfr::mpreal> { inline static const mpfr::mpreal run(const mpfr::mpreal& x) { return x*x; } };
template<> struct sqrt_impl<mpfr::mpreal> { inline static const mpfr::mpreal run(const mpfr::mpreal& x) { return mpfr::sqrt(x); } };
template<> struct exp_impl<mpfr::mpreal> { inline static const mpfr::mpreal run(const mpfr::mpreal& x) { return mpfr::exp(x); } };
template<> struct log_impl<mpfr::mpreal> { inline static const mpfr::mpreal run(const mpfr::mpreal& x) { return mpfr::log(x); } };
template<> struct sin_impl<mpfr::mpreal> { inline static const mpfr::mpreal run(const mpfr::mpreal& x) { return mpfr::sin(x); } };
template<> struct cos_impl<mpfr::mpreal> { inline static const mpfr::mpreal run(const mpfr::mpreal& x) { return mpfr::cos(x); } };
template<> struct pow_impl<mpfr::mpreal> { inline static const mpfr::mpreal run(const mpfr::mpreal& x, const mpfr::mpreal& y) { return mpfr::pow(x, y); } };

bool isMuchSmallerThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& prec)
{
return mpfr::abs(a) <= mpfr::abs(b) * prec;

@@ -148,7 +138,7 @@ int main()

inline bool isApprox(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& prec)
{
return mpfr::abs(a - b) <= mpfr::min(mpfr::abs(a), mpfr::abs(b)) * prec;
return mpfr::abs(a - b) <= (mpfr::min)(mpfr::abs(a), mpfr::abs(b)) * prec;
}

inline bool isApproxOrLessThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& prec)
@@ -188,11 +188,11 @@ template<typename Scalar> void glLoadMatrix(const Transform<Scalar,3,Affine>& t)
template<typename Scalar> void glLoadMatrix(const Transform<Scalar,3,Projective>& t) { glLoadMatrix(t.matrix()); }
template<typename Scalar> void glLoadMatrix(const Transform<Scalar,3,AffineCompact>& t) { glLoadMatrix(Transform<Scalar,3,Affine>(t).matrix()); }

void glRotate(const Rotation2D<float>& rot)
static void glRotate(const Rotation2D<float>& rot)
{
glRotatef(rot.angle()*180.f/float(M_PI), 0.f, 0.f, 1.f);
}
void glRotate(const Rotation2D<double>& rot)
static void glRotate(const Rotation2D<double>& rot)
{
glRotated(rot.angle()*180.0/M_PI, 0.0, 0.0, 1.0);
}

@@ -256,18 +256,18 @@ EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glGet,GLenum,_,double, 4,4,Doublev)

#ifdef GL_VERSION_2_0

void glUniform2fv_ei (GLint loc, const float* v) { glUniform2fv(loc,1,v); }
void glUniform2iv_ei (GLint loc, const int* v) { glUniform2iv(loc,1,v); }
static void glUniform2fv_ei (GLint loc, const float* v) { glUniform2fv(loc,1,v); }
static void glUniform2iv_ei (GLint loc, const int* v) { glUniform2iv(loc,1,v); }

void glUniform3fv_ei (GLint loc, const float* v) { glUniform3fv(loc,1,v); }
void glUniform3iv_ei (GLint loc, const int* v) { glUniform3iv(loc,1,v); }
static void glUniform3fv_ei (GLint loc, const float* v) { glUniform3fv(loc,1,v); }
static void glUniform3iv_ei (GLint loc, const int* v) { glUniform3iv(loc,1,v); }

void glUniform4fv_ei (GLint loc, const float* v) { glUniform4fv(loc,1,v); }
void glUniform4iv_ei (GLint loc, const int* v) { glUniform4iv(loc,1,v); }
static void glUniform4fv_ei (GLint loc, const float* v) { glUniform4fv(loc,1,v); }
static void glUniform4iv_ei (GLint loc, const int* v) { glUniform4iv(loc,1,v); }

void glUniformMatrix2fv_ei (GLint loc, const float* v) { glUniformMatrix2fv(loc,1,false,v); }
void glUniformMatrix3fv_ei (GLint loc, const float* v) { glUniformMatrix3fv(loc,1,false,v); }
void glUniformMatrix4fv_ei (GLint loc, const float* v) { glUniformMatrix4fv(loc,1,false,v); }
static void glUniformMatrix2fv_ei (GLint loc, const float* v) { glUniformMatrix2fv(loc,1,false,v); }
static void glUniformMatrix3fv_ei (GLint loc, const float* v) { glUniformMatrix3fv(loc,1,false,v); }
static void glUniformMatrix4fv_ei (GLint loc, const float* v) { glUniformMatrix4fv(loc,1,false,v); }


EIGEN_GL_FUNC1_DECLARATION (glUniform,GLint,const)

@@ -286,12 +286,12 @@ EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,4,Matrix

#ifdef GL_VERSION_2_1

void glUniformMatrix2x3fv_ei(GLint loc, const float* v) { glUniformMatrix2x3fv(loc,1,false,v); }
void glUniformMatrix3x2fv_ei(GLint loc, const float* v) { glUniformMatrix3x2fv(loc,1,false,v); }
void glUniformMatrix2x4fv_ei(GLint loc, const float* v) { glUniformMatrix2x4fv(loc,1,false,v); }
void glUniformMatrix4x2fv_ei(GLint loc, const float* v) { glUniformMatrix4x2fv(loc,1,false,v); }
void glUniformMatrix3x4fv_ei(GLint loc, const float* v) { glUniformMatrix3x4fv(loc,1,false,v); }
void glUniformMatrix4x3fv_ei(GLint loc, const float* v) { glUniformMatrix4x3fv(loc,1,false,v); }
static void glUniformMatrix2x3fv_ei(GLint loc, const float* v) { glUniformMatrix2x3fv(loc,1,false,v); }
static void glUniformMatrix3x2fv_ei(GLint loc, const float* v) { glUniformMatrix3x2fv(loc,1,false,v); }
static void glUniformMatrix2x4fv_ei(GLint loc, const float* v) { glUniformMatrix2x4fv(loc,1,false,v); }
static void glUniformMatrix4x2fv_ei(GLint loc, const float* v) { glUniformMatrix4x2fv(loc,1,false,v); }
static void glUniformMatrix3x4fv_ei(GLint loc, const float* v) { glUniformMatrix3x4fv(loc,1,false,v); }
static void glUniformMatrix4x3fv_ei(GLint loc, const float* v) { glUniformMatrix4x3fv(loc,1,false,v); }

EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 2,3,Matrix2x3fv_ei)
EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 3,2,Matrix3x2fv_ei)

@@ -304,9 +304,9 @@ EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,3,Matrix

#ifdef GL_VERSION_3_0

void glUniform2uiv_ei (GLint loc, const unsigned int* v) { glUniform2uiv(loc,1,v); }
void glUniform3uiv_ei (GLint loc, const unsigned int* v) { glUniform3uiv(loc,1,v); }
void glUniform4uiv_ei (GLint loc, const unsigned int* v) { glUniform4uiv(loc,1,v); }
static void glUniform2uiv_ei (GLint loc, const unsigned int* v) { glUniform2uiv(loc,1,v); }
static void glUniform3uiv_ei (GLint loc, const unsigned int* v) { glUniform3uiv(loc,1,v); }
static void glUniform4uiv_ei (GLint loc, const unsigned int* v) { glUniform4uiv(loc,1,v); }

EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 2,2uiv_ei)
EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 3,3uiv_ei)

@@ -315,9 +315,9 @@ EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 4,4uiv_ei)
#endif

#ifdef GL_ARB_gpu_shader_fp64
void glUniform2dv_ei (GLint loc, const double* v) { glUniform2dv(loc,1,v); }
void glUniform3dv_ei (GLint loc, const double* v) { glUniform3dv(loc,1,v); }
void glUniform4dv_ei (GLint loc, const double* v) { glUniform4dv(loc,1,v); }
static void glUniform2dv_ei (GLint loc, const double* v) { glUniform2dv(loc,1,v); }
static void glUniform3dv_ei (GLint loc, const double* v) { glUniform3dv(loc,1,v); }
static void glUniform4dv_ei (GLint loc, const double* v) { glUniform4dv(loc,1,v); }

EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 2,2dv_ei)
EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 3,3dv_ei)
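All the `static` keywords added to the OpenGL helpers above address linkage: a non-template function *defined* in a header has external linkage by default, so two translation units including that header would each emit a definition and the link would fail. Internal linkage gives each translation unit its own private copy. A reduced sketch with a hypothetical helper (not the OpenGL module itself):

// --- helpers.h ---
static void helper_ei(int* counter) { ++*counter; } // internal linkage: safe in a header

// --- a.cpp and b.cpp could both include helpers.h without a
// "multiple definition" link error; here a single file demonstrates the call ---
int main()
{
  int n = 0;
  helper_ei(&n);
  return n - 1; // 0
}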
@@ -178,7 +178,7 @@ typename Minimizer::Scalar minimize_helper(const BVH &tree, Minimizer &minimizer
todo.pop();

for(; oBegin != oEnd; ++oBegin) //go through child objects
minimum = std::min(minimum, minimizer.minimumOnObject(*oBegin));
minimum = (std::min)(minimum, minimizer.minimumOnObject(*oBegin));

for(; vBegin != vEnd; ++vBegin) { //go through child volumes
Scalar val = minimizer.minimumOnVolume(tree.getVolume(*vBegin));

@@ -274,12 +274,12 @@ typename Minimizer::Scalar BVMinimize(const BVH1 &tree1, const BVH2 &tree2, Mini

for(; oBegin1 != oEnd1; ++oBegin1) { //go through child objects of first tree
for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree
minimum = std::min(minimum, minimizer.minimumOnObjectObject(*oBegin1, *oCur2));
minimum = (std::min)(minimum, minimizer.minimumOnObjectObject(*oBegin1, *oCur2));
}

for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree
Helper2 helper(*oBegin1, minimizer);
minimum = std::min(minimum, internal::minimize_helper(tree2, helper, *vCur2, minimum));
minimum = (std::min)(minimum, internal::minimize_helper(tree2, helper, *vCur2, minimum));
}
}

@@ -288,7 +288,7 @@ typename Minimizer::Scalar BVMinimize(const BVH1 &tree1, const BVH2 &tree2, Mini

for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree
Helper1 helper(*oCur2, minimizer);
minimum = std::min(minimum, internal::minimize_helper(tree1, helper, *vBegin1, minimum));
minimum = (std::min)(minimum, internal::minimize_helper(tree1, helper, *vBegin1, minimum));
}

for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree

@@ -294,11 +294,19 @@ struct kissfft_impl
inline
void fwd2( Complex * dst,const Complex *src,int n0,int n1)
{
EIGEN_UNUSED_VARIABLE(dst);
EIGEN_UNUSED_VARIABLE(src);
EIGEN_UNUSED_VARIABLE(n0);
EIGEN_UNUSED_VARIABLE(n1);
}

inline
void inv2( Complex * dst,const Complex *src,int n0,int n1)
{
EIGEN_UNUSED_VARIABLE(dst);
EIGEN_UNUSED_VARIABLE(src);
EIGEN_UNUSED_VARIABLE(n0);
EIGEN_UNUSED_VARIABLE(n1);
}

// real-to-complex forward FFT
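The EIGEN_UNUSED_VARIABLE calls added to the empty kissfft stubs above exist to silence unused-parameter warnings. The macro name is Eigen's; the one-line definition below is just the usual way such a macro is written, shown here as a hypothetical stand-in:

#define MY_UNUSED_VARIABLE(x) (void)(x)

void stub(double* dst, const double* src, int n)
{
  // evaluating each argument and discarding it marks the parameter as used,
  // so deliberately empty stubs compile cleanly under -Wunused-parameter
  MY_UNUSED_VARIABLE(dst);
  MY_UNUSED_VARIABLE(src);
  MY_UNUSED_VARIABLE(n);
}

int main() { stub(0, 0, 0); return 0; }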
@@ -172,7 +172,7 @@ void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x,

if (iter.noiseLevel() > 0 && transition) std::cerr << "CCG: transition\n";
if (transition || iter.first()) gamma = 0.0;
else gamma = std::max(0.0, (rho - old_z.dot(z)) / rho_1);
else gamma = (std::max)(0.0, (rho - old_z.dot(z)) / rho_1);
p = z + gamma*p;

++iter;

@@ -185,7 +185,7 @@ void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x,
{
Scalar bb = C.row(i).dot(p) - f[i];
if (bb > 0.0)
lambda = std::min(lambda, (f.coeff(i)-C.row(i).dot(x)) / bb);
lambda = (std::min)(lambda, (f.coeff(i)-C.row(i).dot(x)) / bb);
}
}
x += lambda * p;

@@ -141,7 +141,7 @@ class IterationController
bool converged(double nr)
{
m_res = internal::abs(nr);
m_resminreach = std::min(m_resminreach, m_res);
m_resminreach = (std::min)(m_resminreach, m_res);
return converged();
}
template<typename VectorType> bool converged(const VectorType &v)

@@ -127,10 +127,10 @@ bool MatrixFunctionAtomic<MatrixType>::taylorConverged(Index s, const MatrixType
for (Index r = 0; r < n; r++) {
RealScalar mx = 0;
for (Index i = 0; i < n; i++)
mx = std::max(mx, std::abs(m_f(m_Ashifted(i, i) + m_avgEival, static_cast<int>(s+r))));
mx = (std::max)(mx, std::abs(m_f(m_Ashifted(i, i) + m_avgEival, static_cast<int>(s+r))));
if (r != 0)
rfactorial *= RealScalar(r);
delta = std::max(delta, mx / rfactorial);
delta = (std::max)(delta, mx / rfactorial);
}
const RealScalar P_norm = P.cwiseAbs().rowwise().sum().maxCoeff();
if (m_mu * delta * P_norm < NumTraits<Scalar>::epsilon() * F_norm)

@@ -37,7 +37,7 @@ namespace HybridNonLinearSolverSpace {
TolTooSmall = 3,
NotMakingProgressJacobian = 4,
NotMakingProgressIterations = 5,
UserAksed = 6
UserAsked = 6
};
}
@@ -181,7 +181,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveInit(FVectorType &x)
/* and calculate its norm. */
nfev = 1;
if ( functor(x, fvec) < 0)
return HybridNonLinearSolverSpace::UserAksed;
return HybridNonLinearSolverSpace::UserAsked;
fnorm = fvec.stableNorm();

/* initialize iteration counter and monitors. */

@@ -207,7 +207,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)

/* calculate the jacobian matrix. */
if ( functor.df(x, fjac) < 0)
return HybridNonLinearSolverSpace::UserAksed;
return HybridNonLinearSolverSpace::UserAsked;
++njev;

wa2 = fjac.colwise().blueNorm();

@@ -228,7 +228,6 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
}

/* compute the qr factorization of the jacobian. */
wa2 = fjac.colwise().blueNorm();
HouseholderQR<JacobianType> qrfac(fjac); // no pivoting:

/* copy the triangular factor of the qr factorization into r. */

@@ -255,11 +254,11 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)

/* on the first iteration, adjust the initial step bound. */
if (iter == 1)
delta = std::min(delta,pnorm);
delta = (std::min)(delta,pnorm);

/* evaluate the function at x + p and calculate its norm. */
if ( functor(wa2, wa4) < 0)
return HybridNonLinearSolverSpace::UserAksed;
return HybridNonLinearSolverSpace::UserAsked;
++nfev;
fnorm1 = wa4.stableNorm();

@@ -289,7 +288,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
ncfail = 0;
++ncsuc;
if (ratio >= Scalar(.5) || ncsuc > 1)
delta = std::max(delta, pnorm / Scalar(.5));
delta = (std::max)(delta, pnorm / Scalar(.5));
if (internal::abs(ratio - 1.) <= Scalar(.1)) {
delta = pnorm / Scalar(.5);
}

@@ -322,7 +321,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
/* tests for termination and stringent tolerances. */
if (nfev >= parameters.maxfev)
return HybridNonLinearSolverSpace::TooManyFunctionEvaluation;
if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
if (Scalar(.1) * (std::max)(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
return HybridNonLinearSolverSpace::TolTooSmall;
if (nslow2 == 5)
return HybridNonLinearSolverSpace::NotMakingProgressJacobian;

@@ -420,7 +419,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(FVectorType &
/* and calculate its norm. */
nfev = 1;
if ( functor(x, fvec) < 0)
return HybridNonLinearSolverSpace::UserAksed;
return HybridNonLinearSolverSpace::UserAsked;
fnorm = fvec.stableNorm();

/* initialize iteration counter and monitors. */

@@ -448,8 +447,8 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType

/* calculate the jacobian matrix. */
if (internal::fdjac1(functor, x, fvec, fjac, parameters.nb_of_subdiagonals, parameters.nb_of_superdiagonals, parameters.epsfcn) <0)
return HybridNonLinearSolverSpace::UserAksed;
nfev += std::min(parameters.nb_of_subdiagonals+parameters.nb_of_superdiagonals+ 1, n);
return HybridNonLinearSolverSpace::UserAsked;
nfev += (std::min)(parameters.nb_of_subdiagonals+parameters.nb_of_superdiagonals+ 1, n);

wa2 = fjac.colwise().blueNorm();

@@ -469,7 +468,6 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
}

/* compute the qr factorization of the jacobian. */
wa2 = fjac.colwise().blueNorm();
HouseholderQR<JacobianType> qrfac(fjac); // no pivoting:

/* copy the triangular factor of the qr factorization into r. */

@@ -496,11 +494,11 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType

/* on the first iteration, adjust the initial step bound. */
if (iter == 1)
delta = std::min(delta,pnorm);
delta = (std::min)(delta,pnorm);

/* evaluate the function at x + p and calculate its norm. */
if ( functor(wa2, wa4) < 0)
return HybridNonLinearSolverSpace::UserAksed;
return HybridNonLinearSolverSpace::UserAsked;
++nfev;
fnorm1 = wa4.stableNorm();

@@ -530,7 +528,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
ncfail = 0;
++ncsuc;
if (ratio >= Scalar(.5) || ncsuc > 1)
delta = std::max(delta, pnorm / Scalar(.5));
delta = (std::max)(delta, pnorm / Scalar(.5));
if (internal::abs(ratio - 1.) <= Scalar(.1)) {
delta = pnorm / Scalar(.5);
}

@@ -563,7 +561,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
/* tests for termination and stringent tolerances. */
if (nfev >= parameters.maxfev)
return HybridNonLinearSolverSpace::TooManyFunctionEvaluation;
if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
if (Scalar(.1) * (std::max)(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
return HybridNonLinearSolverSpace::TolTooSmall;
if (nslow2 == 5)
return HybridNonLinearSolverSpace::NotMakingProgressJacobian;
@@ -263,7 +263,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
if (fnorm != 0.)
for (Index j = 0; j < n; ++j)
if (wa2[permutation.indices()[j]] != 0.)
gnorm = std::max(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
gnorm = (std::max)(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));

/* test for convergence of the gradient norm. */
if (gnorm <= parameters.gtol)

@@ -285,7 +285,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)

/* on the first iteration, adjust the initial step bound. */
if (iter == 1)
delta = std::min(delta,pnorm);
delta = (std::min)(delta,pnorm);

/* evaluate the function at x + p and calculate its norm. */
if ( functor(wa2, wa4) < 0)

@@ -321,7 +321,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1))
temp = Scalar(.1);
/* Computing MIN */
delta = temp * std::min(delta, pnorm / Scalar(.1));
delta = temp * (std::min)(delta, pnorm / Scalar(.1));
par /= temp;
} else if (!(par != 0. && ratio < Scalar(.75))) {
delta = pnorm / Scalar(.5);

@@ -510,7 +510,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
if (fnorm != 0.)
for (j = 0; j < n; ++j)
if (wa2[permutation.indices()[j]] != 0.)
gnorm = std::max(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
gnorm = (std::max)(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));

/* test for convergence of the gradient norm. */
if (gnorm <= parameters.gtol)

@@ -532,7 +532,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp

/* on the first iteration, adjust the initial step bound. */
if (iter == 1)
delta = std::min(delta,pnorm);
delta = (std::min)(delta,pnorm);

/* evaluate the function at x + p and calculate its norm. */
if ( functor(wa2, wa4) < 0)

@@ -568,7 +568,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1))
temp = Scalar(.1);
/* Computing MIN */
delta = temp * std::min(delta, pnorm / Scalar(.1));
delta = temp * (std::min)(delta, pnorm / Scalar(.1));
par /= temp;
} else if (!(par != 0. && ratio < Scalar(.75))) {
delta = pnorm / Scalar(.5);

@@ -93,7 +93,7 @@ algo_end:

/* form appropriate convex combination of the gauss-newton */
/* direction and the scaled gradient direction. */
temp = (1.-alpha) * std::min(sgnorm,delta);
temp = (1.-alpha) * (std::min)(sgnorm,delta);
x = temp * wa1 + alpha * x;
}
@ -26,7 +26,7 @@ DenseIndex fdjac1(
|
|||
Matrix< Scalar, Dynamic, 1 > wa1(n);
|
||||
Matrix< Scalar, Dynamic, 1 > wa2(n);
|
||||
|
||||
eps = sqrt(std::max(epsfcn,epsmch));
|
||||
eps = sqrt((std::max)(epsfcn,epsmch));
|
||||
msum = ml + mu + 1;
|
||||
if (msum >= n) {
|
||||
/* computation of dense approximate jacobian. */
|
||||
|
|
@@ -61,7 +61,7 @@ DenseIndex fdjac1(
|
|||
if (h == 0.) h = eps;
|
||||
fjac.col(j).setZero();
|
||||
start = std::max<Index>(0,j-mu);
|
||||
length = std::min(n-1, j+ml) - start + 1;
|
||||
length = (std::min)(n-1, j+ml) - start + 1;
|
||||
fjac.col(j).segment(start, length) = ( wa1.segment(start, length)-fvec.segment(start, length))/h;
|
||||
}
|
||||
}
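The fdjac1 hunk above computes, for column j of an n-by-n Jacobian with ml sub- and mu super-diagonals, which rows fall inside the band. The same index arithmetic in isolation (a hypothetical helper, not part of the library):

#include <algorithm>
#include <cassert>

// Column j of an n x n banded Jacobian with ml sub- and mu
// super-diagonals is nonzero only in rows
// [max(0, j - mu), min(n - 1, j + ml)].
void columnBand(int n, int ml, int mu, int j, int& start, int& length)
{
    start  = (std::max)(0, j - mu);
    length = (std::min)(n - 1, j + ml) - start + 1;
    assert(length >= 1 && start + length <= n);
}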
|
||||
|
|
|
|||
|
|
@@ -91,12 +91,12 @@ void lmpar(
|
|||
gnorm = wa1.stableNorm();
|
||||
paru = gnorm / delta;
|
||||
if (paru == 0.)
|
||||
paru = dwarf / std::min(delta,Scalar(0.1));
|
||||
paru = dwarf / (std::min)(delta,Scalar(0.1));
|
||||
|
||||
/* if the input par lies outside of the interval (parl,paru), */
|
||||
/* set par to the closer endpoint. */
|
||||
par = std::max(par,parl);
|
||||
par = std::min(par,paru);
|
||||
par = (std::max)(par,parl);
|
||||
par = (std::min)(par,paru);
|
||||
if (par == 0.)
|
||||
par = gnorm / dxnorm;
|
||||
|
||||
|
|
@@ -106,7 +106,7 @@ void lmpar(
|
|||
|
||||
/* evaluate the function at the current value of par. */
|
||||
if (par == 0.)
|
||||
par = std::max(dwarf,Scalar(.001) * paru); /* Computing MAX */
|
||||
par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */
|
||||
wa1 = sqrt(par)* diag;
|
||||
|
||||
Matrix< Scalar, Dynamic, 1 > sdiag(n);
|
||||
|
|
@@ -139,13 +139,13 @@ void lmpar(
|
|||
|
||||
/* depending on the sign of the function, update parl or paru. */
|
||||
if (fp > 0.)
|
||||
parl = std::max(parl,par);
|
||||
parl = (std::max)(parl,par);
|
||||
if (fp < 0.)
|
||||
paru = std::min(paru,par);
|
||||
paru = (std::min)(paru,par);
|
||||
|
||||
/* compute an improved estimate for par. */
|
||||
/* Computing MAX */
|
||||
par = std::max(parl,par+parc);
|
||||
par = (std::max)(parl,par+parc);
|
||||
|
||||
/* end of an iteration. */
|
||||
}
|
||||
|
|
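In lmpar/lmpar2, parl and paru bracket the sought Levenberg-Marquardt parameter: fp > 0 means par is still too small, fp < 0 means it is too large, and the corrected estimate par + parc is safeguarded so it cannot fall below the bracket. The same update step in isolation (a sketch, not the library routine):

#include <algorithm>

// One bracketing update from the lmpar loop.
double updatePar(double par, double parc, double fp,
                 double& parl, double& paru)
{
    if (fp > 0.) parl = (std::max)(parl, par);  // par proved too small
    if (fp < 0.) paru = (std::min)(paru, par);  // par proved too large
    return (std::max)(parl, par + parc);        // safeguarded new estimate
}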
@@ -227,12 +227,12 @@ void lmpar2(
|
|||
gnorm = wa1.stableNorm();
|
||||
paru = gnorm / delta;
|
||||
if (paru == 0.)
|
||||
paru = dwarf / std::min(delta,Scalar(0.1));
|
||||
paru = dwarf / (std::min)(delta,Scalar(0.1));
|
||||
|
||||
/* if the input par lies outside of the interval (parl,paru), */
|
||||
/* set par to the closer endpoint. */
|
||||
par = std::max(par,parl);
|
||||
par = std::min(par,paru);
|
||||
par = (std::max)(par,parl);
|
||||
par = (std::min)(par,paru);
|
||||
if (par == 0.)
|
||||
par = gnorm / dxnorm;
|
||||
|
||||
|
|
@@ -243,7 +243,7 @@ void lmpar2(
|
|||
|
||||
/* evaluate the function at the current value of par. */
|
||||
if (par == 0.)
|
||||
par = std::max(dwarf,Scalar(.001) * paru); /* Computing MAX */
|
||||
par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */
|
||||
wa1 = sqrt(par)* diag;
|
||||
|
||||
Matrix< Scalar, Dynamic, 1 > sdiag(n);
|
||||
|
|
@@ -275,12 +275,12 @@ void lmpar2(
|
|||
|
||||
/* depending on the sign of the function, update parl or paru. */
|
||||
if (fp > 0.)
|
||||
parl = std::max(parl,par);
|
||||
parl = (std::max)(parl,par);
|
||||
if (fp < 0.)
|
||||
paru = std::min(paru,par);
|
||||
paru = (std::min)(paru,par);
|
||||
|
||||
/* compute an improved estimate for par. */
|
||||
par = std::max(parl,par+parc);
|
||||
par = (std::max)(parl,par+parc);
|
||||
}
|
||||
if (iter == 0)
|
||||
par = 0.;
|
||||
|
|
|
|||
|
|
@@ -11,6 +11,7 @@ void r1updt(
|
|||
bool *sing)
|
||||
{
|
||||
typedef DenseIndex Index;
|
||||
const JacobiRotation<Scalar> IdentityRotation = JacobiRotation<Scalar>(1,0);
|
||||
|
||||
/* Local variables */
|
||||
const Index m = s.rows();
|
||||
|
|
@@ -49,7 +50,8 @@ void r1updt(
|
|||
w[i] = givens.s() * s(j,i) + givens.c() * w[i];
|
||||
s(j,i) = temp;
|
||||
}
|
||||
}
|
||||
} else
|
||||
v_givens[j] = IdentityRotation;
|
||||
}
|
||||
|
||||
/* add the spike from the rank 1 update to w. */
|
||||
|
|
@@ -73,7 +75,8 @@ void r1updt(
|
|||
/* store the information necessary to recover the */
|
||||
/* givens rotation. */
|
||||
w_givens[j] = givens;
|
||||
}
|
||||
} else
|
||||
v_givens[j] = IdentityRotation;
|
||||
|
||||
/* test for zero diagonal elements in the output s. */
|
||||
if (s(j,j) == 0.) {
|
||||
|
|
|
|||
|
|
@@ -80,7 +80,7 @@ public:
|
|||
Scalar h;
|
||||
int nfev=0;
|
||||
const typename InputType::Index n = _x.size();
|
||||
const Scalar eps = internal::sqrt((std::max(epsfcn,NumTraits<Scalar>::epsilon() )));
|
||||
const Scalar eps = internal::sqrt(((std::max)(epsfcn,NumTraits<Scalar>::epsilon() )));
|
||||
ValueType val1, val2;
|
||||
InputType x = _x;
|
||||
// TODO : we should do this only if the size is not already known
|
||||
|
|
|
|||
|
|
@@ -221,11 +221,11 @@ protected:
|
|||
Index* upperProfile = new Index[upperProfileSize];
|
||||
Index* lowerProfile = new Index[lowerProfileSize];
|
||||
|
||||
Index copyDiagSize = std::min(diagSize, m_diagSize);
|
||||
Index copyUpperSize = std::min(upperSize, m_upperSize);
|
||||
Index copyLowerSize = std::min(lowerSize, m_lowerSize);
|
||||
Index copyUpperProfileSize = std::min(upperProfileSize, m_upperProfileSize);
|
||||
Index copyLowerProfileSize = std::min(lowerProfileSize, m_lowerProfileSize);
|
||||
Index copyDiagSize = (std::min)(diagSize, m_diagSize);
|
||||
Index copyUpperSize = (std::min)(upperSize, m_upperSize);
|
||||
Index copyLowerSize = (std::min)(lowerSize, m_lowerSize);
|
||||
Index copyUpperProfileSize = (std::min)(upperProfileSize, m_upperProfileSize);
|
||||
Index copyLowerProfileSize = (std::min)(lowerProfileSize, m_lowerProfileSize);
|
||||
|
||||
// copy
|
||||
memcpy(diag, m_diag, copyDiagSize * sizeof (Scalar));
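The resize above stays safe in both directions because each memcpy copies only min(old, new) elements of the corresponding array. The pattern in isolation (a sketch, assuming trivially copyable element types):

#include <algorithm>
#include <cstring>

// Preserve the overlapping prefix of a raw buffer across a resize.
// Only valid for trivially copyable T (Scalar and Index here).
template <typename T>
void copyPrefix(T* dst, const T* src,
                std::size_t newSize, std::size_t oldSize)
{
    std::memcpy(dst, src, (std::min)(newSize, oldSize) * sizeof(T));
}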
|
||||
|
|
|
|||
|
|
@@ -295,10 +295,10 @@ void SparseLU<MatrixType,UmfPack>::extractData() const
|
|||
umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar());
|
||||
|
||||
// allocate data
|
||||
m_l.resize(rows,std::min(rows,cols));
|
||||
m_l.resize(rows,(std::min)(rows,cols));
|
||||
m_l.resizeNonZeros(lnz);
|
||||
|
||||
m_u.resize(std::min(rows,cols),cols);
|
||||
m_u.resize((std::min)(rows,cols),cols);
|
||||
m_u.resizeNonZeros(unz);
|
||||
|
||||
m_p.resize(rows);
|
||||
|
|
|
|||
|
|
@@ -143,10 +143,10 @@ struct TreeTest
|
|||
VectorType pt = VectorType::Random();
|
||||
BallPointStuff<Dim> i1(pt), i2(pt);
|
||||
|
||||
double m1 = std::numeric_limits<double>::max(), m2 = m1;
|
||||
double m1 = (std::numeric_limits<double>::max)(), m2 = m1;
|
||||
|
||||
for(int i = 0; i < (int)b.size(); ++i)
|
||||
m1 = std::min(m1, i1.minimumOnObject(b[i]));
|
||||
m1 = (std::min)(m1, i1.minimumOnObject(b[i]));
|
||||
|
||||
m2 = BVMinimize(tree, i2);
|
||||
|
||||
|
|
@@ -194,11 +194,11 @@ struct TreeTest
|
|||
|
||||
BallPointStuff<Dim> i1, i2;
|
||||
|
||||
double m1 = std::numeric_limits<double>::max(), m2 = m1;
|
||||
double m1 = (std::numeric_limits<double>::max)(), m2 = m1;
|
||||
|
||||
for(int i = 0; i < (int)b.size(); ++i)
|
||||
for(int j = 0; j < (int)v.size(); ++j)
|
||||
m1 = std::min(m1, i1.minimumOnObjectObject(b[i], v[j]));
|
||||
m1 = (std::min)(m1, i1.minimumOnObjectObject(b[i], v[j]));
|
||||
|
||||
m2 = BVMinimize(tree, vTree, i2);
|
||||
|
||||
|
|
|
|||
|
|
@@ -80,7 +80,7 @@ ei_add_test(FFT)
|
|||
find_package(MPFR 2.3.0)
|
||||
find_package(GMP)
|
||||
if(MPFR_FOUND)
|
||||
include_directories(${MPFR_INCLUDES})
|
||||
include_directories(${MPFR_INCLUDES} ./mpreal)
|
||||
ei_add_property(EIGEN_TESTED_BACKENDS "MPFR C++, ")
|
||||
set(EIGEN_MPFR_TEST_LIBRARIES ${MPFR_LIBRARIES} ${GMP_LIBRARIES})
|
||||
ei_add_test(mpreal_support "" "${EIGEN_MPFR_TEST_LIBRARIES}" )
|
||||
|
|
|
|||
|
|
@@ -70,7 +70,7 @@ complex<long double> promote(long double x) { return complex<long double>( x);
|
|||
{
|
||||
long double totalpower=0;
|
||||
long double difpower=0;
|
||||
size_t n = min( buf1.size(),buf2.size() );
|
||||
size_t n = (min)( buf1.size(),buf2.size() );
|
||||
for (size_t k=0;k<n;++k) {
|
||||
totalpower += (norm( buf1[k] ) + norm(buf2[k]) )/2.;
|
||||
difpower += norm(buf1[k] - buf2[k]);
|
||||
|
|
|
|||
File diff suppressed because it is too large
|
|
@@ -0,0 +1,562 @@
|
|||
/*
|
||||
Default header file for malloc-2.8.x, written by Doug Lea
|
||||
and released to the public domain, as explained at
|
||||
http://creativecommons.org/licenses/publicdomain.
|
||||
|
||||
last update: Wed May 27 14:25:17 2009 Doug Lea (dl at gee)
|
||||
|
||||
This header is for ANSI C/C++ only. You can set any of
|
||||
the following #defines before including:
|
||||
|
||||
* If USE_DL_PREFIX is defined, it is assumed that malloc.c
|
||||
was also compiled with this option, so all routines
|
||||
have names starting with "dl".
|
||||
|
||||
* If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this
|
||||
file will be #included AFTER <malloc.h>. This is needed only if
|
||||
your system defines a struct mallinfo that is incompatible with the
|
||||
standard one declared here. Otherwise, you can include this file
|
||||
INSTEAD of your system <malloc.h>. At least on ANSI, all
|
||||
declarations should be compatible with system versions
|
||||
|
||||
* If MSPACES is defined, declarations for mspace versions are included.
|
||||
*/
|
||||
|
||||
#ifndef MALLOC_280_H
|
||||
#define MALLOC_280_H
|
||||
|
||||
#define USE_DL_PREFIX
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include <stddef.h> /* for size_t */
|
||||
|
||||
#ifndef ONLY_MSPACES
|
||||
#define ONLY_MSPACES 0 /* define to a value */
|
||||
#endif /* ONLY_MSPACES */
|
||||
#ifndef NO_MALLINFO
|
||||
#define NO_MALLINFO 0
|
||||
#endif /* NO_MALLINFO */
|
||||
|
||||
|
||||
#if !ONLY_MSPACES
|
||||
|
||||
#ifndef USE_DL_PREFIX
|
||||
#define dlcalloc calloc
|
||||
#define dlfree free
|
||||
#define dlmalloc malloc
|
||||
#define dlmemalign memalign
|
||||
#define dlrealloc realloc
|
||||
#define dlvalloc valloc
|
||||
#define dlpvalloc pvalloc
|
||||
#define dlmallinfo mallinfo
|
||||
#define dlmallopt mallopt
|
||||
#define dlmalloc_trim malloc_trim
|
||||
#define dlmalloc_stats malloc_stats
|
||||
#define dlmalloc_usable_size malloc_usable_size
|
||||
#define dlmalloc_footprint malloc_footprint
|
||||
#define dlindependent_calloc independent_calloc
|
||||
#define dlindependent_comalloc independent_comalloc
|
||||
#endif /* USE_DL_PREFIX */
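With USE_DL_PREFIX forced on above, the allocator never shadows the system malloc, and callers use the dl-prefixed names explicitly. A minimal usage sketch (the include name malloc-2.8.h is hypothetical):

extern "C" {
#include "malloc-2.8.h"  // hypothetical name for the header above
}

int main()
{
    void* p = dlmalloc(64);  // allocate 64 bytes from dlmalloc's pool
    if (p) dlfree(p);        // and hand them back
    return 0;
}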
|
||||
#if !NO_MALLINFO
|
||||
#ifndef HAVE_USR_INCLUDE_MALLOC_H
|
||||
#ifndef _MALLOC_H
|
||||
#ifndef MALLINFO_FIELD_TYPE
|
||||
#define MALLINFO_FIELD_TYPE size_t
|
||||
#endif /* MALLINFO_FIELD_TYPE */
|
||||
#ifndef STRUCT_MALLINFO_DECLARED
|
||||
#define STRUCT_MALLINFO_DECLARED 1
|
||||
struct mallinfo {
|
||||
MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
|
||||
MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
|
||||
MALLINFO_FIELD_TYPE smblks; /* always 0 */
|
||||
MALLINFO_FIELD_TYPE hblks; /* always 0 */
|
||||
MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
|
||||
MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
|
||||
MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
|
||||
MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
|
||||
MALLINFO_FIELD_TYPE fordblks; /* total free space */
|
||||
MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
|
||||
};
|
||||
#endif /* STRUCT_MALLINFO_DECLARED */
|
||||
#endif /* _MALLOC_H */
|
||||
#endif /* HAVE_USR_INCLUDE_MALLOC_H */
|
||||
#endif /* !NO_MALLINFO */
|
||||
|
||||
/*
|
||||
malloc(size_t n)
|
||||
Returns a pointer to a newly allocated chunk of at least n bytes, or
|
||||
null if no space is available, in which case errno is set to ENOMEM
|
||||
on ANSI C systems.
|
||||
|
||||
If n is zero, malloc returns a minimum-sized chunk. (The minimum
|
||||
size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
|
||||
systems.) Note that size_t is an unsigned type, so calls with
|
||||
arguments that would be negative if signed are interpreted as
|
||||
requests for huge amounts of space, which will often fail. The
|
||||
maximum supported value of n differs across systems, but is in all
|
||||
cases less than the maximum representable value of a size_t.
|
||||
*/
|
||||
void* dlmalloc(size_t);
|
||||
|
||||
/*
|
||||
free(void* p)
|
||||
Releases the chunk of memory pointed to by p, that had been previously
|
||||
allocated using malloc or a related routine such as realloc.
|
||||
It has no effect if p is null. If p was not malloced or already
|
||||
freed, free(p) will by default cause the current program to abort.
|
||||
*/
|
||||
void dlfree(void*);
|
||||
|
||||
/*
|
||||
calloc(size_t n_elements, size_t element_size);
|
||||
Returns a pointer to n_elements * element_size bytes, with all locations
|
||||
set to zero.
|
||||
*/
|
||||
void* dlcalloc(size_t, size_t);
|
||||
|
||||
/*
|
||||
realloc(void* p, size_t n)
|
||||
Returns a pointer to a chunk of size n that contains the same data
|
||||
as does chunk p up to the minimum of (n, p's size) bytes, or null
|
||||
if no space is available.
|
||||
|
||||
The returned pointer may or may not be the same as p. The algorithm
|
||||
prefers extending p in most cases when possible, otherwise it
|
||||
employs the equivalent of a malloc-copy-free sequence.
|
||||
|
||||
If p is null, realloc is equivalent to malloc.
|
||||
|
||||
If space is not available, realloc returns null, errno is set (if on
|
||||
ANSI) and p is NOT freed.
|
||||
|
||||
If n is for fewer bytes than already held by p, the newly unused
|
||||
space is lopped off and freed if possible. realloc with a size
|
||||
argument of zero (re)allocates a minimum-sized chunk.
|
||||
|
||||
The old unix realloc convention of allowing the last-free'd chunk
|
||||
to be used as an argument to realloc is not supported.
|
||||
*/
|
||||
|
||||
void* dlrealloc(void*, size_t);
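Because a failed reallocation returns null without freeing its argument, assigning the result straight back to the original pointer leaks the old block on failure. The usual guard, shown with the dl-prefixed name (a sketch relying on the declaration above):

#include <stddef.h>

// dlrealloc (declared above) returns null on failure and does NOT free
// its argument, so never assign its result straight back to p.
void* grow(void* p, size_t newSize)
{
    void* q = dlrealloc(p, newSize);
    return q ? q : p;  // on failure the caller still owns the old block
}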
|
||||
|
||||
/*
|
||||
memalign(size_t alignment, size_t n);
|
||||
Returns a pointer to a newly allocated chunk of n bytes, aligned
|
||||
in accord with the alignment argument.
|
||||
|
||||
The alignment argument should be a power of two. If the argument is
|
||||
not a power of two, the nearest greater power is used.
|
||||
8-byte alignment is guaranteed by normal malloc calls, so don't
|
||||
bother calling memalign with an argument of 8 or less.
|
||||
|
||||
Overreliance on memalign is a sure way to fragment space.
|
||||
*/
|
||||
void* dlmemalign(size_t, size_t);
|
||||
|
||||
/*
|
||||
valloc(size_t n);
|
||||
Equivalent to memalign(pagesize, n), where pagesize is the page
|
||||
size of the system. If the pagesize is unknown, 4096 is used.
|
||||
*/
|
||||
void* dlvalloc(size_t);
|
||||
|
||||
/*
|
||||
mallopt(int parameter_number, int parameter_value)
|
||||
Sets tunable parameters. The format is to provide a
|
||||
(parameter-number, parameter-value) pair. mallopt then sets the
|
||||
corresponding parameter to the argument value if it can (i.e., so
|
||||
long as the value is meaningful), and returns 1 if successful else
|
||||
0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
|
||||
normally defined in malloc.h. None of these are used in this malloc,
|
||||
so setting them has no effect. But this malloc also supports other
|
||||
options in mallopt:
|
||||
|
||||
Symbol param # default allowed param values
|
||||
M_TRIM_THRESHOLD -1 2*1024*1024 any (-1U disables trimming)
|
||||
M_GRANULARITY -2 page size any power of 2 >= page size
|
||||
M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
|
||||
*/
|
||||
int dlmallopt(int, int);
|
||||
|
||||
#define M_TRIM_THRESHOLD (-1)
|
||||
#define M_GRANULARITY (-2)
|
||||
#define M_MMAP_THRESHOLD (-3)
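Given the table above, only the three dlmalloc-specific parameters do anything; a tuning call looks like this (a sketch):

void tuneAllocator(void)
{
    // Request system memory in 1 MiB granules
    // (must be a power of 2 >= the page size).
    dlmallopt(M_GRANULARITY, 1024 * 1024);
    // -1 disables automatic trimming entirely (see the table above).
    dlmallopt(M_TRIM_THRESHOLD, -1);
    // Each call returns 1 on success, 0 if the value was rejected.
}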
|
||||
|
||||
|
||||
/*
|
||||
malloc_footprint();
|
||||
Returns the number of bytes obtained from the system. The total
|
||||
number of bytes allocated by malloc, realloc etc., is less than this
|
||||
value. Unlike mallinfo, this function returns only a precomputed
|
||||
result, so can be called frequently to monitor memory consumption.
|
||||
Even if locks are otherwise defined, this function does not use them,
|
||||
so results might not be up to date.
|
||||
*/
|
||||
size_t dlmalloc_footprint();
|
||||
|
||||
#if !NO_MALLINFO
|
||||
/*
|
||||
mallinfo()
|
||||
Returns (by copy) a struct containing various summary statistics:
|
||||
|
||||
arena: current total non-mmapped bytes allocated from system
|
||||
ordblks: the number of free chunks
|
||||
smblks: always zero.
|
||||
hblks: current number of mmapped regions
|
||||
hblkhd: total bytes held in mmapped regions
|
||||
usmblks: the maximum total allocated space. This will be greater
|
||||
than current total if trimming has occurred.
|
||||
fsmblks: always zero
|
||||
uordblks: current total allocated space (normal or mmapped)
|
||||
fordblks: total free space
|
||||
keepcost: the maximum number of bytes that could ideally be released
|
||||
back to system via malloc_trim. ("ideally" means that
|
||||
it ignores page restrictions etc.)
|
||||
|
||||
Because these fields are ints, but internal bookkeeping may
|
||||
be kept as longs, the reported values may wrap around zero and
|
||||
thus be inaccurate.
|
||||
*/
|
||||
|
||||
struct mallinfo dlmallinfo(void);
|
||||
#endif /* NO_MALLINFO */
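Reading the statistics is a plain struct copy; for instance, uordblks gives current live allocation (a sketch relying on the declarations above):

#include <stddef.h>
#include <stdio.h>

// Print a snapshot of the allocator's bookkeeping.
void reportHeap(void)
{
    struct mallinfo mi = dlmallinfo();
    printf("in use: %zu  free: %zu  footprint: %zu\n",
           (size_t)mi.uordblks, (size_t)mi.fordblks,
           dlmalloc_footprint());
}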
|
||||
|
||||
/*
|
||||
independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
|
||||
|
||||
independent_calloc is similar to calloc, but instead of returning a
|
||||
single cleared space, it returns an array of pointers to n_elements
|
||||
independent elements that can hold contents of size elem_size, each
|
||||
of which starts out cleared, and can be independently freed,
|
||||
realloc'ed etc. The elements are guaranteed to be adjacently
|
||||
allocated (this is not guaranteed to occur with multiple callocs or
|
||||
mallocs), which may also improve cache locality in some
|
||||
applications.
|
||||
|
||||
The "chunks" argument is optional (i.e., may be null, which is
|
||||
probably the most typical usage). If it is null, the returned array
|
||||
is itself dynamically allocated and should also be freed when it is
|
||||
no longer needed. Otherwise, the chunks array must be of at least
|
||||
n_elements in length. It is filled in with the pointers to the
|
||||
chunks.
|
||||
|
||||
In either case, independent_calloc returns this pointer array, or
|
||||
null if the allocation failed. If n_elements is zero and "chunks"
|
||||
is null, it returns a chunk representing an array with zero elements
|
||||
(which should be freed if not wanted).
|
||||
|
||||
Each element must be individually freed when it is no longer
|
||||
needed. If you'd like to instead be able to free all at once, you
|
||||
should instead use regular calloc and assign pointers into this
|
||||
space to represent elements. (In this case though, you cannot
|
||||
independently free elements.)
|
||||
|
||||
independent_calloc simplifies and speeds up implementations of many
|
||||
kinds of pools. It may also be useful when constructing large data
|
||||
structures that initially have a fixed number of fixed-sized nodes,
|
||||
but the number is not known at compile time, and some of the nodes
|
||||
may later need to be freed. For example:
|
||||
|
||||
struct Node { int item; struct Node* next; };
|
||||
|
||||
struct Node* build_list() {
|
||||
struct Node** pool;
|
||||
int n = read_number_of_nodes_needed();
|
||||
if (n <= 0) return 0;
|
||||
pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
|
||||
if (pool == 0) die();
|
||||
// organize into a linked list...
|
||||
struct Node* first = pool[0];
|
||||
for (i = 0; i < n-1; ++i)
|
||||
pool[i]->next = pool[i+1];
|
||||
free(pool); // Can now free the array (or not, if it is needed later)
|
||||
return first;
|
||||
}
|
||||
*/
|
||||
void** dlindependent_calloc(size_t, size_t, void**);
|
||||
|
||||
/*
|
||||
independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
|
||||
|
||||
independent_comalloc allocates, all at once, a set of n_elements
|
||||
chunks with sizes indicated in the "sizes" array. It returns
|
||||
an array of pointers to these elements, each of which can be
|
||||
independently freed, realloc'ed etc. The elements are guaranteed to
|
||||
be adjacently allocated (this is not guaranteed to occur with
|
||||
multiple callocs or mallocs), which may also improve cache locality
|
||||
in some applications.
|
||||
|
||||
The "chunks" argument is optional (i.e., may be null). If it is null
|
||||
the returned array is itself dynamically allocated and should also
|
||||
be freed when it is no longer needed. Otherwise, the chunks array
|
||||
must be of at least n_elements in length. It is filled in with the
|
||||
pointers to the chunks.
|
||||
|
||||
In either case, independent_comalloc returns this pointer array, or
|
||||
null if the allocation failed. If n_elements is zero and chunks is
|
||||
null, it returns a chunk representing an array with zero elements
|
||||
(which should be freed if not wanted).
|
||||
|
||||
Each element must be individually freed when it is no longer
|
||||
needed. If you'd like to instead be able to free all at once, you
|
||||
should instead use a single regular malloc, and assign pointers at
|
||||
particular offsets in the aggregate space. (In this case though, you
|
||||
cannot independently free elements.)
|
||||
|
||||
independent_comalloc differs from independent_calloc in that each
|
||||
element may have a different size, and also that it does not
|
||||
automatically clear elements.
|
||||
|
||||
independent_comalloc can be used to speed up allocation in cases
|
||||
where several structs or objects must always be allocated at the
|
||||
same time. For example:
|
||||
|
||||
struct Head { ... }
|
||||
struct Foot { ... }
|
||||
|
||||
void send_message(char* msg) {
|
||||
int msglen = strlen(msg);
|
||||
size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
|
||||
void* chunks[3];
|
||||
if (independent_comalloc(3, sizes, chunks) == 0)
|
||||
die();
|
||||
struct Head* head = (struct Head*)(chunks[0]);
|
||||
char* body = (char*)(chunks[1]);
|
||||
struct Foot* foot = (struct Foot*)(chunks[2]);
|
||||
// ...
|
||||
}
|
||||
|
||||
In general though, independent_comalloc is worth using only for
|
||||
larger values of n_elements. For small values, you probably won't
|
||||
detect enough difference from series of malloc calls to bother.
|
||||
|
||||
Overuse of independent_comalloc can increase overall memory usage,
|
||||
since it cannot reuse existing noncontiguous small chunks that
|
||||
might be available for some of the elements.
|
||||
*/
|
||||
void** dlindependent_comalloc(size_t, size_t*, void**);
|
||||
|
||||
|
||||
/*
|
||||
pvalloc(size_t n);
|
||||
Equivalent to valloc(minimum-page-that-holds(n)), that is,
|
||||
round up n to nearest pagesize.
|
||||
*/
|
||||
void* dlpvalloc(size_t);
|
||||
|
||||
/*
|
||||
malloc_trim(size_t pad);
|
||||
|
||||
If possible, gives memory back to the system (via negative arguments
|
||||
to sbrk) if there is unused memory at the `high' end of the malloc
|
||||
pool or in unused MMAP segments. You can call this after freeing
|
||||
large blocks of memory to potentially reduce the system-level memory
|
||||
requirements of a program. However, it cannot guarantee to reduce
|
||||
memory. Under some allocation patterns, some large free blocks of
|
||||
memory will be locked between two used chunks, so they cannot be
|
||||
given back to the system.
|
||||
|
||||
The `pad' argument to malloc_trim represents the amount of free
|
||||
trailing space to leave untrimmed. If this argument is zero, only
|
||||
the minimum amount of memory to maintain internal data structures
|
||||
will be left. Non-zero arguments can be supplied to maintain enough
|
||||
trailing space to service future expected allocations without having
|
||||
to re-obtain memory from the system.
|
||||
|
||||
Malloc_trim returns 1 if it actually released any memory, else 0.
|
||||
*/
|
||||
int dlmalloc_trim(size_t);
|
||||
|
||||
/*
|
||||
malloc_stats();
|
||||
Prints on stderr the amount of space obtained from the system (both
|
||||
via sbrk and mmap), the maximum amount (which may be more than
|
||||
current if malloc_trim and/or munmap got called), and the current
|
||||
number of bytes allocated via malloc (or realloc, etc) but not yet
|
||||
freed. Note that this is the number of bytes allocated, not the
|
||||
number requested. It will be larger than the number requested
|
||||
because of alignment and bookkeeping overhead. Because it includes
|
||||
alignment wastage as being in use, this figure may be greater than
|
||||
zero even when no user-level chunks are allocated.
|
||||
|
||||
The reported current and maximum system memory can be inaccurate if
|
||||
a program makes other calls to system memory allocation functions
|
||||
(normally sbrk) outside of malloc.
|
||||
|
||||
malloc_stats prints only the most commonly interesting statistics.
|
||||
More information can be obtained by calling mallinfo.
|
||||
*/
|
||||
void dlmalloc_stats();
|
||||
|
||||
#endif /* !ONLY_MSPACES */
|
||||
|
||||
/*
|
||||
malloc_usable_size(void* p);
|
||||
|
||||
Returns the number of bytes you can actually use in
|
||||
an allocated chunk, which may be more than you requested (although
|
||||
often not) due to alignment and minimum size constraints.
|
||||
You can use this many bytes without worrying about
|
||||
overwriting other allocated objects. This is not a particularly great
|
||||
programming practice. malloc_usable_size can be more useful in
|
||||
debugging and assertions, for example:
|
||||
|
||||
p = malloc(n);
|
||||
assert(malloc_usable_size(p) >= 256);
|
||||
*/
|
||||
size_t dlmalloc_usable_size(void*);
|
||||
|
||||
|
||||
#if MSPACES
|
||||
|
||||
/*
|
||||
mspace is an opaque type representing an independent
|
||||
region of space that supports mspace_malloc, etc.
|
||||
*/
|
||||
typedef void* mspace;
|
||||
|
||||
/*
|
||||
create_mspace creates and returns a new independent space with the
|
||||
given initial capacity, or, if 0, the default granularity size. It
|
||||
returns null if there is no system memory available to create the
|
||||
space. If argument locked is non-zero, the space uses a separate
|
||||
lock to control access. The capacity of the space will grow
|
||||
dynamically as needed to service mspace_malloc requests. You can
|
||||
control the sizes of incremental increases of this space by
|
||||
compiling with a different DEFAULT_GRANULARITY or dynamically
|
||||
setting with mallopt(M_GRANULARITY, value).
|
||||
*/
|
||||
mspace create_mspace(size_t capacity, int locked);
|
||||
|
||||
/*
|
||||
destroy_mspace destroys the given space, and attempts to return all
|
||||
of its memory back to the system, returning the total number of
|
||||
bytes freed. After destruction, the results of access to all memory
|
||||
used by the space become undefined.
|
||||
*/
|
||||
size_t destroy_mspace(mspace msp);
|
||||
|
||||
/*
|
||||
create_mspace_with_base uses the memory supplied as the initial base
|
||||
of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
|
||||
space is used for bookkeeping, so the capacity must be at least this
|
||||
large. (Otherwise 0 is returned.) When this initial space is
|
||||
exhausted, additional memory will be obtained from the system.
|
||||
Destroying this space will deallocate all additionally allocated
|
||||
space (if possible) but not the initial base.
|
||||
*/
|
||||
mspace create_mspace_with_base(void* base, size_t capacity, int locked);
|
||||
|
||||
/*
|
||||
mspace_track_large_chunks controls whether requests for large chunks
|
||||
are allocated in their own untracked mmapped regions, separate from
|
||||
others in this mspace. By default large chunks are not tracked,
|
||||
which reduces fragmentation. However, such chunks are not
|
||||
necessarily released to the system upon destroy_mspace. Enabling
|
||||
tracking by setting to true may increase fragmentation, but avoids
|
||||
leakage when relying on destroy_mspace to release all memory
|
||||
allocated using this space. The function returns the previous
|
||||
setting.
|
||||
*/
|
||||
int mspace_track_large_chunks(mspace msp, int enable);
|
||||
|
||||
/*
|
||||
mspace_malloc behaves as malloc, but operates within
|
||||
the given space.
|
||||
*/
|
||||
void* mspace_malloc(mspace msp, size_t bytes);
|
||||
|
||||
/*
|
||||
mspace_free behaves as free, but operates within
|
||||
the given space.
|
||||
|
||||
If compiled with FOOTERS==1, mspace_free is not actually needed.
|
||||
free may be called instead of mspace_free because freed chunks from
|
||||
any space are handled by their originating spaces.
|
||||
*/
|
||||
void mspace_free(mspace msp, void* mem);
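Putting the calls above together, an independent arena can be created, used, and torn down without touching the global heap (a usage sketch):

void arenaDemo(void)
{
    mspace arena = create_mspace(0 /* default capacity */, 1 /* locked */);
    if (!arena) return;

    void* p = mspace_malloc(arena, 256);  // served from this arena only
    mspace_free(arena, p);

    destroy_mspace(arena);  // releases the arena's memory wholesale
}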
|
||||
|
||||
/*
|
||||
mspace_realloc behaves as realloc, but operates within
|
||||
the given space.
|
||||
|
||||
If compiled with FOOTERS==1, mspace_realloc is not actually
|
||||
needed. realloc may be called instead of mspace_realloc because
|
||||
realloced chunks from any space are handled by their originating
|
||||
spaces.
|
||||
*/
|
||||
void* mspace_realloc(mspace msp, void* mem, size_t newsize);
|
||||
|
||||
/*
|
||||
mspace_calloc behaves as calloc, but operates within
|
||||
the given space.
|
||||
*/
|
||||
void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
|
||||
|
||||
/*
|
||||
mspace_memalign behaves as memalign, but operates within
|
||||
the given space.
|
||||
*/
|
||||
void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
|
||||
|
||||
/*
|
||||
mspace_independent_calloc behaves as independent_calloc, but
|
||||
operates within the given space.
|
||||
*/
|
||||
void** mspace_independent_calloc(mspace msp, size_t n_elements,
|
||||
size_t elem_size, void* chunks[]);
|
||||
|
||||
/*
|
||||
mspace_independent_comalloc behaves as independent_comalloc, but
|
||||
operates within the given space.
|
||||
*/
|
||||
void** mspace_independent_comalloc(mspace msp, size_t n_elements,
|
||||
size_t sizes[], void* chunks[]);
|
||||
|
||||
/*
|
||||
mspace_footprint() returns the number of bytes obtained from the
|
||||
system for this space.
|
||||
*/
|
||||
size_t mspace_footprint(mspace msp);
|
||||
|
||||
|
||||
#if !NO_MALLINFO
|
||||
/*
|
||||
mspace_mallinfo behaves as mallinfo, but reports properties of
|
||||
the given space.
|
||||
*/
|
||||
struct mallinfo mspace_mallinfo(mspace msp);
|
||||
#endif /* NO_MALLINFO */
|
||||
|
||||
/*
|
||||
mspace_usable_size(void* p) behaves the same as malloc_usable_size.
|
||||
*/
|
||||
size_t mspace_usable_size(void* mem);
|
||||
|
||||
/*
|
||||
mspace_malloc_stats behaves as malloc_stats, but reports
|
||||
properties of the given space.
|
||||
*/
|
||||
void mspace_malloc_stats(mspace msp);
|
||||
|
||||
/*
|
||||
mspace_trim behaves as malloc_trim, but
|
||||
operates within the given space.
|
||||
*/
|
||||
int mspace_trim(mspace msp, size_t pad);
|
||||
|
||||
/*
|
||||
An alias for mallopt.
|
||||
*/
|
||||
int mspace_mallopt(int, int);
|
||||
|
||||
#endif /* MSPACES */
|
||||
|
||||
#ifdef __cplusplus
|
||||
}; /* end of extern "C" */
|
||||
#endif
|
||||
|
||||
#endif /* MALLOC_280_H */
|
||||
|
|
@@ -1,408 +1,507 @@
|
|||
/*
|
||||
Multi-precision real number class. C++ wrapper for MPFR library.
|
||||
Project homepage: http://www.holoborodko.com/pavel/
|
||||
Contact e-mail: pavel@holoborodko.com
|
||||
|
||||
Copyright (c) 2008-2010 Pavel Holoborodko
|
||||
|
||||
This library is free software; you can redistribute it and/or
|
||||
modify it under the terms of the GNU Lesser General Public
|
||||
License as published by the Free Software Foundation; either
|
||||
version 2.1 of the License, or (at your option) any later version.
|
||||
|
||||
This library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public
|
||||
License along with this library; if not, write to the Free Software
|
||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
Contributors:
|
||||
Brian Gladman, Helmut Jarausch, Fokko Beekhof, Ulrich Mutze,
|
||||
Heinz van Saanen, Pere Constans, Dmitriy Gubanov
|
||||
*/
|
||||
|
||||
#include <cstring>
|
||||
#include "mpreal.h"
|
||||
|
||||
using std::ws;
|
||||
using std::cerr;
|
||||
using std::endl;
|
||||
using std::string;
|
||||
using std::ostream;
|
||||
using std::istream;
|
||||
|
||||
namespace mpfr{
|
||||
|
||||
mp_rnd_t mpreal::default_rnd = mpfr_get_default_rounding_mode();
|
||||
mp_prec_t mpreal::default_prec = mpfr_get_default_prec();
|
||||
int mpreal::default_base = 10;
|
||||
int mpreal::double_bits = -1;
|
||||
|
||||
// Default constructor: creates mp number and initializes it to 0.
|
||||
mpreal::mpreal()
|
||||
{
|
||||
mpfr_init2(mp,default_prec);
|
||||
mpfr_set_ui(mp,0,default_rnd);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const mpreal& u)
|
||||
{
|
||||
mpfr_init2(mp,mpfr_get_prec(u.mp));
|
||||
mpfr_set(mp,u.mp,default_rnd);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const mpfr_t u)
|
||||
{
|
||||
mpfr_init2(mp,mpfr_get_prec(u));
|
||||
mpfr_set(mp,u,default_rnd);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const mpf_t u)
|
||||
{
|
||||
mpfr_init2(mp,mpf_get_prec(u));
|
||||
mpfr_set_f(mp,u,default_rnd);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const mpz_t u, mp_prec_t prec, mp_rnd_t mode)
|
||||
{
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_z(mp,u,mode);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const mpq_t u, mp_prec_t prec, mp_rnd_t mode)
|
||||
{
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_q(mp,u,mode);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const double u, mp_prec_t prec, mp_rnd_t mode)
|
||||
{
|
||||
if(double_bits == -1 || fits_in_bits(u, double_bits))
|
||||
{
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_d(mp,u,mode);
|
||||
}
|
||||
else
|
||||
throw conversion_overflow();
|
||||
}
|
||||
|
||||
mpreal::mpreal(const long double u, mp_prec_t prec, mp_rnd_t mode)
|
||||
{
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_ld(mp,u,mode);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const unsigned long int u, mp_prec_t prec, mp_rnd_t mode)
|
||||
{
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_ui(mp,u,mode);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const unsigned int u, mp_prec_t prec, mp_rnd_t mode)
|
||||
{
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_ui(mp,u,mode);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const long int u, mp_prec_t prec, mp_rnd_t mode)
|
||||
{
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_si(mp,u,mode);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const int u, mp_prec_t prec, mp_rnd_t mode)
|
||||
{
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_si(mp,u,mode);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const char* s, mp_prec_t prec, int base, mp_rnd_t mode)
|
||||
{
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_str(mp, s, base, mode);
|
||||
}
|
||||
|
||||
mpreal::~mpreal()
|
||||
{
|
||||
mpfr_clear(mp);
|
||||
}
|
||||
|
||||
// Operators - Assignment
|
||||
mpreal& mpreal::operator=(const char* s)
|
||||
{
|
||||
mpfr_t t;
|
||||
if(0==mpfr_init_set_str(t,s,default_base,default_rnd))
|
||||
{
|
||||
mpfr_set(mp,t,mpreal::default_rnd);
|
||||
mpfr_clear(t);
|
||||
}else{
|
||||
mpfr_clear(t);
|
||||
// cerr<<"fail to convert string"<<endl;
|
||||
}
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
const mpreal fma (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode)
|
||||
{
|
||||
mpreal a;
|
||||
mp_prec_t p1, p2, p3;
|
||||
|
||||
p1 = v1.get_prec();
|
||||
p2 = v2.get_prec();
|
||||
p3 = v3.get_prec();
|
||||
|
||||
a.set_prec(p3>p2?(p3>p1?p3:p1):(p2>p1?p2:p1));
|
||||
|
||||
mpfr_fma(a.mp,v1.mp,v2.mp,v3.mp,rnd_mode);
|
||||
return a;
|
||||
}
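The nested ternary in fma and fms above simply selects the largest of the three operand precisions, so the result never truncates an operand. Written out (an equivalent sketch; mp_prec_t comes from MPFR):

#include <algorithm>

// Equivalent to a.set_prec(p3>p2?(p3>p1?p3:p1):(p2>p1?p2:p1)) above:
mp_prec_t resultPrec(mp_prec_t p1, mp_prec_t p2, mp_prec_t p3)
{
    return (std::max)(p1, (std::max)(p2, p3));
}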
|
||||
|
||||
const mpreal fms (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode)
|
||||
{
|
||||
mpreal a;
|
||||
mp_prec_t p1, p2, p3;
|
||||
|
||||
p1 = v1.get_prec();
|
||||
p2 = v2.get_prec();
|
||||
p3 = v3.get_prec();
|
||||
|
||||
a.set_prec(p3>p2?(p3>p1?p3:p1):(p2>p1?p2:p1));
|
||||
|
||||
mpfr_fms(a.mp,v1.mp,v2.mp,v3.mp,rnd_mode);
|
||||
return a;
|
||||
}
|
||||
|
||||
const mpreal agm (const mpreal& v1, const mpreal& v2, mp_rnd_t rnd_mode)
|
||||
{
|
||||
mpreal a;
|
||||
mp_prec_t p1, p2;
|
||||
|
||||
p1 = v1.get_prec();
|
||||
p2 = v2.get_prec();
|
||||
|
||||
a.set_prec(p1>p2?p1:p2);
|
||||
|
||||
mpfr_agm(a.mp, v1.mp, v2.mp, rnd_mode);
|
||||
|
||||
return a;
|
||||
}
|
||||
|
||||
const mpreal hypot (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode)
|
||||
{
|
||||
mpreal a;
|
||||
mp_prec_t yp, xp;
|
||||
|
||||
yp = y.get_prec();
|
||||
xp = x.get_prec();
|
||||
|
||||
a.set_prec(yp>xp?yp:xp);
|
||||
|
||||
mpfr_hypot(a.mp, x.mp, y.mp, rnd_mode);
|
||||
|
||||
return a;
|
||||
}
|
||||
|
||||
const mpreal sum (const mpreal tab[], unsigned long int n, mp_rnd_t rnd_mode)
|
||||
{
|
||||
mpreal x;
|
||||
mpfr_ptr* t;
|
||||
unsigned long int i;
|
||||
|
||||
t = new mpfr_ptr[n];
|
||||
for (i=0;i<n;i++) t[i] = (mpfr_ptr)tab[i].mp;
|
||||
mpfr_sum(x.mp,t,n,rnd_mode);
|
||||
delete[] t;
|
||||
return x;
|
||||
}
|
||||
|
||||
const mpreal remainder (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode)
|
||||
{
|
||||
mpreal a;
|
||||
mp_prec_t yp, xp;
|
||||
|
||||
yp = y.get_prec();
|
||||
xp = x.get_prec();
|
||||
|
||||
a.set_prec(yp>xp?yp:xp);
|
||||
|
||||
mpfr_remainder(a.mp, x.mp, y.mp, rnd_mode);
|
||||
|
||||
return a;
|
||||
}
|
||||
|
||||
const mpreal remquo (long* q, const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode)
|
||||
{
|
||||
mpreal a;
|
||||
mp_prec_t yp, xp;
|
||||
|
||||
yp = y.get_prec();
|
||||
xp = x.get_prec();
|
||||
|
||||
a.set_prec(yp>xp?yp:xp);
|
||||
|
||||
mpfr_remquo(a.mp,q, x.mp, y.mp, rnd_mode);
|
||||
|
||||
return a;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
std::string to_string(T t, std::ios_base & (*f)(std::ios_base&))
|
||||
{
|
||||
std::ostringstream oss;
|
||||
oss << f << t;
|
||||
return oss.str();
|
||||
}
|
||||
|
||||
mpreal::operator std::string() const
|
||||
{
|
||||
return to_string();
|
||||
}
|
||||
|
||||
string mpreal::to_string(size_t n, int b, mp_rnd_t mode) const
|
||||
{
|
||||
char *s, *ns = NULL;
|
||||
size_t slen, nslen;
|
||||
mp_exp_t exp;
|
||||
string out;
|
||||
|
||||
if(mpfr_inf_p(mp))
|
||||
{
|
||||
if(mpfr_sgn(mp)>0) return "+@Inf@";
|
||||
else return "-@Inf@";
|
||||
}
|
||||
|
||||
if(mpfr_zero_p(mp)) return "0";
|
||||
if(mpfr_nan_p(mp)) return "@NaN@";
|
||||
|
||||
|
||||
s = mpfr_get_str(NULL,&exp,b,0,mp,mode);
|
||||
ns = mpfr_get_str(NULL,&exp,b,n,mp,mode);
|
||||
|
||||
if(s!=NULL && ns!=NULL)
|
||||
{
|
||||
slen = strlen(s);
|
||||
nslen = strlen(ns);
|
||||
if(nslen<=slen)
|
||||
{
|
||||
mpfr_free_str(s);
|
||||
s = ns;
|
||||
slen = nslen;
|
||||
}
|
||||
else {
|
||||
mpfr_free_str(ns);
|
||||
}
|
||||
|
||||
// Make human eye-friendly formatting if possible
|
||||
if (exp>0 && static_cast<size_t>(exp)<slen)
|
||||
{
|
||||
if(s[0]=='-')
|
||||
{
|
||||
// Remove zeros starting from right end
|
||||
char* ptr = s+slen-1;
|
||||
while (*ptr=='0' && ptr>s+exp) ptr--;
|
||||
|
||||
if(ptr==s+exp) out = string(s,exp+1);
|
||||
else out = string(s,exp+1)+'.'+string(s+exp+1,ptr-(s+exp+1)+1);
|
||||
|
||||
//out = string(s,exp+1)+'.'+string(s+exp+1);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Remove zeros starting from right end
|
||||
char* ptr = s+slen-1;
|
||||
while (*ptr=='0' && ptr>s+exp-1) ptr--;
|
||||
|
||||
if(ptr==s+exp-1) out = string(s,exp);
|
||||
else out = string(s,exp)+'.'+string(s+exp,ptr-(s+exp)+1);
|
||||
|
||||
//out = string(s,exp)+'.'+string(s+exp);
|
||||
}
|
||||
|
||||
}else{ // exp<0 || exp>slen
|
||||
if(s[0]=='-')
|
||||
{
|
||||
// Remove zeros starting from right end
|
||||
char* ptr = s+slen-1;
|
||||
while (*ptr=='0' && ptr>s+1) ptr--;
|
||||
|
||||
if(ptr==s+1) out = string(s,2);
|
||||
else out = string(s,2)+'.'+string(s+2,ptr-(s+2)+1);
|
||||
|
||||
//out = string(s,2)+'.'+string(s+2);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Remove zeros starting from right end
|
||||
char* ptr = s+slen-1;
|
||||
while (*ptr=='0' && ptr>s) ptr--;
|
||||
|
||||
if(ptr==s) out = string(s,1);
|
||||
else out = string(s,1)+'.'+string(s+1,ptr-(s+1)+1);
|
||||
|
||||
//out = string(s,1)+'.'+string(s+1);
|
||||
}
|
||||
|
||||
// Make final string
|
||||
if(--exp)
|
||||
{
|
||||
if(exp>0) out += "e+"+mpfr::to_string<mp_exp_t>(exp,std::dec);
|
||||
else out += "e"+mpfr::to_string<mp_exp_t>(exp,std::dec);
|
||||
}
|
||||
}
|
||||
|
||||
mpfr_free_str(s);
|
||||
return out;
|
||||
}else{
|
||||
return "conversion error!";
|
||||
}
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
// I/O
|
||||
ostream& operator<<(ostream& os, const mpreal& v)
|
||||
{
|
||||
return os<<v.to_string(os.precision());
|
||||
}
|
||||
|
||||
istream& operator>>(istream &is, mpreal& v)
|
||||
{
|
||||
char c;
|
||||
string s = "";
|
||||
mpfr_t t;
|
||||
|
||||
if(is.good())
|
||||
{
|
||||
is>>ws;
|
||||
while ((c = is.get())!=EOF)
|
||||
{
|
||||
if(c ==' ' || c == '\t' || c == '\n' || c == '\r')
|
||||
{
|
||||
is.putback(c);
|
||||
break;
|
||||
}
|
||||
s += c;
|
||||
}
|
||||
|
||||
if(s.size() != 0)
|
||||
{
|
||||
// Protect current value from alteration in case of input error
|
||||
// so some error handling (roll back) procedure can be used
|
||||
if(0==mpfr_init_set_str(t,s.c_str(),mpreal::default_base,mpreal::default_rnd))
|
||||
{
|
||||
mpfr_set(v.mp,t,mpreal::default_rnd);
|
||||
mpfr_clear(t);
|
||||
|
||||
}else{
|
||||
mpfr_clear(t);
|
||||
cerr<<"error reading from istream"<<endl;
|
||||
// throw an exception
|
||||
}
|
||||
}
|
||||
}
|
||||
return is;
|
||||
}
|
||||
}
|
||||
/*
|
||||
Multi-precision real number class. C++ interface for MPFR library.
|
||||
Project homepage: http://www.holoborodko.com/pavel/
|
||||
Contact e-mail: pavel@holoborodko.com
|
||||
|
||||
Copyright (c) 2008-2010 Pavel Holoborodko
|
||||
|
||||
Core Developers:
|
||||
Pavel Holoborodko, Dmitriy Gubanov, Konstantin Holoborodko.
|
||||
|
||||
Contributors:
|
||||
Brian Gladman, Helmut Jarausch, Fokko Beekhof, Ulrich Mutze,
|
||||
Heinz van Saanen, Pere Constans, Peter van Hoof.
|
||||
|
||||
****************************************************************************
|
||||
This library is free software; you can redistribute it and/or
|
||||
modify it under the terms of the GNU Lesser General Public
|
||||
License as published by the Free Software Foundation; either
|
||||
version 2.1 of the License, or (at your option) any later version.
|
||||
|
||||
This library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public
|
||||
License along with this library; if not, write to the Free Software
|
||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
****************************************************************************
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
3. Redistributions of any form whatsoever must retain the following
|
||||
acknowledgment:
|
||||
"
|
||||
This product includes software developed by Pavel Holoborodko
|
||||
Web: http://www.holoborodko.com/pavel/
|
||||
e-mail: pavel@holoborodko.com
|
||||
"
|
||||
|
||||
4. This software cannot be, by any means, used for any commercial
|
||||
purpose without the prior permission of the copyright holder.
|
||||
|
||||
Any of the above conditions can be waived if you get permission from
|
||||
the copyright holder.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGE.
|
||||
*/
|
||||
#include <cstring>
|
||||
#include <cstdlib>
|
||||
#include "mpreal.h"
|
||||
#include "dlmalloc.h"
|
||||
|
||||
using std::ws;
|
||||
using std::cerr;
|
||||
using std::endl;
|
||||
using std::string;
|
||||
using std::ostream;
|
||||
using std::istream;
|
||||
|
||||
namespace mpfr{
|
||||
|
||||
mp_rnd_t mpreal::default_rnd = mpfr_get_default_rounding_mode();
|
||||
mp_prec_t mpreal::default_prec = mpfr_get_default_prec();
|
||||
int mpreal::default_base = 10;
|
||||
int mpreal::double_bits = -1;
|
||||
bool mpreal::is_custom_malloc = false;
|
||||
|
||||
// Default constructor: creates mp number and initializes it to 0.
|
||||
mpreal::mpreal()
|
||||
{
|
||||
set_custom_malloc();
|
||||
mpfr_init2(mp,default_prec);
|
||||
mpfr_set_ui(mp,0,default_rnd);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const mpreal& u)
|
||||
{
|
||||
set_custom_malloc();
|
||||
mpfr_init2(mp,mpfr_get_prec(u.mp));
|
||||
mpfr_set(mp,u.mp,default_rnd);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const mpfr_t u)
|
||||
{
|
||||
set_custom_malloc();
|
||||
mpfr_init2(mp,mpfr_get_prec(u));
|
||||
mpfr_set(mp,u,default_rnd);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const mpf_t u)
|
||||
{
|
||||
set_custom_malloc();
|
||||
mpfr_init2(mp,mpf_get_prec(u));
|
||||
mpfr_set_f(mp,u,default_rnd);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const mpz_t u, mp_prec_t prec, mp_rnd_t mode)
|
||||
{
|
||||
set_custom_malloc();
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_z(mp,u,mode);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const mpq_t u, mp_prec_t prec, mp_rnd_t mode)
|
||||
{
|
||||
set_custom_malloc();
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_q(mp,u,mode);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const double u, mp_prec_t prec, mp_rnd_t mode)
|
||||
{
|
||||
set_custom_malloc();
|
||||
if(double_bits == -1 || fits_in_bits(u, double_bits))
|
||||
{
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_d(mp,u,mode);
|
||||
}
|
||||
else
|
||||
throw conversion_overflow();
|
||||
}
|
||||
|
||||
mpreal::mpreal(const long double u, mp_prec_t prec, mp_rnd_t mode)
|
||||
{
|
||||
set_custom_malloc();
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_ld(mp,u,mode);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const unsigned long int u, mp_prec_t prec, mp_rnd_t mode)
|
||||
{
|
||||
set_custom_malloc();
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_ui(mp,u,mode);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const unsigned int u, mp_prec_t prec, mp_rnd_t mode)
|
||||
{
|
||||
set_custom_malloc();
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_ui(mp,u,mode);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const long int u, mp_prec_t prec, mp_rnd_t mode)
|
||||
{
|
||||
set_custom_malloc();
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_si(mp,u,mode);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const int u, mp_prec_t prec, mp_rnd_t mode)
|
||||
{
|
||||
set_custom_malloc();
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_si(mp,u,mode);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const char* s, mp_prec_t prec, int base, mp_rnd_t mode)
|
||||
{
|
||||
set_custom_malloc();
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_str(mp, s, base, mode);
|
||||
}
|
||||
|
||||
mpreal::mpreal(const std::string& s, mp_prec_t prec, int base, mp_rnd_t mode)
|
||||
{
|
||||
set_custom_malloc();
|
||||
mpfr_init2(mp,prec);
|
||||
mpfr_set_str(mp, s.c_str(), base, mode);
|
||||
}
|
||||
|
||||
mpreal::~mpreal()
|
||||
{
|
||||
mpfr_clear(mp);
|
||||
}
|
||||
|
||||
// Operators - Assignment
|
||||
mpreal& mpreal::operator=(const char* s)
|
||||
{
|
||||
mpfr_t t;
|
||||
|
||||
set_custom_malloc();
|
||||
|
||||
if(0==mpfr_init_set_str(t,s,default_base,default_rnd))
|
||||
{
|
||||
// We will rewrite mp anyway, so just flush it and resize
|
||||
mpfr_set_prec(mp,mpfr_get_prec(t)); //<- added 01.04.2011
|
||||
mpfr_set(mp,t,mpreal::default_rnd);
|
||||
mpfr_clear(t);
|
||||
}else{
|
||||
mpfr_clear(t);
|
||||
// cerr<<"fail to convert string"<<endl;
|
||||
}
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
const mpreal fma (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode)
|
||||
{
|
||||
mpreal a;
|
||||
mp_prec_t p1, p2, p3;
|
||||
|
||||
p1 = v1.get_prec();
|
||||
p2 = v2.get_prec();
|
||||
p3 = v3.get_prec();
|
||||
|
||||
a.set_prec(p3>p2?(p3>p1?p3:p1):(p2>p1?p2:p1));
|
||||
|
||||
mpfr_fma(a.mp,v1.mp,v2.mp,v3.mp,rnd_mode);
|
||||
return a;
|
||||
}
|
||||
|
||||
const mpreal fms (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode)
|
||||
{
|
||||
mpreal a;
|
||||
mp_prec_t p1, p2, p3;
|
||||
|
||||
p1 = v1.get_prec();
|
||||
p2 = v2.get_prec();
|
||||
p3 = v3.get_prec();
|
||||
|
||||
a.set_prec(p3>p2?(p3>p1?p3:p1):(p2>p1?p2:p1));
|
||||
|
||||
mpfr_fms(a.mp,v1.mp,v2.mp,v3.mp,rnd_mode);
|
||||
return a;
|
||||
}
|
||||
|
||||
const mpreal agm (const mpreal& v1, const mpreal& v2, mp_rnd_t rnd_mode)
|
||||
{
|
||||
mpreal a;
|
||||
mp_prec_t p1, p2;
|
||||
|
||||
p1 = v1.get_prec();
|
||||
p2 = v2.get_prec();
|
||||
|
||||
a.set_prec(p1>p2?p1:p2);
|
||||
|
||||
mpfr_agm(a.mp, v1.mp, v2.mp, rnd_mode);
|
||||
|
||||
return a;
|
||||
}
|
||||
|
||||
const mpreal hypot (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode)
|
||||
{
|
||||
mpreal a;
|
||||
mp_prec_t yp, xp;
|
||||
|
||||
yp = y.get_prec();
|
||||
xp = x.get_prec();
|
||||
|
||||
a.set_prec(yp>xp?yp:xp);
|
||||
|
||||
mpfr_hypot(a.mp, x.mp, y.mp, rnd_mode);
|
||||
|
||||
return a;
|
||||
}
|
||||
|
||||
const mpreal sum (const mpreal tab[], unsigned long int n, mp_rnd_t rnd_mode)
|
||||
{
|
||||
mpreal x;
|
||||
mpfr_ptr* t;
|
||||
unsigned long int i;
|
||||
|
||||
t = new mpfr_ptr[n];
|
||||
for (i=0;i<n;i++) t[i] = (mpfr_ptr)tab[i].mp;
|
||||
mpfr_sum(x.mp,t,n,rnd_mode);
|
||||
delete[] t;
|
||||
return x;
|
||||
}
|
||||
|
||||
const mpreal remainder (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode)
|
||||
{
|
||||
mpreal a;
|
||||
mp_prec_t yp, xp;
|
||||
|
||||
yp = y.get_prec();
|
||||
xp = x.get_prec();
|
||||
|
||||
a.set_prec(yp>xp?yp:xp);
|
||||
|
||||
mpfr_remainder(a.mp, x.mp, y.mp, rnd_mode);
|
||||
|
||||
return a;
|
||||
}
|
||||
|
||||
const mpreal remquo (long* q, const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode)
|
||||
{
|
||||
mpreal a;
|
||||
mp_prec_t yp, xp;
|
||||
|
||||
yp = y.get_prec();
|
||||
xp = x.get_prec();
|
||||
|
||||
a.set_prec(yp>xp?yp:xp);
|
||||
|
||||
mpfr_remquo(a.mp,q, x.mp, y.mp, rnd_mode);
|
||||
|
||||
return a;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
std::string to_string(T t, std::ios_base & (*f)(std::ios_base&))
|
||||
{
|
||||
std::ostringstream oss;
|
||||
oss << f << t;
|
||||
return oss.str();
|
||||
}
|
||||
|
||||
mpreal::operator std::string() const
|
||||
{
|
||||
return to_string();
|
||||
}
|
||||
|
||||
std::string mpreal::to_string(size_t n, int b, mp_rnd_t mode) const
|
||||
{
|
||||
char *s, *ns = NULL;
|
||||
size_t slen, nslen;
|
||||
mp_exp_t exp;
|
||||
string out;
|
||||
|
||||
set_custom_malloc();
|
||||
|
||||
if(mpfr_inf_p(mp))
|
||||
{
|
||||
if(mpfr_sgn(mp)>0) return "+@Inf@";
|
||||
else return "-@Inf@";
|
||||
}
|
||||
|
||||
if(mpfr_zero_p(mp)) return "0";
|
||||
if(mpfr_nan_p(mp)) return "@NaN@";
|
||||
|
||||
s = mpfr_get_str(NULL,&exp,b,0,mp,mode);
|
||||
ns = mpfr_get_str(NULL,&exp,b,n,mp,mode);
|
||||
|
||||
if(s!=NULL && ns!=NULL)
|
||||
{
|
||||
slen = strlen(s);
|
||||
nslen = strlen(ns);
|
||||
if(nslen<=slen)
|
||||
{
|
||||
mpfr_free_str(s);
|
||||
s = ns;
|
||||
slen = nslen;
|
||||
}
|
||||
else {
|
||||
mpfr_free_str(ns);
|
||||
}
|
||||
|
||||
// Make human eye-friendly formatting if possible
|
||||
if (exp>0 && static_cast<size_t>(exp)<slen)
|
||||
{
|
||||
if(s[0]=='-')
|
||||
{
|
||||
// Remove zeros starting from right end
|
||||
char* ptr = s+slen-1;
|
||||
while (*ptr=='0' && ptr>s+exp) ptr--;
|
||||
|
||||
if(ptr==s+exp) out = string(s,exp+1);
|
||||
else out = string(s,exp+1)+'.'+string(s+exp+1,ptr-(s+exp+1)+1);
|
||||
|
||||
//out = string(s,exp+1)+'.'+string(s+exp+1);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Remove zeros starting from right end
|
||||
char* ptr = s+slen-1;
|
||||
while (*ptr=='0' && ptr>s+exp-1) ptr--;
|
||||
|
||||
if(ptr==s+exp-1) out = string(s,exp);
|
||||
else out = string(s,exp)+'.'+string(s+exp,ptr-(s+exp)+1);
|
||||
|
||||
//out = string(s,exp)+'.'+string(s+exp);
|
||||
}
|
||||
|
||||
}else{ // exp<0 || exp>slen
|
||||
if(s[0]=='-')
|
||||
{
|
||||
// Remove zeros starting from right end
|
||||
char* ptr = s+slen-1;
|
||||
while (*ptr=='0' && ptr>s+1) ptr--;
|
||||
|
||||
if(ptr==s+1) out = string(s,2);
|
||||
else out = string(s,2)+'.'+string(s+2,ptr-(s+2)+1);
|
||||
|
||||
//out = string(s,2)+'.'+string(s+2);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Remove zeros starting from right end
|
||||
char* ptr = s+slen-1;
|
||||
while (*ptr=='0' && ptr>s) ptr--;
|
||||
|
||||
if(ptr==s) out = string(s,1);
|
||||
else out = string(s,1)+'.'+string(s+1,ptr-(s+1)+1);
|
||||
|
||||
//out = string(s,1)+'.'+string(s+1);
|
||||
}
|
||||
|
||||
// Make final string
|
||||
if(--exp)
|
||||
{
|
||||
if(exp>0) out += "e+"+mpfr::to_string<mp_exp_t>(exp,std::dec);
|
||||
else out += "e"+mpfr::to_string<mp_exp_t>(exp,std::dec);
|
||||
}
|
||||
}
|
||||
|
||||
mpfr_free_str(s);
|
||||
return out;
|
||||
}else{
|
||||
return "conversion error!";
|
||||
}
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
// I/O
|
||||
ostream& operator<<(ostream& os, const mpreal& v)
|
||||
{
|
||||
return os<<v.to_string(static_cast<size_t>(os.precision()));
|
||||
}
|
||||
|
||||
istream& operator>>(istream &is, mpreal& v)
|
||||
{
|
||||
char c;
|
||||
string s = "";
|
||||
mpfr_t t;
|
||||
|
||||
mpreal::set_custom_malloc();
|
||||
|
||||
if(is.good())
|
||||
{
|
||||
is>>ws;
|
||||
while ((c = is.get())!=EOF)
|
||||
{
|
||||
if(c ==' ' || c == '\t' || c == '\n' || c == '\r')
|
||||
{
|
||||
is.putback(c);
|
||||
break;
|
||||
}
|
||||
s += c;
|
||||
}
|
||||
|
||||
if(s.size() != 0)
|
||||
{
|
||||
// Protect current value from alteration in case of input error
|
||||
// so some error handling (roll back) procedure can be used
|
||||
|
||||
if(0==mpfr_init_set_str(t,s.c_str(),mpreal::default_base,mpreal::default_rnd))
|
||||
{
|
||||
mpfr_set(v.mp,t,mpreal::default_rnd);
|
||||
mpfr_clear(t);
|
||||
|
||||
}else{
|
||||
mpfr_clear(t);
|
||||
cerr<<"error reading from istream"<<endl;
|
||||
// throw an exception
|
||||
}
|
||||
}
|
||||
}
|
||||
return is;
|
||||
}
|
||||
|
||||
// Optimized dynamic memory allocation/(re-)deallocation.
|
||||
void * mpreal::mpreal_allocate(size_t alloc_size)
|
||||
{
|
||||
return(dlmalloc(alloc_size));
|
||||
}
|
||||
|
||||
void * mpreal::mpreal_reallocate(void *ptr, size_t /*old_size*/, size_t new_size)
|
||||
{
|
||||
return(dlrealloc(ptr,new_size));
|
||||
}
|
||||
|
||||
void mpreal::mpreal_free(void *ptr, size_t /*size*/)
|
||||
{
|
||||
dlfree(ptr);
|
||||
}
|
||||
|
||||
inline void mpreal::set_custom_malloc(void)
|
||||
{
|
||||
if(!is_custom_malloc)
|
||||
{
|
||||
mp_set_memory_functions(mpreal_allocate,mpreal_reallocate,mpreal_free);
|
||||
is_custom_malloc = true;
|
||||
}
|
||||
}
|
||||
}
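The guard above installs the dlmalloc-backed hooks into GMP exactly once, lazily, via mp_set_memory_functions, the first time any mpreal is constructed. A usage sketch (assuming the mpreal arithmetic operators, which the class provides):

#include "mpreal.h"

int main()
{
    // Constructing the first mpreal triggers set_custom_malloc(), which
    // routes all subsequent GMP/MPFR allocations through dlmalloc.
    mpfr::mpreal x = 1.0;
    mpfr::mpreal y = x + x;
    return 0;
}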
|
||||
|
||||
File diff suppressed because it is too large
|
|
@@ -41,4 +41,7 @@ void test_mpreal_support()
|
|||
}
|
||||
}
|
||||
|
||||
#include "mpreal.cpp"
|
||||
extern "C" {
|
||||
#include "mpreal/dlmalloc.c"
|
||||
}
|
||||
#include "mpreal/mpreal.cpp"
|
||||
|
|
|
|||