commit c829e52c95

@@ -1,4 +1,4 @@
repo: 8a21fd850624c931e448cbcfb38168cb2717c790
node: 10219c95fe653d4962aa9db4946f6fbea96dd740
node: c58038c56923e0fd86de3ded18e03df442e66dfb
branch: 3.2
tag: 3.2.4
tag: 3.2.6

@@ -27,3 +27,5 @@ ffa86ffb557094721ca71dcea6aed2651b9fd610 3.2.0
6b38706d90a9fe182e66ab88477b3dbde34b9f66 3.2.1
1306d75b4a21891e59ff9bd96678882cf831e39f 3.2.2
36fd1ba04c120cfdd90f3e4cede47f43b21d19ad 3.2.3
10219c95fe653d4962aa9db4946f6fbea96dd740 3.2.4
bdd17ee3b1b3a166cd5ec36dcad4fc1f3faf774a 3.2.5
@@ -123,7 +123,7 @@
#undef bool
#undef vector
#undef pixel
#elif defined __ARM_NEON__
#elif defined __ARM_NEON
#define EIGEN_VECTORIZE
#define EIGEN_VECTORIZE_NEON
#include <arm_neon.h>
@@ -235,6 +235,11 @@ template<typename _MatrixType, int _UpLo> class LDLT
}

protected:

static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}

/** \internal
* Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U.

@@ -434,6 +439,8 @@ template<typename MatrixType> struct LDLT_Traits<MatrixType,Upper>
template<typename MatrixType, int _UpLo>
LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
{
check_template_parameters();

eigen_assert(a.rows()==a.cols());
const Index size = a.rows();

@@ -457,7 +464,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
*/
template<typename MatrixType, int _UpLo>
template<typename Derived>
LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename NumTraits<typename MatrixType::Scalar>::Real& sigma)
LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename LDLT<MatrixType,_UpLo>::RealScalar& sigma)
{
const Index size = w.rows();
if (m_isInitialized)
@ -174,6 +174,12 @@ template<typename _MatrixType, int _UpLo> class LLT
|
|||
LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1);
|
||||
|
||||
protected:
|
||||
|
||||
static void check_template_parameters()
|
||||
{
|
||||
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
|
||||
}
|
||||
|
||||
/** \internal
|
||||
* Used to compute and store L
|
||||
* The strict upper part is not used and even not initialized.
|
||||
|
@ -384,6 +390,8 @@ template<typename MatrixType> struct LLT_Traits<MatrixType,Upper>
|
|||
template<typename MatrixType, int _UpLo>
|
||||
LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const MatrixType& a)
|
||||
{
|
||||
check_template_parameters();
|
||||
|
||||
eigen_assert(a.rows()==a.cols());
|
||||
const Index size = a.rows();
|
||||
m_matrix.resize(size, size);
|
||||
|
|
|
@@ -60,7 +60,7 @@ template<> struct mkl_llt<EIGTYPE> \
lda = m.outerStride(); \
\
info = LAPACKE_##MKLPREFIX##potrf( matrix_order, uplo, size, (MKLTYPE*)a, lda ); \
info = (info==0) ? -1 : 1; \
info = (info==0) ? -1 : info>0 ? info-1 : size; \
return info; \
} \
}; \
@ -439,19 +439,26 @@ struct assign_impl<Derived1, Derived2, SliceVectorizedTraversal, NoUnrolling, Ve
|
|||
typedef typename Derived1::Index Index;
|
||||
static inline void run(Derived1 &dst, const Derived2 &src)
|
||||
{
|
||||
typedef packet_traits<typename Derived1::Scalar> PacketTraits;
|
||||
typedef typename Derived1::Scalar Scalar;
|
||||
typedef packet_traits<Scalar> PacketTraits;
|
||||
enum {
|
||||
packetSize = PacketTraits::size,
|
||||
alignable = PacketTraits::AlignedOnScalar,
|
||||
dstAlignment = alignable ? Aligned : int(assign_traits<Derived1,Derived2>::DstIsAligned) ,
|
||||
dstIsAligned = assign_traits<Derived1,Derived2>::DstIsAligned,
|
||||
dstAlignment = alignable ? Aligned : int(dstIsAligned),
|
||||
srcAlignment = assign_traits<Derived1,Derived2>::JointAlignment
|
||||
};
|
||||
const Scalar *dst_ptr = &dst.coeffRef(0,0);
|
||||
if((!bool(dstIsAligned)) && (size_t(dst_ptr) % sizeof(Scalar))>0)
|
||||
{
|
||||
// the pointer is not aligend-on scalar, so alignment is not possible
|
||||
return assign_impl<Derived1,Derived2,DefaultTraversal,NoUnrolling>::run(dst, src);
|
||||
}
|
||||
const Index packetAlignedMask = packetSize - 1;
|
||||
const Index innerSize = dst.innerSize();
|
||||
const Index outerSize = dst.outerSize();
|
||||
const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0;
|
||||
Index alignedStart = ((!alignable) || assign_traits<Derived1,Derived2>::DstIsAligned) ? 0
|
||||
: internal::first_aligned(&dst.coeffRef(0,0), innerSize);
|
||||
Index alignedStart = ((!alignable) || bool(dstIsAligned)) ? 0 : internal::first_aligned(dst_ptr, innerSize);
|
||||
|
||||
for(Index outer = 0; outer < outerSize; ++outer)
|
||||
{
|
||||
|
|
|
@@ -66,8 +66,9 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprTyp
: ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime)
: int(traits<XprType>::MaxColsAtCompileTime),
XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0,
IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
: (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
IsDense = is_same<StorageKind,Dense>::value,
IsRowMajor = (IsDense&&MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
: (IsDense&&MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
: XprTypeIsRowMajor,
HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor),
InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
@ -1,154 +0,0 @@
|
|||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
#ifndef EIGEN_COMMAINITIALIZER_H
|
||||
#define EIGEN_COMMAINITIALIZER_H
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \class CommaInitializer
|
||||
* \ingroup Core_Module
|
||||
*
|
||||
* \brief Helper class used by the comma initializer operator
|
||||
*
|
||||
* This class is internally used to implement the comma initializer feature. It is
|
||||
* the return type of MatrixBase::operator<<, and most of the time this is the only
|
||||
* way it is used.
|
||||
*
|
||||
* \sa \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished()
|
||||
*/
|
||||
template<typename XprType>
|
||||
struct CommaInitializer
|
||||
{
|
||||
typedef typename XprType::Scalar Scalar;
|
||||
typedef typename XprType::Index Index;
|
||||
|
||||
inline CommaInitializer(XprType& xpr, const Scalar& s)
|
||||
: m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1)
|
||||
{
|
||||
m_xpr.coeffRef(0,0) = s;
|
||||
}
|
||||
|
||||
template<typename OtherDerived>
|
||||
inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other)
|
||||
: m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows())
|
||||
{
|
||||
m_xpr.block(0, 0, other.rows(), other.cols()) = other;
|
||||
}
|
||||
|
||||
/* Copy/Move constructor which transfers ownership. This is crucial in
|
||||
* absence of return value optimization to avoid assertions during destruction. */
|
||||
// FIXME in C++11 mode this could be replaced by a proper RValue constructor
|
||||
inline CommaInitializer(const CommaInitializer& o)
|
||||
: m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) {
|
||||
// Mark original object as finished. In absence of R-value references we need to const_cast:
|
||||
const_cast<CommaInitializer&>(o).m_row = m_xpr.rows();
|
||||
const_cast<CommaInitializer&>(o).m_col = m_xpr.cols();
|
||||
const_cast<CommaInitializer&>(o).m_currentBlockRows = 0;
|
||||
}
|
||||
|
||||
/* inserts a scalar value in the target matrix */
|
||||
CommaInitializer& operator,(const Scalar& s)
|
||||
{
|
||||
if (m_col==m_xpr.cols())
|
||||
{
|
||||
m_row+=m_currentBlockRows;
|
||||
m_col = 0;
|
||||
m_currentBlockRows = 1;
|
||||
eigen_assert(m_row<m_xpr.rows()
|
||||
&& "Too many rows passed to comma initializer (operator<<)");
|
||||
}
|
||||
eigen_assert(m_col<m_xpr.cols()
|
||||
&& "Too many coefficients passed to comma initializer (operator<<)");
|
||||
eigen_assert(m_currentBlockRows==1);
|
||||
m_xpr.coeffRef(m_row, m_col++) = s;
|
||||
return *this;
|
||||
}
|
||||
|
||||
/* inserts a matrix expression in the target matrix */
|
||||
template<typename OtherDerived>
|
||||
CommaInitializer& operator,(const DenseBase<OtherDerived>& other)
|
||||
{
|
||||
if(other.cols()==0 || other.rows()==0)
|
||||
return *this;
|
||||
if (m_col==m_xpr.cols())
|
||||
{
|
||||
m_row+=m_currentBlockRows;
|
||||
m_col = 0;
|
||||
m_currentBlockRows = other.rows();
|
||||
eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows()
|
||||
&& "Too many rows passed to comma initializer (operator<<)");
|
||||
}
|
||||
eigen_assert(m_col<m_xpr.cols()
|
||||
&& "Too many coefficients passed to comma initializer (operator<<)");
|
||||
eigen_assert(m_currentBlockRows==other.rows());
|
||||
if (OtherDerived::SizeAtCompileTime != Dynamic)
|
||||
m_xpr.template block<OtherDerived::RowsAtCompileTime != Dynamic ? OtherDerived::RowsAtCompileTime : 1,
|
||||
OtherDerived::ColsAtCompileTime != Dynamic ? OtherDerived::ColsAtCompileTime : 1>
|
||||
(m_row, m_col) = other;
|
||||
else
|
||||
m_xpr.block(m_row, m_col, other.rows(), other.cols()) = other;
|
||||
m_col += other.cols();
|
||||
return *this;
|
||||
}
|
||||
|
||||
inline ~CommaInitializer()
|
||||
{
|
||||
eigen_assert((m_row+m_currentBlockRows) == m_xpr.rows()
|
||||
&& m_col == m_xpr.cols()
|
||||
&& "Too few coefficients passed to comma initializer (operator<<)");
|
||||
}
|
||||
|
||||
/** \returns the built matrix once all its coefficients have been set.
|
||||
* Calling finished is 100% optional. Its purpose is to write expressions
|
||||
* like this:
|
||||
* \code
|
||||
* quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished());
|
||||
* \endcode
|
||||
*/
|
||||
inline XprType& finished() { return m_xpr; }
|
||||
|
||||
XprType& m_xpr; // target expression
|
||||
Index m_row; // current row id
|
||||
Index m_col; // current col id
|
||||
Index m_currentBlockRows; // current block height
|
||||
};
|
||||
|
||||
/** \anchor MatrixBaseCommaInitRef
|
||||
* Convenient operator to set the coefficients of a matrix.
|
||||
*
|
||||
* The coefficients must be provided in a row major order and exactly match
|
||||
* the size of the matrix. Otherwise an assertion is raised.
|
||||
*
|
||||
* Example: \include MatrixBase_set.cpp
|
||||
* Output: \verbinclude MatrixBase_set.out
|
||||
*
|
||||
* \note According the c++ standard, the argument expressions of this comma initializer are evaluated in arbitrary order.
|
||||
*
|
||||
* \sa CommaInitializer::finished(), class CommaInitializer
|
||||
*/
|
||||
template<typename Derived>
|
||||
inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s)
|
||||
{
|
||||
return CommaInitializer<Derived>(*static_cast<Derived*>(this), s);
|
||||
}
|
||||
|
||||
/** \sa operator<<(const Scalar&) */
|
||||
template<typename Derived>
|
||||
template<typename OtherDerived>
|
||||
inline CommaInitializer<Derived>
|
||||
DenseBase<Derived>::operator<<(const DenseBase<OtherDerived>& other)
|
||||
{
|
||||
return CommaInitializer<Derived>(*static_cast<Derived *>(this), other);
|
||||
}
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_COMMAINITIALIZER_H
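For readers skimming the CommaInitializer code above, a minimal usage sketch (hypothetical example, not part of this changeset; Matrix3i and the column shuffle are made up for illustration) shows the row-major fill order that the assertions enforce and the optional finished() call:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main()
{
  // Coefficients are consumed row by row; an assertion fires if too few
  // or too many values are passed before the initializer is destroyed.
  Eigen::Matrix3i m;
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;

  // finished() returns the filled matrix, which allows inline construction
  // from sub-expressions (here the columns of m in reverse order).
  Eigen::Matrix3i n = (Eigen::Matrix3i() << m.col(2), m.col(1), m.col(0)).finished();

  std::cout << m << "\n\n" << n << "\n";
  return 0;
}
```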
|
|
@ -183,10 +183,6 @@ template<typename Derived> class DenseBase
|
|||
/** \returns the number of nonzero coefficients which is in practice the number
|
||||
* of stored coefficients. */
|
||||
inline Index nonZeros() const { return size(); }
|
||||
/** \returns true if either the number of rows or the number of columns is equal to 1.
|
||||
* In other words, this function returns
|
||||
* \code rows()==1 || cols()==1 \endcode
|
||||
* \sa rows(), cols(), IsVectorAtCompileTime. */
|
||||
|
||||
/** \returns the outer size.
|
||||
*
|
||||
|
@ -266,11 +262,13 @@ template<typename Derived> class DenseBase
|
|||
template<typename OtherDerived>
|
||||
Derived& operator=(const ReturnByValue<OtherDerived>& func);
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
/** Copies \a other into *this without evaluating other. \returns a reference to *this. */
|
||||
/** \internal Copies \a other into *this without evaluating other. \returns a reference to *this. */
|
||||
template<typename OtherDerived>
|
||||
Derived& lazyAssign(const DenseBase<OtherDerived>& other);
|
||||
#endif // not EIGEN_PARSED_BY_DOXYGEN
|
||||
|
||||
/** \internal Evaluates \a other into *this. \returns a reference to *this. */
|
||||
template<typename OtherDerived>
|
||||
Derived& lazyAssign(const ReturnByValue<OtherDerived>& other);
|
||||
|
||||
CommaInitializer<Derived> operator<< (const Scalar& s);
|
||||
|
||||
|
|
|
@@ -34,7 +34,7 @@ struct traits<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && (_ScalarAccessOnDiag || (bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))),
_LinearAccessMask = (RowsAtCompileTime==1 || ColsAtCompileTime==1) ? LinearAccessBit : 0,

Flags = ((HereditaryBits|_LinearAccessMask) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0) | AlignedBit,//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit),
Flags = ((HereditaryBits|_LinearAccessMask|AlignedBit) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0),//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit),
CoeffReadCost = NumTraits<Scalar>::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost
};
};
@@ -259,6 +259,47 @@ template<> struct functor_traits<scalar_boolean_or_op> {
};
};

/** \internal
* \brief Template functors for comparison of two scalars
* \todo Implement packet-comparisons
*/
template<typename Scalar, ComparisonName cmp> struct scalar_cmp_op;

template<typename Scalar, ComparisonName cmp>
struct functor_traits<scalar_cmp_op<Scalar, cmp> > {
enum {
Cost = NumTraits<Scalar>::AddCost,
PacketAccess = false
};
};

template<ComparisonName Cmp, typename Scalar>
struct result_of<scalar_cmp_op<Scalar, Cmp>(Scalar,Scalar)> {
typedef bool type;
};


template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_EQ> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a==b;}
};
template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_LT> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a<b;}
};
template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_LE> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a<=b;}
};
template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_UNORD> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return !(a<=b || b<=a);}
};
template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_NEQ> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a!=b;}
};

// unary functors:

/** \internal
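A small sketch of how the comparison functors introduced above behave when used directly (illustrative only; inside Eigen they are reached through coefficient-wise comparisons, and this assumes a version of Eigen that already contains this hunk):

```cpp
#include <Eigen/Core>
#include <cassert>
#include <limits>

int main()
{
  using namespace Eigen::internal;

  scalar_cmp_op<double, cmp_LE>    le;     // a <= b
  scalar_cmp_op<double, cmp_UNORD> unord;  // true only if a and b cannot be ordered (NaN involved)

  const double nan = std::numeric_limits<double>::quiet_NaN();

  assert( le(1.0, 2.0));
  assert(!le(2.0, 1.0));
  assert( unord(nan, 1.0));   // !(a<=b || b<=a) holds when either operand is NaN
  assert(!unord(1.0, 2.0));
  return 0;
}
```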
@ -257,7 +257,7 @@ template<typename Lhs, typename Rhs>
|
|||
class GeneralProduct<Lhs, Rhs, OuterProduct>
|
||||
: public ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs>
|
||||
{
|
||||
template<typename T> struct IsRowMajor : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};
|
||||
template<typename T> struct is_row_major : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};
|
||||
|
||||
public:
|
||||
EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
|
||||
|
@ -281,22 +281,22 @@ class GeneralProduct<Lhs, Rhs, OuterProduct>
|
|||
|
||||
template<typename Dest>
|
||||
inline void evalTo(Dest& dest) const {
|
||||
internal::outer_product_selector_run(*this, dest, set(), IsRowMajor<Dest>());
|
||||
internal::outer_product_selector_run(*this, dest, set(), is_row_major<Dest>());
|
||||
}
|
||||
|
||||
template<typename Dest>
|
||||
inline void addTo(Dest& dest) const {
|
||||
internal::outer_product_selector_run(*this, dest, add(), IsRowMajor<Dest>());
|
||||
internal::outer_product_selector_run(*this, dest, add(), is_row_major<Dest>());
|
||||
}
|
||||
|
||||
template<typename Dest>
|
||||
inline void subTo(Dest& dest) const {
|
||||
internal::outer_product_selector_run(*this, dest, sub(), IsRowMajor<Dest>());
|
||||
internal::outer_product_selector_run(*this, dest, sub(), is_row_major<Dest>());
|
||||
}
|
||||
|
||||
template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
|
||||
{
|
||||
internal::outer_product_selector_run(*this, dest, adds(alpha), IsRowMajor<Dest>());
|
||||
internal::outer_product_selector_run(*this, dest, adds(alpha), is_row_major<Dest>());
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@@ -123,7 +123,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
return internal::ploadt<PacketScalar, LoadMode>(m_data + index * innerStride());
}

inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
explicit inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
{
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
checkSanity();

@@ -157,7 +157,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
internal::inner_stride_at_compile_time<Derived>::ret==1),
PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
eigen_assert(EIGEN_IMPLIES(internal::traits<Derived>::Flags&AlignedBit, (size_t(m_data) % 16) == 0)
&& "data is not aligned");
&& "input pointer is not aligned on a 16 byte boundary");
}

PointerType m_data;
@@ -294,7 +294,7 @@ struct hypot_impl
RealScalar _x = abs(x);
RealScalar _y = abs(y);
RealScalar p = (max)(_x, _y);
if(p==RealScalar(0)) return 0;
if(p==RealScalar(0)) return RealScalar(0);
RealScalar q = (min)(_x, _y);
RealScalar qp = q/p;
return p * sqrt(RealScalar(1) + qp*qp);
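For context, the scaling in hypot_impl above is the usual overflow-safe rewrite; with p and q as in the code it evaluates

```latex
\[
\sqrt{x^2 + y^2} \;=\; p\,\sqrt{1 + \left(\tfrac{q}{p}\right)^2},
\qquad p = \max(|x|,|y|),\quad q = \min(|x|,|y|),
\]
```

so the only squared quantity is q/p <= 1, and the changed line merely returns RealScalar(0) instead of the int literal 0 when p is exactly zero.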
@ -159,13 +159,11 @@ template<typename Derived> class MatrixBase
|
|||
template<typename OtherDerived>
|
||||
Derived& operator=(const ReturnByValue<OtherDerived>& other);
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
template<typename ProductDerived, typename Lhs, typename Rhs>
|
||||
Derived& lazyAssign(const ProductBase<ProductDerived, Lhs,Rhs>& other);
|
||||
|
||||
template<typename MatrixPower, typename Lhs, typename Rhs>
|
||||
Derived& lazyAssign(const MatrixPowerProduct<MatrixPower, Lhs,Rhs>& other);
|
||||
#endif // not EIGEN_PARSED_BY_DOXYGEN
|
||||
|
||||
template<typename OtherDerived>
|
||||
Derived& operator+=(const MatrixBase<OtherDerived>& other);
|
||||
|
|
|
@@ -250,6 +250,35 @@ class PermutationBase : public EigenBase<Derived>
template<typename Other> friend
inline PlainPermutationType operator*(const Transpose<PermutationBase<Other> >& other, const PermutationBase& perm)
{ return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); }

/** \returns the determinant of the permutation matrix, which is either 1 or -1 depending on the parity of the permutation.
*
* This function is O(\c n) procedure allocating a buffer of \c n booleans.
*/
Index determinant() const
{
Index res = 1;
Index n = size();
Matrix<bool,RowsAtCompileTime,1,0,MaxRowsAtCompileTime> mask(n);
mask.fill(false);
Index r = 0;
while(r < n)
{
// search for the next seed
while(r<n && mask[r]) r++;
if(r>=n)
break;
// we got one, let's follow it until we are back to the seed
Index k0 = r++;
mask.coeffRef(k0) = true;
for(Index k=indices().coeff(k0); k!=k0; k=indices().coeff(k))
{
mask.coeffRef(k) = true;
res = -res;
}
}
return res;
}

protected:
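A minimal usage sketch for the determinant() member added above (hypothetical example; the 4x4 size and the single transposition are made up): a permutation consisting of one swap has determinant -1, matching the dense determinant of the same matrix.

```cpp
#include <Eigen/Dense>
#include <algorithm>
#include <iostream>

int main()
{
  // A 4x4 permutation built from a single transposition of indices 0 and 2.
  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic> P(4);
  P.setIdentity();
  std::swap(P.indices()(0), P.indices()(2));

  std::cout << P.determinant() << "\n";                  // prints -1 (odd permutation)
  std::cout << Eigen::MatrixXd(P).determinant() << "\n"; // same value via the dense path
  return 0;
}
```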
@ -437,6 +437,22 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
|
|||
}
|
||||
#endif
|
||||
|
||||
/** Copy constructor */
|
||||
EIGEN_STRONG_INLINE PlainObjectBase(const PlainObjectBase& other)
|
||||
: m_storage()
|
||||
{
|
||||
_check_template_params();
|
||||
lazyAssign(other);
|
||||
}
|
||||
|
||||
template<typename OtherDerived>
|
||||
EIGEN_STRONG_INLINE PlainObjectBase(const DenseBase<OtherDerived> &other)
|
||||
: m_storage()
|
||||
{
|
||||
_check_template_params();
|
||||
lazyAssign(other);
|
||||
}
|
||||
|
||||
EIGEN_STRONG_INLINE PlainObjectBase(Index a_size, Index nbRows, Index nbCols)
|
||||
: m_storage(a_size, nbRows, nbCols)
|
||||
{
|
||||
|
@ -573,6 +589,8 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
|
|||
: (rows() == other.rows() && cols() == other.cols())))
|
||||
&& "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined");
|
||||
EIGEN_ONLY_USED_FOR_DEBUG(other);
|
||||
if(this->size()==0)
|
||||
resizeLike(other);
|
||||
#else
|
||||
resizeLike(other);
|
||||
#endif
|
||||
|
|
|
@ -108,7 +108,8 @@ struct traits<Ref<_PlainObjectType, _Options, _StrideType> >
|
|||
OuterStrideMatch = Derived::IsVectorAtCompileTime
|
||||
|| int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime),
|
||||
AlignmentMatch = (_Options!=Aligned) || ((PlainObjectType::Flags&AlignedBit)==0) || ((traits<Derived>::Flags&AlignedBit)==AlignedBit),
|
||||
MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch
|
||||
ScalarTypeMatch = internal::is_same<typename PlainObjectType::Scalar, typename Derived::Scalar>::value,
|
||||
MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch && ScalarTypeMatch
|
||||
};
|
||||
typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
|
||||
};
|
||||
|
@ -187,9 +188,11 @@ protected:
|
|||
template<typename PlainObjectType, int Options, typename StrideType> class Ref
|
||||
: public RefBase<Ref<PlainObjectType, Options, StrideType> >
|
||||
{
|
||||
private:
|
||||
typedef internal::traits<Ref> Traits;
|
||||
template<typename Derived>
|
||||
inline Ref(const PlainObjectBase<Derived>& expr);
|
||||
inline Ref(const PlainObjectBase<Derived>& expr,
|
||||
typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0);
|
||||
public:
|
||||
|
||||
typedef RefBase<Ref> Base;
|
||||
|
@ -198,13 +201,15 @@ template<typename PlainObjectType, int Options, typename StrideType> class Ref
|
|||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
template<typename Derived>
|
||||
inline Ref(PlainObjectBase<Derived>& expr)
|
||||
inline Ref(PlainObjectBase<Derived>& expr,
|
||||
typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
|
||||
{
|
||||
EIGEN_STATIC_ASSERT(static_cast<bool>(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
|
||||
Base::construct(expr.derived());
|
||||
}
|
||||
template<typename Derived>
|
||||
inline Ref(const DenseBase<Derived>& expr)
|
||||
inline Ref(const DenseBase<Derived>& expr,
|
||||
typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
|
||||
#else
|
||||
template<typename Derived>
|
||||
inline Ref(DenseBase<Derived>& expr)
|
||||
|
@ -231,13 +236,23 @@ template<typename TPlainObjectType, int Options, typename StrideType> class Ref<
|
|||
EIGEN_DENSE_PUBLIC_INTERFACE(Ref)
|
||||
|
||||
template<typename Derived>
|
||||
inline Ref(const DenseBase<Derived>& expr)
|
||||
inline Ref(const DenseBase<Derived>& expr,
|
||||
typename internal::enable_if<bool(Traits::template match<Derived>::ScalarTypeMatch),Derived>::type* = 0)
|
||||
{
|
||||
// std::cout << match_helper<Derived>::HasDirectAccess << "," << match_helper<Derived>::OuterStrideMatch << "," << match_helper<Derived>::InnerStrideMatch << "\n";
|
||||
// std::cout << int(StrideType::OuterStrideAtCompileTime) << " - " << int(Derived::OuterStrideAtCompileTime) << "\n";
|
||||
// std::cout << int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n";
|
||||
construct(expr.derived(), typename Traits::template match<Derived>::type());
|
||||
}
|
||||
|
||||
inline Ref(const Ref& other) : Base(other) {
|
||||
// copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy
|
||||
}
|
||||
|
||||
template<typename OtherRef>
|
||||
inline Ref(const RefBase<OtherRef>& other) {
|
||||
construct(other.derived(), typename Traits::template match<OtherRef>::type());
|
||||
}
|
||||
|
||||
protected:
|
||||
|
||||
|
|
|
@ -72,6 +72,8 @@ template<typename Derived> class ReturnByValue
|
|||
const Unusable& coeff(Index,Index) const { return *reinterpret_cast<const Unusable*>(this); }
|
||||
Unusable& coeffRef(Index) { return *reinterpret_cast<Unusable*>(this); }
|
||||
Unusable& coeffRef(Index,Index) { return *reinterpret_cast<Unusable*>(this); }
|
||||
template<int LoadMode> Unusable& packet(Index) const;
|
||||
template<int LoadMode> Unusable& packet(Index, Index) const;
|
||||
#endif
|
||||
};
|
||||
|
||||
|
@ -83,6 +85,15 @@ Derived& DenseBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
|
|||
return derived();
|
||||
}
|
||||
|
||||
template<typename Derived>
|
||||
template<typename OtherDerived>
|
||||
Derived& DenseBase<Derived>::lazyAssign(const ReturnByValue<OtherDerived>& other)
|
||||
{
|
||||
other.evalTo(derived());
|
||||
return derived();
|
||||
}
|
||||
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_RETURNBYVALUE_H
|
||||
|
|
|
@@ -384,6 +384,7 @@ template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
a_lo = vget_low_s32(a);
a_hi = vget_high_s32(a);
max = vpmax_s32(a_lo, a_hi);
max = vpmax_s32(max, max);

return vget_lane_s32(max, 0);
}
@ -134,7 +134,7 @@ class CoeffBasedProduct
|
|||
};
|
||||
|
||||
typedef internal::product_coeff_impl<CanVectorizeInner ? InnerVectorizedTraversal : DefaultTraversal,
|
||||
Unroll ? (InnerSize==0 ? 0 : InnerSize-1) : Dynamic,
|
||||
Unroll ? InnerSize : Dynamic,
|
||||
_LhsNested, _RhsNested, Scalar> ScalarCoeffImpl;
|
||||
|
||||
typedef CoeffBasedProduct<LhsNested,RhsNested,NestByRefBit> LazyCoeffBasedProductType;
|
||||
|
@ -185,7 +185,7 @@ class CoeffBasedProduct
|
|||
{
|
||||
PacketScalar res;
|
||||
internal::product_packet_impl<Flags&RowMajorBit ? RowMajor : ColMajor,
|
||||
Unroll ? (InnerSize==0 ? 0 : InnerSize-1) : Dynamic,
|
||||
Unroll ? InnerSize : Dynamic,
|
||||
_LhsNested, _RhsNested, PacketScalar, LoadMode>
|
||||
::run(row, col, m_lhs, m_rhs, res);
|
||||
return res;
|
||||
|
@ -243,7 +243,17 @@ struct product_coeff_impl<DefaultTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
|
|||
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
|
||||
{
|
||||
product_coeff_impl<DefaultTraversal, UnrollingIndex-1, Lhs, Rhs, RetScalar>::run(row, col, lhs, rhs, res);
|
||||
res += lhs.coeff(row, UnrollingIndex) * rhs.coeff(UnrollingIndex, col);
|
||||
res += lhs.coeff(row, UnrollingIndex-1) * rhs.coeff(UnrollingIndex-1, col);
|
||||
}
|
||||
};
|
||||
|
||||
template<typename Lhs, typename Rhs, typename RetScalar>
|
||||
struct product_coeff_impl<DefaultTraversal, 1, Lhs, Rhs, RetScalar>
|
||||
{
|
||||
typedef typename Lhs::Index Index;
|
||||
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
|
||||
{
|
||||
res = lhs.coeff(row, 0) * rhs.coeff(0, col);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -251,9 +261,9 @@ template<typename Lhs, typename Rhs, typename RetScalar>
|
|||
struct product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
|
||||
{
|
||||
typedef typename Lhs::Index Index;
|
||||
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
|
||||
static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, RetScalar &res)
|
||||
{
|
||||
res = lhs.coeff(row, 0) * rhs.coeff(0, col);
|
||||
res = RetScalar(0);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -293,6 +303,16 @@ struct product_coeff_vectorized_unroller<0, Lhs, Rhs, Packet>
|
|||
}
|
||||
};
|
||||
|
||||
template<typename Lhs, typename Rhs, typename RetScalar>
|
||||
struct product_coeff_impl<InnerVectorizedTraversal, 0, Lhs, Rhs, RetScalar>
|
||||
{
|
||||
typedef typename Lhs::Index Index;
|
||||
static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, RetScalar &res)
|
||||
{
|
||||
res = 0;
|
||||
}
|
||||
};
|
||||
|
||||
template<int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
|
||||
struct product_coeff_impl<InnerVectorizedTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
|
||||
{
|
||||
|
@ -302,8 +322,7 @@ struct product_coeff_impl<InnerVectorizedTraversal, UnrollingIndex, Lhs, Rhs, Re
|
|||
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
|
||||
{
|
||||
Packet pres;
|
||||
product_coeff_vectorized_unroller<UnrollingIndex+1-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, pres);
|
||||
product_coeff_impl<DefaultTraversal,UnrollingIndex,Lhs,Rhs,RetScalar>::run(row, col, lhs, rhs, res);
|
||||
product_coeff_vectorized_unroller<UnrollingIndex-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, pres);
|
||||
res = predux(pres);
|
||||
}
|
||||
};
|
||||
|
@ -371,7 +390,7 @@ struct product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
|
|||
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
|
||||
{
|
||||
product_packet_impl<RowMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, res);
|
||||
res = pmadd(pset1<Packet>(lhs.coeff(row, UnrollingIndex)), rhs.template packet<LoadMode>(UnrollingIndex, col), res);
|
||||
res = pmadd(pset1<Packet>(lhs.coeff(row, UnrollingIndex-1)), rhs.template packet<LoadMode>(UnrollingIndex-1, col), res);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -382,12 +401,12 @@ struct product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
|
|||
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
|
||||
{
|
||||
product_packet_impl<ColMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, res);
|
||||
res = pmadd(lhs.template packet<LoadMode>(row, UnrollingIndex), pset1<Packet>(rhs.coeff(UnrollingIndex, col)), res);
|
||||
res = pmadd(lhs.template packet<LoadMode>(row, UnrollingIndex-1), pset1<Packet>(rhs.coeff(UnrollingIndex-1, col)), res);
|
||||
}
|
||||
};
|
||||
|
||||
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
|
||||
struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
|
||||
struct product_packet_impl<RowMajor, 1, Lhs, Rhs, Packet, LoadMode>
|
||||
{
|
||||
typedef typename Lhs::Index Index;
|
||||
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
|
||||
|
@ -397,7 +416,7 @@ struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
|
|||
};
|
||||
|
||||
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
|
||||
struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
|
||||
struct product_packet_impl<ColMajor, 1, Lhs, Rhs, Packet, LoadMode>
|
||||
{
|
||||
typedef typename Lhs::Index Index;
|
||||
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
|
||||
|
@ -406,16 +425,35 @@ struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
|
|||
}
|
||||
};
|
||||
|
||||
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
|
||||
struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
|
||||
{
|
||||
typedef typename Lhs::Index Index;
|
||||
static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Packet &res)
|
||||
{
|
||||
res = pset1<Packet>(0);
|
||||
}
|
||||
};
|
||||
|
||||
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
|
||||
struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
|
||||
{
|
||||
typedef typename Lhs::Index Index;
|
||||
static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Packet &res)
|
||||
{
|
||||
res = pset1<Packet>(0);
|
||||
}
|
||||
};
|
||||
|
||||
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
|
||||
struct product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
|
||||
{
|
||||
typedef typename Lhs::Index Index;
|
||||
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res)
|
||||
{
|
||||
eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix");
|
||||
res = pmul(pset1<Packet>(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
|
||||
for(Index i = 1; i < lhs.cols(); ++i)
|
||||
res = pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
|
||||
res = pset1<Packet>(0);
|
||||
for(Index i = 0; i < lhs.cols(); ++i)
|
||||
res = pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -425,10 +463,9 @@ struct product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
|
|||
typedef typename Lhs::Index Index;
|
||||
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res)
|
||||
{
|
||||
eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix");
|
||||
res = pmul(lhs.template packet<LoadMode>(row, 0), pset1<Packet>(rhs.coeff(0, col)));
|
||||
for(Index i = 1; i < lhs.cols(); ++i)
|
||||
res = pmadd(lhs.template packet<LoadMode>(row, i), pset1<Packet>(rhs.coeff(i, col)), res);
|
||||
res = pset1<Packet>(0);
|
||||
for(Index i = 0; i < lhs.cols(); ++i)
|
||||
res = pmadd(lhs.template packet<LoadMode>(row, i), pset1<Packet>(rhs.coeff(i, col)), res);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@@ -125,19 +125,22 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpos
if(transpose)
std::swap(rows,cols);

Index blockCols = (cols / threads) & ~Index(0x3);
Index blockRows = (rows / threads) & ~Index(0x7);

GemmParallelInfo<Index>* info = new GemmParallelInfo<Index>[threads];

#pragma omp parallel for schedule(static,1) num_threads(threads)
for(Index i=0; i<threads; ++i)
#pragma omp parallel num_threads(threads)
{
Index i = omp_get_thread_num();
// Note that the actual number of threads might be lower than the number of request ones.
Index actual_threads = omp_get_num_threads();

Index blockCols = (cols / actual_threads) & ~Index(0x3);
Index blockRows = (rows / actual_threads) & ~Index(0x7);

Index r0 = i*blockRows;
Index actualBlockRows = (i+1==threads) ? rows-r0 : blockRows;
Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;

Index c0 = i*blockCols;
Index actualBlockCols = (i+1==threads) ? cols-c0 : blockCols;
Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;

info[i].rhs_start = c0;
info[i].rhs_length = actualBlockCols;
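The rewritten parallel section above sizes each block from omp_get_num_threads() because the runtime may grant fewer threads than requested. A stand-alone sketch of that pattern (illustrative values only, not Eigen code; compile with OpenMP enabled, e.g. -fopenmp):

```cpp
#include <omp.h>
#include <cstdio>

int main()
{
  const int requested = 8;

  #pragma omp parallel num_threads(requested)
  {
    // The actual team size is only known inside the region and may be smaller than 'requested'.
    const int actual = omp_get_num_threads();
    const int i      = omp_get_thread_num();

    const int rows  = 1000;
    const int block = rows / actual;
    const int r0    = i * block;
    const int len   = (i + 1 == actual) ? rows - r0 : block;  // last thread takes the remainder

    std::printf("thread %d of %d handles rows [%d, %d)\n", i, actual, r0, r0 + len);
  }
  return 0;
}
```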
@@ -109,7 +109,7 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \
/* Non-square case - doesn't fit to MKL ?TRMM. Fall to default triangular product or call MKL ?GEMM*/ \
if (rows != depth) { \
\
int nthr = mkl_domain_get_max_threads(MKL_BLAS); \
int nthr = mkl_domain_get_max_threads(EIGEN_MKL_DOMAIN_BLAS); \
\
if (((nthr==1) && (((std::max)(rows,depth)-diagSize)/(double)diagSize < 0.5))) { \
/* Most likely no benefit to call TRMM or GEMM from MKL*/ \

@@ -223,7 +223,7 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \
/* Non-square case - doesn't fit to MKL ?TRMM. Fall to default triangular product or call MKL ?GEMM*/ \
if (cols != depth) { \
\
int nthr = mkl_domain_get_max_threads(MKL_BLAS); \
int nthr = mkl_domain_get_max_threads(EIGEN_MKL_DOMAIN_BLAS); \
\
if ((nthr==1) && (((std::max)(cols,depth)-diagSize)/(double)diagSize < 0.5)) { \
/* Most likely no benefit to call TRMM or GEMM from MKL*/ \
@@ -433,6 +433,19 @@ struct MatrixXpr {};
/** The type used to identify an array expression */
struct ArrayXpr {};

namespace internal {
/** \internal
* Constants for comparison functors
*/
enum ComparisonName {
cmp_EQ = 0,
cmp_LT = 1,
cmp_LE = 2,
cmp_UNORD = 3,
cmp_NEQ = 4
};
}

} // end namespace Eigen

#endif // EIGEN_CONSTANTS_H
@ -76,6 +76,38 @@
|
|||
#include <mkl_lapacke.h>
|
||||
#define EIGEN_MKL_VML_THRESHOLD 128
|
||||
|
||||
/* MKL_DOMAIN_BLAS, etc are defined only in 10.3 update 7 */
|
||||
/* MKL_BLAS, etc are not defined in 11.2 */
|
||||
#ifdef MKL_DOMAIN_ALL
|
||||
#define EIGEN_MKL_DOMAIN_ALL MKL_DOMAIN_ALL
|
||||
#else
|
||||
#define EIGEN_MKL_DOMAIN_ALL MKL_ALL
|
||||
#endif
|
||||
|
||||
#ifdef MKL_DOMAIN_BLAS
|
||||
#define EIGEN_MKL_DOMAIN_BLAS MKL_DOMAIN_BLAS
|
||||
#else
|
||||
#define EIGEN_MKL_DOMAIN_BLAS MKL_BLAS
|
||||
#endif
|
||||
|
||||
#ifdef MKL_DOMAIN_FFT
|
||||
#define EIGEN_MKL_DOMAIN_FFT MKL_DOMAIN_FFT
|
||||
#else
|
||||
#define EIGEN_MKL_DOMAIN_FFT MKL_FFT
|
||||
#endif
|
||||
|
||||
#ifdef MKL_DOMAIN_VML
|
||||
#define EIGEN_MKL_DOMAIN_VML MKL_DOMAIN_VML
|
||||
#else
|
||||
#define EIGEN_MKL_DOMAIN_VML MKL_VML
|
||||
#endif
|
||||
|
||||
#ifdef MKL_DOMAIN_PARDISO
|
||||
#define EIGEN_MKL_DOMAIN_PARDISO MKL_DOMAIN_PARDISO
|
||||
#else
|
||||
#define EIGEN_MKL_DOMAIN_PARDISO MKL_PARDISO
|
||||
#endif
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
typedef std::complex<double> dcomplex;
|
||||
|
|
|
@@ -13,7 +13,7 @@

#define EIGEN_WORLD_VERSION 3
#define EIGEN_MAJOR_VERSION 2
#define EIGEN_MINOR_VERSION 4
#define EIGEN_MINOR_VERSION 6

#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
(EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
@ -278,6 +278,7 @@ namespace Eigen {
|
|||
#error Please tell me what is the equivalent of __attribute__((aligned(n))) for your compiler
|
||||
#endif
|
||||
|
||||
#define EIGEN_ALIGN8 EIGEN_ALIGN_TO_BOUNDARY(8)
|
||||
#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)
|
||||
|
||||
#if EIGEN_ALIGN_STATICALLY
|
||||
|
@ -332,8 +333,11 @@ namespace Eigen {
|
|||
}
|
||||
#endif
|
||||
|
||||
#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
|
||||
EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
|
||||
/** \internal
|
||||
* \brief Macro to manually inherit assignment operators.
|
||||
* This is necessary, because the implicitly defined assignment operator gets deleted when a custom operator= is defined.
|
||||
*/
|
||||
#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
|
||||
|
||||
/**
|
||||
* Just a side note. Commenting within defines works only by documenting
|
||||
|
|
|
@@ -523,7 +523,7 @@ template<typename T> struct smart_copy_helper<T,false> {
// you can overwrite Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA
// to the appropriate stack allocation function
#ifndef EIGEN_ALLOCA
#if (defined __linux__)
#if (defined __linux__) || (defined __APPLE__) || (defined alloca)
#define EIGEN_ALLOCA alloca
#elif defined(_MSC_VER)
#define EIGEN_ALLOCA _alloca
@ -234,6 +234,12 @@ template<typename _MatrixType> class ComplexEigenSolver
|
|||
}
|
||||
|
||||
protected:
|
||||
|
||||
static void check_template_parameters()
|
||||
{
|
||||
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
|
||||
}
|
||||
|
||||
EigenvectorType m_eivec;
|
||||
EigenvalueType m_eivalues;
|
||||
ComplexSchur<MatrixType> m_schur;
|
||||
|
@ -251,6 +257,8 @@ template<typename MatrixType>
|
|||
ComplexEigenSolver<MatrixType>&
|
||||
ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
|
||||
{
|
||||
check_template_parameters();
|
||||
|
||||
// this code is inspired from Jampack
|
||||
eigen_assert(matrix.cols() == matrix.rows());
|
||||
|
||||
|
|
|
@ -298,6 +298,13 @@ template<typename _MatrixType> class EigenSolver
|
|||
void doComputeEigenvectors();
|
||||
|
||||
protected:
|
||||
|
||||
static void check_template_parameters()
|
||||
{
|
||||
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
|
||||
EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
|
||||
}
|
||||
|
||||
MatrixType m_eivec;
|
||||
EigenvalueType m_eivalues;
|
||||
bool m_isInitialized;
|
||||
|
@ -364,6 +371,8 @@ template<typename MatrixType>
|
|||
EigenSolver<MatrixType>&
|
||||
EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
|
||||
{
|
||||
check_template_parameters();
|
||||
|
||||
using std::sqrt;
|
||||
using std::abs;
|
||||
eigen_assert(matrix.cols() == matrix.rows());
|
||||
|
|
|
@ -263,6 +263,13 @@ template<typename _MatrixType> class GeneralizedEigenSolver
|
|||
}
|
||||
|
||||
protected:
|
||||
|
||||
static void check_template_parameters()
|
||||
{
|
||||
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
|
||||
EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
|
||||
}
|
||||
|
||||
MatrixType m_eivec;
|
||||
ComplexVectorType m_alphas;
|
||||
VectorType m_betas;
|
||||
|
@ -290,6 +297,8 @@ template<typename MatrixType>
|
|||
GeneralizedEigenSolver<MatrixType>&
|
||||
GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors)
|
||||
{
|
||||
check_template_parameters();
|
||||
|
||||
using std::sqrt;
|
||||
using std::abs;
|
||||
eigen_assert(A.cols() == A.rows() && B.cols() == A.rows() && B.cols() == B.rows());
|
||||
|
|
|
@ -240,10 +240,10 @@ namespace Eigen {
|
|||
m_S.coeffRef(i,j) = Scalar(0.0);
|
||||
m_S.rightCols(dim-j-1).applyOnTheLeft(i-1,i,G.adjoint());
|
||||
m_T.rightCols(dim-i+1).applyOnTheLeft(i-1,i,G.adjoint());
|
||||
// update Q
|
||||
if (m_computeQZ)
|
||||
m_Q.applyOnTheRight(i-1,i,G);
|
||||
}
|
||||
// update Q
|
||||
if (m_computeQZ)
|
||||
m_Q.applyOnTheRight(i-1,i,G);
|
||||
// kill T(i,i-1)
|
||||
if(m_T.coeff(i,i-1)!=Scalar(0))
|
||||
{
|
||||
|
@ -251,10 +251,10 @@ namespace Eigen {
|
|||
m_T.coeffRef(i,i-1) = Scalar(0.0);
|
||||
m_S.applyOnTheRight(i,i-1,G);
|
||||
m_T.topRows(i).applyOnTheRight(i,i-1,G);
|
||||
// update Z
|
||||
if (m_computeQZ)
|
||||
m_Z.applyOnTheLeft(i,i-1,G.adjoint());
|
||||
}
|
||||
// update Z
|
||||
if (m_computeQZ)
|
||||
m_Z.applyOnTheLeft(i,i-1,G.adjoint());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -234,7 +234,7 @@ template<typename _MatrixType> class RealSchur
|
|||
typedef Matrix<Scalar,3,1> Vector3s;
|
||||
|
||||
Scalar computeNormOfT();
|
||||
Index findSmallSubdiagEntry(Index iu, const Scalar& norm);
|
||||
Index findSmallSubdiagEntry(Index iu);
|
||||
void splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift);
|
||||
void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
|
||||
void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
|
||||
|
@ -286,7 +286,7 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMa
|
|||
{
|
||||
while (iu >= 0)
|
||||
{
|
||||
Index il = findSmallSubdiagEntry(iu, norm);
|
||||
Index il = findSmallSubdiagEntry(iu);
|
||||
|
||||
// Check for convergence
|
||||
if (il == iu) // One root found
|
||||
|
@ -343,16 +343,14 @@ inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
|
|||
|
||||
/** \internal Look for single small sub-diagonal element and returns its index */
|
||||
template<typename MatrixType>
|
||||
inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, const Scalar& norm)
|
||||
inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu)
|
||||
{
|
||||
using std::abs;
|
||||
Index res = iu;
|
||||
while (res > 0)
|
||||
{
|
||||
Scalar s = abs(m_matT.coeff(res-1,res-1)) + abs(m_matT.coeff(res,res));
|
||||
if (s == 0.0)
|
||||
s = norm;
|
||||
if (abs(m_matT.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
|
||||
if (abs(m_matT.coeff(res,res-1)) <= NumTraits<Scalar>::epsilon() * s)
|
||||
break;
|
||||
res--;
|
||||
}
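Stated as a formula, the new deflation test above declares the subdiagonal entry negligible relative to its two diagonal neighbours rather than to a norm of the whole matrix:

```latex
\[
|T_{i,i-1}| \;\le\; \varepsilon \,\bigl(|T_{i-1,i-1}| + |T_{i,i}|\bigr),
\]
```

where epsilon is NumTraits<Scalar>::epsilon(); the comparison also changed from < to <=, so an exactly zero subdiagonal entry still triggers deflation even when both neighbouring diagonal entries are zero.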
|
||||
|
@ -457,9 +455,7 @@ inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const V
|
|||
const Scalar lhs = m_matT.coeff(im,im-1) * (abs(v.coeff(1)) + abs(v.coeff(2)));
|
||||
const Scalar rhs = v.coeff(0) * (abs(m_matT.coeff(im-1,im-1)) + abs(Tmm) + abs(m_matT.coeff(im+1,im+1)));
|
||||
if (abs(lhs) < NumTraits<Scalar>::epsilon() * rhs)
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -80,6 +80,8 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
|
|||
/** \brief Scalar type for matrices of type \p _MatrixType. */
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename MatrixType::Index Index;
|
||||
|
||||
typedef Matrix<Scalar,Size,Size,ColMajor,MaxColsAtCompileTime,MaxColsAtCompileTime> EigenvectorsType;
|
||||
|
||||
/** \brief Real scalar type for \p _MatrixType.
|
||||
*
|
||||
|
@ -225,7 +227,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
|
|||
*
|
||||
* \sa eigenvalues()
|
||||
*/
|
||||
const MatrixType& eigenvectors() const
|
||||
const EigenvectorsType& eigenvectors() const
|
||||
{
|
||||
eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
|
||||
eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
|
||||
|
@ -351,7 +353,12 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
|
|||
#endif // EIGEN2_SUPPORT
|
||||
|
||||
protected:
|
||||
MatrixType m_eivec;
|
||||
static void check_template_parameters()
|
||||
{
|
||||
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
|
||||
}
|
||||
|
||||
EigenvectorsType m_eivec;
|
||||
RealVectorType m_eivalues;
|
||||
typename TridiagonalizationType::SubDiagonalType m_subdiag;
|
||||
ComputationInfo m_info;
|
||||
|
@ -376,7 +383,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
|
|||
* "implicit symmetric QR step with Wilkinson shift"
|
||||
*/
|
||||
namespace internal {
|
||||
template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
|
||||
template<typename RealScalar, typename Scalar, typename Index>
|
||||
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n);
|
||||
}
|
||||
|
||||
|
@ -384,6 +391,8 @@ template<typename MatrixType>
|
|||
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
|
||||
::compute(const MatrixType& matrix, int options)
|
||||
{
|
||||
check_template_parameters();
|
||||
|
||||
using std::abs;
|
||||
eigen_assert(matrix.cols() == matrix.rows());
|
||||
eigen_assert((options&~(EigVecMask|GenEigMask))==0
|
||||
|
@ -406,7 +415,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
|
|||
|
||||
// declare some aliases
|
||||
RealVectorType& diag = m_eivalues;
|
||||
MatrixType& mat = m_eivec;
|
||||
EigenvectorsType& mat = m_eivec;
|
||||
|
||||
// map the matrix coefficients to [-1:1] to avoid over- and underflow.
|
||||
mat = matrix.template triangularView<Lower>();
|
||||
|
@ -442,7 +451,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
|
|||
while (start>0 && m_subdiag[start-1]!=0)
|
||||
start--;
|
||||
|
||||
internal::tridiagonal_qr_step<MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor>(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n);
|
||||
internal::tridiagonal_qr_step(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n);
|
||||
}
|
||||
|
||||
if (iter <= m_maxIterations * n)
|
||||
|
@ -490,7 +499,13 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
|
|||
typedef typename SolverType::MatrixType MatrixType;
|
||||
typedef typename SolverType::RealVectorType VectorType;
|
||||
typedef typename SolverType::Scalar Scalar;
|
||||
typedef typename MatrixType::Index Index;
|
||||
typedef typename SolverType::EigenvectorsType EigenvectorsType;
|
||||
|
||||
/** \internal
|
||||
* Computes the roots of the characteristic polynomial of \a m.
|
||||
* For numerical stability m.trace() should be near zero and to avoid over- or underflow m should be normalized.
|
||||
*/
|
||||
static inline void computeRoots(const MatrixType& m, VectorType& roots)
|
||||
{
|
||||
using std::sqrt;
|
||||
|
@ -510,158 +525,123 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
|
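The hunk below rewrites the closed-form 3x3 eigenvalue computation. For context, with the code's notation (c2_over_3 = c2/3, rho = sqrt(a_over_3), and theta restricted to [0, pi/3]) the roots of the shifted characteristic cubic are the trigonometric (Cardano) values

```latex
\[
\lambda \;=\; \frac{c_2}{3} + 2\rho\,\cos\!\Bigl(\theta + \frac{2k\pi}{3}\Bigr),
\qquad k = 0,1,2,
\]
```

and because cos is monotonically decreasing on [0, pi], evaluating them in the order used below yields roots(0) <= roots(1) <= roots(2) directly, which is why the explicit swap-based sort is removed.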
|||
// Construct the parameters used in classifying the roots of the equation
|
||||
// and in solving the equation for the roots in closed form.
|
||||
Scalar c2_over_3 = c2*s_inv3;
|
||||
Scalar a_over_3 = (c1 - c2*c2_over_3)*s_inv3;
|
||||
if (a_over_3 > Scalar(0))
|
||||
Scalar a_over_3 = (c2*c2_over_3 - c1)*s_inv3;
|
||||
if(a_over_3<Scalar(0))
|
||||
a_over_3 = Scalar(0);
|
||||
|
||||
Scalar half_b = Scalar(0.5)*(c0 + c2_over_3*(Scalar(2)*c2_over_3*c2_over_3 - c1));
|
||||
|
||||
Scalar q = half_b*half_b + a_over_3*a_over_3*a_over_3;
|
||||
if (q > Scalar(0))
|
||||
Scalar q = a_over_3*a_over_3*a_over_3 - half_b*half_b;
|
||||
if(q<Scalar(0))
|
||||
q = Scalar(0);
|
||||
|
||||
// Compute the eigenvalues by solving for the roots of the polynomial.
|
||||
Scalar rho = sqrt(-a_over_3);
|
||||
Scalar theta = atan2(sqrt(-q),half_b)*s_inv3;
|
||||
Scalar rho = sqrt(a_over_3);
|
||||
Scalar theta = atan2(sqrt(q),half_b)*s_inv3; // since sqrt(q) > 0, atan2 is in [0, pi] and theta is in [0, pi/3]
|
||||
Scalar cos_theta = cos(theta);
|
||||
Scalar sin_theta = sin(theta);
|
||||
roots(0) = c2_over_3 + Scalar(2)*rho*cos_theta;
|
||||
roots(1) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta);
|
||||
roots(2) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta);
|
||||
|
||||
// Sort in increasing order.
|
||||
if (roots(0) >= roots(1))
|
||||
std::swap(roots(0),roots(1));
|
||||
if (roots(1) >= roots(2))
|
||||
{
|
||||
std::swap(roots(1),roots(2));
|
||||
if (roots(0) >= roots(1))
|
||||
std::swap(roots(0),roots(1));
|
||||
}
|
||||
// roots are already sorted, since cos is monotonically decreasing on [0, pi]
|
||||
roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); // == 2*rho*cos(theta+2pi/3)
|
||||
roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); // == 2*rho*cos(theta+ pi/3)
|
||||
roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta;
|
||||
}
|
||||
|
||||
|
||||
static inline bool extract_kernel(MatrixType& mat, Ref<VectorType> res, Ref<VectorType> representative)
|
||||
{
|
||||
using std::abs;
|
||||
Index i0;
|
||||
// Find non-zero column i0 (by construction, there must exist a non zero coefficient on the diagonal):
|
||||
mat.diagonal().cwiseAbs().maxCoeff(&i0);
|
||||
// mat.col(i0) is a good candidate for an orthogonal vector to the current eigenvector,
|
||||
// so let's save it:
|
||||
representative = mat.col(i0);
|
||||
Scalar n0, n1;
|
||||
VectorType c0, c1;
|
||||
n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm();
|
||||
n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm();
|
||||
if(n0>n1) res = c0/std::sqrt(n0);
|
||||
else res = c1/std::sqrt(n1);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline void run(SolverType& solver, const MatrixType& mat, int options)
|
||||
{
|
||||
using std::sqrt;
|
||||
eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows());
|
||||
eigen_assert((options&~(EigVecMask|GenEigMask))==0
|
||||
&& (options&EigVecMask)!=EigVecMask
|
||||
&& "invalid option parameter");
|
||||
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
|
||||
|
||||
MatrixType& eivecs = solver.m_eivec;
|
||||
EigenvectorsType& eivecs = solver.m_eivec;
|
||||
VectorType& eivals = solver.m_eivalues;
|
||||
|
||||
// map the matrix coefficients to [-1:1] to avoid over- and underflow.
|
||||
Scalar scale = mat.cwiseAbs().maxCoeff();
|
||||
MatrixType scaledMat = mat / scale;
|
||||
// Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.
|
||||
Scalar shift = mat.trace() / Scalar(3);
|
||||
// TODO Avoid this copy. Currently it is necessary to suppress bogus values when determining maxCoeff and for computing the eigenvectors later
|
||||
MatrixType scaledMat = mat.template selfadjointView<Lower>();
|
||||
scaledMat.diagonal().array() -= shift;
|
||||
Scalar scale = scaledMat.cwiseAbs().maxCoeff();
|
||||
if(scale > 0) scaledMat /= scale; // TODO for scale==0 we could save the remaining operations
|
||||
|
||||
// compute the eigenvalues
|
||||
computeRoots(scaledMat,eivals);
|
||||
|
||||
// compute the eigen vectors
|
||||
// compute the eigenvectors
|
||||
if(computeEigenvectors)
|
||||
{
|
||||
Scalar safeNorm2 = Eigen::NumTraits<Scalar>::epsilon();
|
||||
if((eivals(2)-eivals(0))<=Eigen::NumTraits<Scalar>::epsilon())
|
||||
{
|
||||
// All three eigenvalues are numerically the same
|
||||
eivecs.setIdentity();
|
||||
}
|
||||
else
|
||||
{
|
||||
scaledMat = scaledMat.template selfadjointView<Lower>();
|
||||
MatrixType tmp;
|
||||
tmp = scaledMat;
|
||||
|
||||
// Compute the eigenvector of the most distinct eigenvalue
|
||||
Scalar d0 = eivals(2) - eivals(1);
|
||||
Scalar d1 = eivals(1) - eivals(0);
|
||||
int k = d0 > d1 ? 2 : 0;
|
||||
d0 = d0 > d1 ? d0 : d1;
|
||||
|
||||
tmp.diagonal().array () -= eivals(k);
|
||||
VectorType cross;
|
||||
Scalar n;
|
||||
n = (cross = tmp.row(0).cross(tmp.row(1))).squaredNorm();
|
||||
|
||||
if(n>safeNorm2)
|
||||
Index k(0), l(2);
|
||||
if(d0 > d1)
|
||||
{
|
||||
eivecs.col(k) = cross / sqrt(n);
|
||||
std::swap(k,l);
|
||||
d0 = d1;
|
||||
}
|
||||
|
||||
// Compute the eigenvector of index k
|
||||
{
|
||||
tmp.diagonal().array () -= eivals(k);
|
||||
// By construction, 'tmp' is of rank 2, and its kernel corresponds to the respective eigenvector.
|
||||
extract_kernel(tmp, eivecs.col(k), eivecs.col(l));
|
||||
}
|
||||
|
||||
// Compute eigenvector of index l
|
||||
if(d0<=2*Eigen::NumTraits<Scalar>::epsilon()*d1)
|
||||
{
|
||||
// If d0 is too small, then the two other eigenvalues are numerically the same,
|
||||
// and thus we only have to ortho-normalize the near orthogonal vector we saved above.
|
||||
eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l))*eivecs.col(l);
|
||||
eivecs.col(l).normalize();
|
||||
}
|
||||
else
|
||||
{
|
||||
n = (cross = tmp.row(0).cross(tmp.row(2))).squaredNorm();
|
||||
tmp = scaledMat;
|
||||
tmp.diagonal().array () -= eivals(l);
|
||||
|
||||
if(n>safeNorm2)
|
||||
{
|
||||
eivecs.col(k) = cross / sqrt(n);
|
||||
}
|
||||
else
|
||||
{
|
||||
n = (cross = tmp.row(1).cross(tmp.row(2))).squaredNorm();
|
||||
|
||||
if(n>safeNorm2)
|
||||
{
|
||||
eivecs.col(k) = cross / sqrt(n);
|
||||
}
|
||||
else
|
||||
{
|
||||
// the input matrix and/or the eigenvalues probably contain some inf/NaN,
|
||||
// => exit
|
||||
// scale back to the original size.
|
||||
eivals *= scale;
|
||||
|
||||
solver.m_info = NumericalIssue;
|
||||
solver.m_isInitialized = true;
|
||||
solver.m_eigenvectorsOk = computeEigenvectors;
|
||||
return;
|
||||
}
|
||||
}
|
||||
VectorType dummy;
|
||||
extract_kernel(tmp, eivecs.col(l), dummy);
|
||||
}
|
||||
|
||||
tmp = scaledMat;
|
||||
tmp.diagonal().array() -= eivals(1);
|
||||
|
||||
if(d0<=Eigen::NumTraits<Scalar>::epsilon())
|
||||
{
|
||||
eivecs.col(1) = eivecs.col(k).unitOrthogonal();
|
||||
}
|
||||
else
|
||||
{
|
||||
n = (cross = eivecs.col(k).cross(tmp.row(0))).squaredNorm();
|
||||
if(n>safeNorm2)
|
||||
{
|
||||
eivecs.col(1) = cross / sqrt(n);
|
||||
}
|
||||
else
|
||||
{
|
||||
n = (cross = eivecs.col(k).cross(tmp.row(1))).squaredNorm();
|
||||
if(n>safeNorm2)
|
||||
eivecs.col(1) = cross / sqrt(n);
|
||||
else
|
||||
{
|
||||
n = (cross = eivecs.col(k).cross(tmp.row(2))).squaredNorm();
|
||||
if(n>safeNorm2)
|
||||
eivecs.col(1) = cross / sqrt(n);
|
||||
else
|
||||
{
|
||||
// we should never reach this point,
|
||||
// if so the last two eigenvalues are likely to be very close to each other
|
||||
eivecs.col(1) = eivecs.col(k).unitOrthogonal();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// make sure that eivecs[1] is orthogonal to eivecs[2]
|
||||
// FIXME: this step should not be needed
|
||||
Scalar d = eivecs.col(1).dot(eivecs.col(k));
|
||||
eivecs.col(1) = (eivecs.col(1) - d * eivecs.col(k)).normalized();
|
||||
}
|
||||
|
||||
eivecs.col(k==2 ? 0 : 2) = eivecs.col(k).cross(eivecs.col(1)).normalized();
|
||||
// Compute last eigenvector from the other two
|
||||
eivecs.col(1) = eivecs.col(2).cross(eivecs.col(0)).normalized();
|
||||
}
|
||||
}
|
||||
|
||||
// Rescale back to the original size.
|
||||
eivals *= scale;
|
||||
eivals.array() += shift;
|
||||
|
||||
solver.m_info = Success;
|
||||
solver.m_isInitialized = true;
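A minimal usage sketch of the direct 3x3 path patched above (not part of the diff, assuming Eigen 3.2 headers): computeDirect() dispatches to this closed-form solver, while compute() keeps the iterative tridiagonal QR path.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d A;
  A << 4, 1, 0,
       1, 3, 1,
       0, 1, 2;                                   // symmetric input
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> es;
  es.computeDirect(A);                            // closed-form 3x3 branch above
  if(es.info() == Eigen::Success)
  {
    std::cout << "eigenvalues:\n"  << es.eigenvalues()  << "\n";
    std::cout << "eigenvectors:\n" << es.eigenvectors() << "\n";
  }
  return 0;
}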
|
||||
|
@ -675,11 +655,12 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
|
|||
typedef typename SolverType::MatrixType MatrixType;
|
||||
typedef typename SolverType::RealVectorType VectorType;
|
||||
typedef typename SolverType::Scalar Scalar;
|
||||
typedef typename SolverType::EigenvectorsType EigenvectorsType;
|
||||
|
||||
static inline void computeRoots(const MatrixType& m, VectorType& roots)
|
||||
{
|
||||
using std::sqrt;
|
||||
const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*m(1,0)*m(1,0));
|
||||
const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0)));
|
||||
const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));
|
||||
roots(0) = t1 - t0;
|
||||
roots(1) = t1 + t0;
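For a selfadjoint 2x2 matrix the roots computed above are t1 - t0 and t1 + t0 with t1 = (m00 + m11)/2 and t0 = sqrt((m00 - m11)^2 + 4*|m10|^2)/2; replacing m(1,0)*m(1,0) by numext::abs2(m(1,0)) keeps t0 real for complex scalars. A quick numerical check of the real case (a sketch, not part of the diff):

#include <Eigen/Dense>
#include <cassert>
#include <cmath>

int main()
{
  Eigen::Matrix2d m;
  m << 3, 1,
       1, 2;
  const double t0 = 0.5 * std::sqrt((m(0,0)-m(1,1))*(m(0,0)-m(1,1)) + 4.0*m(1,0)*m(1,0));
  const double t1 = 0.5 * (m(0,0) + m(1,1));
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix2d> es;
  es.computeDirect(m);                       // 2x2 closed-form branch above
  // Eigenvalues are returned in increasing order and must match t1 -/+ t0.
  assert(std::abs(es.eigenvalues()(0) - (t1 - t0)) < 1e-12);
  assert(std::abs(es.eigenvalues()(1) - (t1 + t0)) < 1e-12);
  return 0;
}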
|
||||
|
@ -688,13 +669,15 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
|
|||
static inline void run(SolverType& solver, const MatrixType& mat, int options)
|
||||
{
|
||||
using std::sqrt;
|
||||
using std::abs;
|
||||
|
||||
eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());
|
||||
eigen_assert((options&~(EigVecMask|GenEigMask))==0
|
||||
&& (options&EigVecMask)!=EigVecMask
|
||||
&& "invalid option parameter");
|
||||
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
|
||||
|
||||
MatrixType& eivecs = solver.m_eivec;
|
||||
EigenvectorsType& eivecs = solver.m_eivec;
|
||||
VectorType& eivals = solver.m_eivalues;
|
||||
|
||||
// map the matrix coefficients to [-1:1] to avoid over- and underflow.
|
||||
|
@ -708,22 +691,29 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
|
|||
// compute the eigen vectors
|
||||
if(computeEigenvectors)
|
||||
{
|
||||
scaledMat.diagonal().array () -= eivals(1);
|
||||
Scalar a2 = numext::abs2(scaledMat(0,0));
|
||||
Scalar c2 = numext::abs2(scaledMat(1,1));
|
||||
Scalar b2 = numext::abs2(scaledMat(1,0));
|
||||
if(a2>c2)
|
||||
if((eivals(1)-eivals(0))<=abs(eivals(1))*Eigen::NumTraits<Scalar>::epsilon())
|
||||
{
|
||||
eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
|
||||
eivecs.col(1) /= sqrt(a2+b2);
|
||||
eivecs.setIdentity();
|
||||
}
|
||||
else
|
||||
{
|
||||
eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
|
||||
eivecs.col(1) /= sqrt(c2+b2);
|
||||
}
|
||||
scaledMat.diagonal().array () -= eivals(1);
|
||||
Scalar a2 = numext::abs2(scaledMat(0,0));
|
||||
Scalar c2 = numext::abs2(scaledMat(1,1));
|
||||
Scalar b2 = numext::abs2(scaledMat(1,0));
|
||||
if(a2>c2)
|
||||
{
|
||||
eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
|
||||
eivecs.col(1) /= sqrt(a2+b2);
|
||||
}
|
||||
else
|
||||
{
|
||||
eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
|
||||
eivecs.col(1) /= sqrt(c2+b2);
|
||||
}
|
||||
|
||||
eivecs.col(0) << eivecs.col(1).unitOrthogonal();
|
||||
eivecs.col(0) << eivecs.col(1).unitOrthogonal();
|
||||
}
|
||||
}
|
||||
|
||||
// Rescale back to the original size.
|
||||
|
@ -746,7 +736,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
|
|||
}
|
||||
|
||||
namespace internal {
|
||||
template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
|
||||
template<typename RealScalar, typename Scalar, typename Index>
|
||||
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
|
||||
{
|
||||
using std::abs;
|
||||
|
@ -798,8 +788,7 @@ static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index sta
|
|||
// apply the givens rotation to the unit matrix Q = Q * G
|
||||
if (matrixQ)
|
||||
{
|
||||
// FIXME if StorageOrder == RowMajor this operation is not very efficient
|
||||
Map<Matrix<Scalar,Dynamic,Dynamic,StorageOrder> > q(matrixQ,n,n);
|
||||
Map<Matrix<Scalar,Dynamic,Dynamic,ColMajor> > q(matrixQ,n,n);
|
||||
q.applyOnTheRight(k,k+1,rot);
|
||||
}
|
||||
}
|
||||
|
|
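The accumulation of Q above uses the standard Givens machinery; a stand-alone sketch of the same call pattern with illustrative values (not part of the diff):

#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd Q = Eigen::MatrixXd::Identity(4, 4);
  Eigen::JacobiRotation<double> rot;
  rot.makeGivens(3.0, 4.0);        // rotation that zeroes the second entry of (3,4)^T
  Q.applyOnTheRight(1, 2, rot);    // accumulate Q = Q * G, as done for matrixQ above
  return 0;
}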
|
@ -19,10 +19,12 @@ namespace Eigen {
|
|||
*
|
||||
* \brief An axis aligned box
|
||||
*
|
||||
* \param _Scalar the type of the scalar coefficients
|
||||
* \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
|
||||
* \tparam _Scalar the type of the scalar coefficients
|
||||
* \tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
|
||||
*
|
||||
* This class represents an axis aligned box as a pair of the minimal and maximal corners.
|
||||
* \warning The result of most methods is undefined when applied to an empty box. You can check for empty boxes using isEmpty().
|
||||
* \sa alignedboxtypedefs
|
||||
*/
|
||||
template <typename _Scalar, int _AmbientDim>
|
||||
class AlignedBox
|
||||
|
@ -40,18 +42,21 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
|
|||
/** Define constants to name the corners of a 1D, 2D or 3D axis aligned bounding box */
|
||||
enum CornerType
|
||||
{
|
||||
/** 1D names */
|
||||
/** 1D names @{ */
|
||||
Min=0, Max=1,
|
||||
/** @} */
|
||||
|
||||
/** Added names for 2D */
|
||||
/** Identifier for 2D corner @{ */
|
||||
BottomLeft=0, BottomRight=1,
|
||||
TopLeft=2, TopRight=3,
|
||||
/** @} */
|
||||
|
||||
/** Added names for 3D */
|
||||
/** Identifier for 3D corner @{ */
|
||||
BottomLeftFloor=0, BottomRightFloor=1,
|
||||
TopLeftFloor=2, TopRightFloor=3,
|
||||
BottomLeftCeil=4, BottomRightCeil=5,
|
||||
TopLeftCeil=6, TopRightCeil=7
|
||||
/** @} */
|
||||
};
|
||||
|
||||
|
||||
|
@ -63,34 +68,33 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
|
|||
inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim)
|
||||
{ setEmpty(); }
|
||||
|
||||
/** Constructs a box with extremities \a _min and \a _max. */
|
||||
/** Constructs a box with extremities \a _min and \a _max.
|
||||
* \warning If either component of \a _min is larger than the same component of \a _max, the constructed box is empty. */
|
||||
template<typename OtherVectorType1, typename OtherVectorType2>
|
||||
inline AlignedBox(const OtherVectorType1& _min, const OtherVectorType2& _max) : m_min(_min), m_max(_max) {}
|
||||
|
||||
/** Constructs a box containing a single point \a p. */
|
||||
template<typename Derived>
|
||||
inline explicit AlignedBox(const MatrixBase<Derived>& a_p)
|
||||
{
|
||||
typename internal::nested<Derived,2>::type p(a_p.derived());
|
||||
m_min = p;
|
||||
m_max = p;
|
||||
}
|
||||
inline explicit AlignedBox(const MatrixBase<Derived>& p) : m_min(p), m_max(m_min)
|
||||
{ }
|
||||
|
||||
~AlignedBox() {}
|
||||
|
||||
/** \returns the dimension in which the box holds */
|
||||
inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size() : Index(AmbientDimAtCompileTime); }
|
||||
|
||||
/** \deprecated use isEmpty */
|
||||
/** \deprecated use isEmpty() */
|
||||
inline bool isNull() const { return isEmpty(); }
|
||||
|
||||
/** \deprecated use setEmpty */
|
||||
/** \deprecated use setEmpty() */
|
||||
inline void setNull() { setEmpty(); }
|
||||
|
||||
/** \returns true if the box is empty. */
|
||||
/** \returns true if the box is empty.
|
||||
* \sa setEmpty */
|
||||
inline bool isEmpty() const { return (m_min.array() > m_max.array()).any(); }
|
||||
|
||||
/** Makes \c *this an empty box. */
|
||||
/** Makes \c *this an empty box.
|
||||
* \sa isEmpty */
|
||||
inline void setEmpty()
|
||||
{
|
||||
m_min.setConstant( ScalarTraits::highest() );
|
||||
|
@ -159,7 +163,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
|
|||
* a uniform distribution */
|
||||
inline VectorType sample() const
|
||||
{
|
||||
VectorType r;
|
||||
VectorType r(dim());
|
||||
for(Index d=0; d<dim(); ++d)
|
||||
{
|
||||
if(!ScalarTraits::IsInteger)
|
||||
|
@ -175,27 +179,34 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
|
|||
|
||||
/** \returns true if the point \a p is inside the box \c *this. */
|
||||
template<typename Derived>
|
||||
inline bool contains(const MatrixBase<Derived>& a_p) const
|
||||
inline bool contains(const MatrixBase<Derived>& p) const
|
||||
{
|
||||
typename internal::nested<Derived,2>::type p(a_p.derived());
|
||||
return (m_min.array()<=p.array()).all() && (p.array()<=m_max.array()).all();
|
||||
typename internal::nested<Derived,2>::type p_n(p.derived());
|
||||
return (m_min.array()<=p_n.array()).all() && (p_n.array()<=m_max.array()).all();
|
||||
}
|
||||
|
||||
/** \returns true if the box \a b is entirely inside the box \c *this. */
|
||||
inline bool contains(const AlignedBox& b) const
|
||||
{ return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); }
|
||||
|
||||
/** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
|
||||
/** \returns true if the box \a b is intersecting the box \c *this.
|
||||
* \sa intersection, clamp */
|
||||
inline bool intersects(const AlignedBox& b) const
|
||||
{ return (m_min.array()<=(b.max)().array()).all() && ((b.min)().array()<=m_max.array()).all(); }
|
||||
|
||||
/** Extends \c *this such that it contains the point \a p and returns a reference to \c *this.
|
||||
* \sa extend(const AlignedBox&) */
|
||||
template<typename Derived>
|
||||
inline AlignedBox& extend(const MatrixBase<Derived>& a_p)
|
||||
inline AlignedBox& extend(const MatrixBase<Derived>& p)
|
||||
{
|
||||
typename internal::nested<Derived,2>::type p(a_p.derived());
|
||||
m_min = m_min.cwiseMin(p);
|
||||
m_max = m_max.cwiseMax(p);
|
||||
typename internal::nested<Derived,2>::type p_n(p.derived());
|
||||
m_min = m_min.cwiseMin(p_n);
|
||||
m_max = m_max.cwiseMax(p_n);
|
||||
return *this;
|
||||
}
|
||||
|
||||
/** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */
|
||||
/** Extends \c *this such that it contains the box \a b and returns a reference to \c *this.
|
||||
* \sa merged, extend(const MatrixBase&) */
|
||||
inline AlignedBox& extend(const AlignedBox& b)
|
||||
{
|
||||
m_min = m_min.cwiseMin(b.m_min);
|
||||
|
@ -203,7 +214,9 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
|
|||
return *this;
|
||||
}
|
||||
|
||||
/** Clamps \c *this by the box \a b and returns a reference to \c *this. */
|
||||
/** Clamps \c *this by the box \a b and returns a reference to \c *this.
|
||||
* \note If the boxes don't intersect, the resulting box is empty.
|
||||
* \sa intersection(), intersects() */
|
||||
inline AlignedBox& clamp(const AlignedBox& b)
|
||||
{
|
||||
m_min = m_min.cwiseMax(b.m_min);
|
||||
|
@ -211,11 +224,15 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
|
|||
return *this;
|
||||
}
|
||||
|
||||
/** Returns an AlignedBox that is the intersection of \a b and \c *this */
|
||||
/** Returns an AlignedBox that is the intersection of \a b and \c *this
|
||||
* \note If the boxes don't intersect, the resulting box is empty.
|
||||
* \sa intersects(), clamp, contains() */
|
||||
inline AlignedBox intersection(const AlignedBox& b) const
|
||||
{return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); }
|
||||
|
||||
/** Returns an AlignedBox that is the union of \a b and \c *this */
|
||||
/** Returns an AlignedBox that is the union of \a b and \c *this.
|
||||
* \note Merging with an empty box may result in a box bigger than \c *this.
|
||||
* \sa extend(const AlignedBox&) */
|
||||
inline AlignedBox merged(const AlignedBox& b) const
|
||||
{ return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); }
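A short usage sketch of the predicates documented above, assuming the intersects() helper introduced in this hunk (values are illustrative, not part of the diff):

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  typedef Eigen::AlignedBox<float,3> Box3;
  Box3 a(Eigen::Vector3f(0,0,0), Eigen::Vector3f(2,2,2));
  Box3 b(Eigen::Vector3f(1,1,1), Eigen::Vector3f(3,3,3));

  std::cout << a.contains(Eigen::Vector3f(1,1,1)) << "\n";   // 1: point inside a
  std::cout << a.intersects(b) << "\n";                      // 1: boxes overlap
  Box3 i = a.intersection(b);        // [1,2]^3, empty if the boxes do not overlap
  Box3 u = a.merged(b);              // [0,3]^3
  a.extend(Eigen::Vector3f(-1,-1,-1));   // grow a so that it contains the point
  a.clamp(u);                            // intersect a with the union box
  std::cout << i.min().transpose() << " / " << u.max().transpose() << "\n";
  return 0;
}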
|
||||
|
||||
|
@ -231,20 +248,20 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
|
|||
|
||||
/** \returns the squared distance between the point \a p and the box \c *this,
|
||||
* and zero if \a p is inside the box.
|
||||
* \sa exteriorDistance()
|
||||
* \sa exteriorDistance(const MatrixBase&), squaredExteriorDistance(const AlignedBox&)
|
||||
*/
|
||||
template<typename Derived>
|
||||
inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& a_p) const;
|
||||
inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& p) const;
|
||||
|
||||
/** \returns the squared distance between the boxes \a b and \c *this,
|
||||
* and zero if the boxes intersect.
|
||||
* \sa exteriorDistance()
|
||||
* \sa exteriorDistance(const AlignedBox&), squaredExteriorDistance(const MatrixBase&)
|
||||
*/
|
||||
inline Scalar squaredExteriorDistance(const AlignedBox& b) const;
|
||||
|
||||
/** \returns the distance between the point \a p and the box \c *this,
|
||||
* and zero if \a p is inside the box.
|
||||
* \sa squaredExteriorDistance()
|
||||
* \sa squaredExteriorDistance(const MatrixBase&), exteriorDistance(const AlignedBox&)
|
||||
*/
|
||||
template<typename Derived>
|
||||
inline NonInteger exteriorDistance(const MatrixBase<Derived>& p) const
|
||||
|
@ -252,7 +269,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
|
|||
|
||||
/** \returns the distance between the boxes \a b and \c *this,
|
||||
* and zero if the boxes intersect.
|
||||
* \sa squaredExteriorDistance()
|
||||
* \sa squaredExteriorDistance(const AlignedBox&), exteriorDistance(const MatrixBase&)
|
||||
*/
|
||||
inline NonInteger exteriorDistance(const AlignedBox& b) const
|
||||
{ using std::sqrt; return sqrt(NonInteger(squaredExteriorDistance(b))); }
|
||||
@ -79,7 +79,7 @@ template<typename MatrixType,int _Direction> class Homogeneous
|
|||
{
|
||||
if( (int(Direction)==Vertical && row==m_matrix.rows())
|
||||
|| (int(Direction)==Horizontal && col==m_matrix.cols()))
|
||||
return 1;
|
||||
return Scalar(1);
|
||||
return m_matrix.coeff(row, col);
|
||||
}
|
||||
|
||||
|
|
|
@ -102,11 +102,11 @@ public:
|
|||
/** \returns a quaternion representing an identity rotation
|
||||
* \sa MatrixBase::Identity()
|
||||
*/
|
||||
static inline Quaternion<Scalar> Identity() { return Quaternion<Scalar>(1, 0, 0, 0); }
|
||||
static inline Quaternion<Scalar> Identity() { return Quaternion<Scalar>(Scalar(1), Scalar(0), Scalar(0), Scalar(0)); }
|
||||
|
||||
/** \sa QuaternionBase::Identity(), MatrixBase::setIdentity()
|
||||
*/
|
||||
inline QuaternionBase& setIdentity() { coeffs() << 0, 0, 0, 1; return *this; }
|
||||
inline QuaternionBase& setIdentity() { coeffs() << Scalar(0), Scalar(0), Scalar(0), Scalar(1); return *this; }
|
||||
|
||||
/** \returns the squared norm of the quaternion's coefficients
|
||||
* \sa QuaternionBase::norm(), MatrixBase::squaredNorm()
|
||||
|
@ -161,7 +161,7 @@ public:
|
|||
{ return coeffs().isApprox(other.coeffs(), prec); }
|
||||
|
||||
/** return the result vector of \a v through the rotation*/
|
||||
EIGEN_STRONG_INLINE Vector3 _transformVector(Vector3 v) const;
|
||||
EIGEN_STRONG_INLINE Vector3 _transformVector(const Vector3& v) const;
|
||||
|
||||
/** \returns \c *this with scalar type casted to \a NewScalarType
|
||||
*
|
||||
|
@ -231,7 +231,7 @@ class Quaternion : public QuaternionBase<Quaternion<_Scalar,_Options> >
|
|||
public:
|
||||
typedef _Scalar Scalar;
|
||||
|
||||
EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Quaternion)
|
||||
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Quaternion)
|
||||
using Base::operator*=;
|
||||
|
||||
typedef typename internal::traits<Quaternion>::Coefficients Coefficients;
|
||||
|
@ -341,7 +341,7 @@ class Map<const Quaternion<_Scalar>, _Options >
|
|||
public:
|
||||
typedef _Scalar Scalar;
|
||||
typedef typename internal::traits<Map>::Coefficients Coefficients;
|
||||
EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map)
|
||||
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
|
||||
using Base::operator*=;
|
||||
|
||||
/** Constructs a Mapped Quaternion object from the pointer \a coeffs
|
||||
|
@ -378,7 +378,7 @@ class Map<Quaternion<_Scalar>, _Options >
|
|||
public:
|
||||
typedef _Scalar Scalar;
|
||||
typedef typename internal::traits<Map>::Coefficients Coefficients;
|
||||
EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map)
|
||||
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
|
||||
using Base::operator*=;
|
||||
|
||||
/** Constructs a Mapped Quaternion object from the pointer \a coeffs
|
||||
|
@ -461,7 +461,7 @@ EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator*= (const Quaterni
|
|||
*/
|
||||
template <class Derived>
|
||||
EIGEN_STRONG_INLINE typename QuaternionBase<Derived>::Vector3
|
||||
QuaternionBase<Derived>::_transformVector(Vector3 v) const
|
||||
QuaternionBase<Derived>::_transformVector(const Vector3& v) const
|
||||
{
|
||||
// Note that this algorithm comes from the optimization by hand
|
||||
// of the conversion to a Matrix followed by a Matrix/Vector product.
|
||||
|
@ -637,7 +637,7 @@ inline Quaternion<typename internal::traits<Derived>::Scalar> QuaternionBase<Der
|
|||
{
|
||||
// FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite() ??
|
||||
Scalar n2 = this->squaredNorm();
|
||||
if (n2 > 0)
|
||||
if (n2 > Scalar(0))
|
||||
return Quaternion<Scalar>(conjugate().coeffs() / n2);
|
||||
else
|
||||
{
|
||||
|
@ -667,12 +667,10 @@ template <class OtherDerived>
|
|||
inline typename internal::traits<Derived>::Scalar
|
||||
QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& other) const
|
||||
{
|
||||
using std::acos;
|
||||
using std::atan2;
|
||||
using std::abs;
|
||||
Scalar d = abs(this->dot(other));
|
||||
if (d>=Scalar(1))
|
||||
return Scalar(0);
|
||||
return Scalar(2) * acos(d);
|
||||
Quaternion<Scalar> d = (*this) * other.conjugate();
|
||||
return Scalar(2) * atan2( d.vec().norm(), abs(d.w()) );
|
||||
}
|
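The atan2 form above is far better conditioned than 2*acos(|<q1,q2>|) when the two rotations are nearly identical, since acos is ill-conditioned near 1. A comparison sketch (illustrative angles, not part of the diff):

#include <Eigen/Geometry>
#include <algorithm>
#include <cmath>
#include <iostream>

int main()
{
  using namespace Eigen;
  Quaterniond q1(AngleAxisd(0.3,        Vector3d::UnitZ()));
  Quaterniond q2(AngleAxisd(0.3 + 1e-9, Vector3d::UnitZ()));

  // Old formula: the dot product rounds to 1, so the distance collapses to 0.
  double acosDist = 2.0 * std::acos(std::min(1.0, std::abs(q1.dot(q2))));
  // New formula: atan2 on the relative rotation recovers the ~1e-9 angle.
  Quaterniond d = q1 * q2.conjugate();
  double atanDist = 2.0 * std::atan2(d.vec().norm(), std::abs(d.w()));

  std::cout << acosDist << " vs " << atanDist
            << " vs " << q1.angularDistance(q2) << "\n";
  return 0;
}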
||||
|
||||
|
||||
|
@ -712,7 +710,7 @@ QuaternionBase<Derived>::slerp(const Scalar& t, const QuaternionBase<OtherDerive
|
|||
scale0 = sin( ( Scalar(1) - t ) * theta) / sinTheta;
|
||||
scale1 = sin( ( t * theta) ) / sinTheta;
|
||||
}
|
||||
if(d<0) scale1 = -scale1;
|
||||
if(d<Scalar(0)) scale1 = -scale1;
|
||||
|
||||
return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs());
|
||||
}
|
||||
|
|
|
@ -65,10 +65,10 @@ class DiagonalPreconditioner
|
|||
{
|
||||
typename MatType::InnerIterator it(mat,j);
|
||||
while(it && it.index()!=j) ++it;
|
||||
if(it && it.index()==j)
|
||||
if(it && it.index()==j && it.value()!=Scalar(0))
|
||||
m_invdiag(j) = Scalar(1)/it.value();
|
||||
else
|
||||
m_invdiag(j) = 0;
|
||||
m_invdiag(j) = Scalar(1);
|
||||
}
|
||||
m_isInitialized = true;
|
||||
return *this;
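The factorization above is a plain Jacobi (diagonal) preconditioner that now falls back to the identity on missing or zero diagonal entries instead of storing an invalid 1/0. A stand-alone sketch of the same logic (jacobiApply is a hypothetical helper name, not Eigen API):

#include <Eigen/Sparse>
#include <Eigen/Dense>

// z = D^{-1} r, with D the diagonal of A; rows without a usable diagonal
// entry are left untouched (multiplied by 1), mirroring the code above.
Eigen::VectorXd jacobiApply(const Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& r)
{
  Eigen::VectorXd invdiag(A.cols());
  for(int j = 0; j < A.outerSize(); ++j)
  {
    Eigen::SparseMatrix<double>::InnerIterator it(A, j);
    while(it && it.index() != j) ++it;
    invdiag(j) = (it && it.index() == j && it.value() != 0.0) ? 1.0/it.value() : 1.0;
  }
  return invdiag.asDiagonal() * r;
}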
|
||||
@ -151,20 +151,7 @@ struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
|
|||
* \endcode
|
||||
*
|
||||
* By default the iterations start with x=0 as an initial guess of the solution.
|
||||
* One can control the start using the solveWithGuess() method. Here is a step by
|
||||
* step execution example starting with a random guess and printing the evolution
|
||||
* of the estimated error:
|
||||
* \code
|
||||
* x = VectorXd::Random(n);
|
||||
* solver.setMaxIterations(1);
|
||||
* int i = 0;
|
||||
* do {
|
||||
* x = solver.solveWithGuess(b,x);
|
||||
* std::cout << i << " : " << solver.error() << std::endl;
|
||||
* ++i;
|
||||
* } while (solver.info()!=Success && i<100);
|
||||
* \endcode
|
||||
* Note that such a step by step execution is slightly slower.
|
||||
* One can control the start using the solveWithGuess() method.
|
||||
*
|
||||
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
|
||||
*/
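The step-by-step solveWithGuess() pattern dropped from the documentation above can still be written along the following lines (a sketch; the identity matrix only stands in for a real sparse system):

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>

int main()
{
  const int n = 100;
  Eigen::SparseMatrix<double> A(n, n);
  A.setIdentity();                                   // placeholder system matrix
  Eigen::VectorXd b = Eigen::VectorXd::Random(n);

  Eigen::BiCGSTAB<Eigen::SparseMatrix<double> > solver(A);
  Eigen::VectorXd x = Eigen::VectorXd::Random(n);    // user-supplied initial guess
  solver.setMaxIterations(1);
  int i = 0;
  do {
    x = solver.solveWithGuess(b, x);                 // one sweep per call
    std::cout << i << " : " << solver.error() << std::endl;
    ++i;
  } while(solver.info() != Eigen::Success && i < 100);
  return 0;
}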
|
||||
@ -112,9 +112,9 @@ struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
|
|||
* This class allows to solve for A.x = b sparse linear problems using a conjugate gradient algorithm.
|
||||
* The sparse matrix A must be selfadjoint. The vectors x and b can be either dense or sparse.
|
||||
*
|
||||
* \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
|
||||
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
|
||||
* or Upper. Default is Lower.
|
||||
* \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix.
|
||||
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower,
|
||||
* Upper, or Lower|Upper in which the full matrix entries will be considered. Default is Lower.
|
||||
* \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
|
||||
*
|
||||
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
|
||||
|
@ -137,20 +137,7 @@ struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
|
|||
* \endcode
|
||||
*
|
||||
* By default the iterations start with x=0 as an initial guess of the solution.
|
||||
* One can control the start using the solveWithGuess() method. Here is a step by
|
||||
* step execution example starting with a random guess and printing the evolution
|
||||
* of the estimated error:
|
||||
* \code
|
||||
* x = VectorXd::Random(n);
|
||||
* cg.setMaxIterations(1);
|
||||
* int i = 0;
|
||||
* do {
|
||||
* x = cg.solveWithGuess(b,x);
|
||||
* std::cout << i << " : " << cg.error() << std::endl;
|
||||
* ++i;
|
||||
* } while (cg.info()!=Success && i<100);
|
||||
* \endcode
|
||||
* Note that such a step by step execution is slightly slower.
|
||||
* One can control the start using the solveWithGuess() method.
|
||||
*
|
||||
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
|
||||
*/
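A minimal sketch of the Lower|Upper mode described above, for the case where the full symmetric matrix is stored (the identity only stands in for a real SPD matrix):

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>

int main()
{
  const int n = 50;
  Eigen::SparseMatrix<double> A(n, n);
  A.setIdentity();                                   // placeholder SPD matrix
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  // With Lower|Upper the matrix is used as-is, avoiding the selfadjoint wrapper.
  Eigen::ConjugateGradient<Eigen::SparseMatrix<double>, Eigen::Lower|Eigen::Upper> cg(A);
  Eigen::VectorXd x = cg.solve(b);
  std::cout << "residual: " << (A*x - b).norm() << std::endl;
  return 0;
}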
|
||||
|
@ -213,6 +200,10 @@ public:
|
|||
template<typename Rhs,typename Dest>
|
||||
void _solveWithGuess(const Rhs& b, Dest& x) const
|
||||
{
|
||||
typedef typename internal::conditional<UpLo==(Lower|Upper),
|
||||
const MatrixType&,
|
||||
SparseSelfAdjointView<const MatrixType, UpLo>
|
||||
>::type MatrixWrapperType;
|
||||
m_iterations = Base::maxIterations();
|
||||
m_error = Base::m_tolerance;
|
||||
|
||||
|
@ -222,8 +213,7 @@ public:
|
|||
m_error = Base::m_tolerance;
|
||||
|
||||
typename Dest::ColXpr xj(x,j);
|
||||
internal::conjugate_gradient(mp_matrix->template selfadjointView<UpLo>(), b.col(j), xj,
|
||||
Base::m_preconditioner, m_iterations, m_error);
|
||||
internal::conjugate_gradient(MatrixWrapperType(*mp_matrix), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error);
|
||||
}
|
||||
|
||||
m_isInitialized = true;
|
||||
|
@ -234,7 +224,7 @@ public:
|
|||
template<typename Rhs,typename Dest>
|
||||
void _solve(const Rhs& b, Dest& x) const
|
||||
{
|
||||
x.setOnes();
|
||||
x.setZero();
|
||||
_solveWithGuess(b,x);
|
||||
}
|
||||
|
||||
|
|
|
@ -150,7 +150,6 @@ class IncompleteLUT : internal::noncopyable
|
|||
{
|
||||
analyzePattern(amat);
|
||||
factorize(amat);
|
||||
m_isInitialized = m_factorizationIsOk;
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
@ -235,6 +234,8 @@ void IncompleteLUT<Scalar>::analyzePattern(const _MatrixType& amat)
|
|||
m_Pinv = m_P.inverse(); // ... and the inverse permutation
|
||||
|
||||
m_analysisIsOk = true;
|
||||
m_factorizationIsOk = false;
|
||||
m_isInitialized = false;
|
||||
}
|
||||
|
||||
template <typename Scalar>
|
||||
|
@ -442,6 +443,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
|
|||
m_lu.makeCompressed();
|
||||
|
||||
m_factorizationIsOk = true;
|
||||
m_isInitialized = m_factorizationIsOk;
|
||||
m_info = Success;
|
||||
}
|
||||
|
||||
|
|
|
@ -374,6 +374,12 @@ template<typename _MatrixType> class FullPivLU
|
|||
inline Index cols() const { return m_lu.cols(); }
|
||||
|
||||
protected:
|
||||
|
||||
static void check_template_parameters()
|
||||
{
|
||||
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
|
||||
}
|
||||
|
||||
MatrixType m_lu;
|
||||
PermutationPType m_p;
|
||||
PermutationQType m_q;
|
||||
|
@ -418,6 +424,8 @@ FullPivLU<MatrixType>::FullPivLU(const MatrixType& matrix)
|
|||
template<typename MatrixType>
|
||||
FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const MatrixType& matrix)
|
||||
{
|
||||
check_template_parameters();
|
||||
|
||||
// the permutations are stored as int indices, so just to be sure:
|
||||
eigen_assert(matrix.rows()<=NumTraits<int>::highest() && matrix.cols()<=NumTraits<int>::highest());
|
||||
|
||||
|
|
|
@ -171,6 +171,12 @@ template<typename _MatrixType> class PartialPivLU
|
|||
inline Index cols() const { return m_lu.cols(); }
|
||||
|
||||
protected:
|
||||
|
||||
static void check_template_parameters()
|
||||
{
|
||||
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
|
||||
}
|
||||
|
||||
MatrixType m_lu;
|
||||
PermutationType m_p;
|
||||
TranspositionType m_rowsTranspositions;
|
||||
|
@ -386,6 +392,8 @@ void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, t
|
|||
template<typename MatrixType>
|
||||
PartialPivLU<MatrixType>& PartialPivLU<MatrixType>::compute(const MatrixType& matrix)
|
||||
{
|
||||
check_template_parameters();
|
||||
|
||||
// the row permutation is stored as int indices, so just to be sure:
|
||||
eigen_assert(matrix.rows()<NumTraits<int>::highest());
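With the EIGEN_STATIC_ASSERT_NON_INTEGER guard added above, instantiating these decompositions on an integer scalar is now rejected at compile time rather than silently producing garbage. A sketch (the commented-out lines are the ones that would no longer compile; the exact diagnostic wording may differ):

#include <Eigen/LU>

int main()
{
  Eigen::Matrix3d md = Eigen::Matrix3d::Random();
  Eigen::FullPivLU<Eigen::Matrix3d> lu(md);            // fine: double is non-integer

  // Eigen::Matrix3i mi = Eigen::Matrix3i::Identity();
  // Eigen::FullPivLU<Eigen::Matrix3i> lu_int(mi);     // compile-time error:
  //   THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES
  return 0;
}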
|
||||
|
||||
|
|
|
@ -137,22 +137,27 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
|
|||
degree[i] = len[i]; // degree of node i
|
||||
}
|
||||
mark = internal::cs_wclear<Index>(0, 0, w, n); /* clear w */
|
||||
elen[n] = -2; /* n is a dead element */
|
||||
Cp[n] = -1; /* n is a root of assembly tree */
|
||||
w[n] = 0; /* n is a dead element */
|
||||
|
||||
/* --- Initialize degree lists ------------------------------------------ */
|
||||
for(i = 0; i < n; i++)
|
||||
{
|
||||
bool has_diag = false;
|
||||
for(p = Cp[i]; p<Cp[i+1]; ++p)
|
||||
if(Ci[p]==i)
|
||||
{
|
||||
has_diag = true;
|
||||
break;
|
||||
}
|
||||
|
||||
d = degree[i];
|
||||
if(d == 0) /* node i is empty */
|
||||
if(d == 1 && has_diag) /* node i is empty */
|
||||
{
|
||||
elen[i] = -2; /* element i is dead */
|
||||
nel++;
|
||||
Cp[i] = -1; /* i is a root of assembly tree */
|
||||
w[i] = 0;
|
||||
}
|
||||
else if(d > dense) /* node i is dense */
|
||||
else if(d > dense || !has_diag) /* node i is dense or has no structural diagonal element */
|
||||
{
|
||||
nv[i] = 0; /* absorb i into element n */
|
||||
elen[i] = -1; /* node i is dead */
|
||||
|
@ -168,6 +173,10 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
|
|||
}
|
||||
}
|
||||
|
||||
elen[n] = -2; /* n is a dead element */
|
||||
Cp[n] = -1; /* n is a root of assembly tree */
|
||||
w[n] = 0; /* n is a dead element */
|
||||
|
||||
while (nel < n) /* while (selecting pivots) do */
|
||||
{
|
||||
/* --- Select node of minimum approximate degree -------------------- */
|
||||
|
|
|
@ -384,6 +384,12 @@ template<typename _MatrixType> class ColPivHouseholderQR
|
|||
}
|
||||
|
||||
protected:
|
||||
|
||||
static void check_template_parameters()
|
||||
{
|
||||
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
|
||||
}
|
||||
|
||||
MatrixType m_qr;
|
||||
HCoeffsType m_hCoeffs;
|
||||
PermutationType m_colsPermutation;
|
||||
|
@ -422,6 +428,8 @@ typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::logAbsDetermina
|
|||
template<typename MatrixType>
|
||||
ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
|
||||
{
|
||||
check_template_parameters();
|
||||
|
||||
using std::abs;
|
||||
Index rows = matrix.rows();
|
||||
Index cols = matrix.cols();
|
||||
|
@ -463,20 +471,10 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
|
|||
// we store that back into our table: it can't hurt to correct our table.
|
||||
m_colSqNorms.coeffRef(biggest_col_index) = biggest_col_sq_norm;
|
||||
|
||||
// if the current biggest column is smaller than epsilon times the initial biggest column,
|
||||
// terminate to avoid generating nan/inf values.
|
||||
// Note that here, if we test instead for "biggest == 0", we get a failure every 1000 (or so)
|
||||
// repetitions of the unit test, with the result of solve() filled with large values of the order
|
||||
// of 1/(size*epsilon).
|
||||
if(biggest_col_sq_norm < threshold_helper * RealScalar(rows-k))
|
||||
{
|
||||
// Track the number of meaningful pivots but do not stop the decomposition to make
|
||||
// sure that the initial matrix is properly reproduced. See bug 941.
|
||||
if(m_nonzero_pivots==size && biggest_col_sq_norm < threshold_helper * RealScalar(rows-k))
|
||||
m_nonzero_pivots = k;
|
||||
m_hCoeffs.tail(size-k).setZero();
|
||||
m_qr.bottomRightCorner(rows-k,cols-k)
|
||||
.template triangularView<StrictlyLower>()
|
||||
.setZero();
|
||||
break;
|
||||
}
|
||||
|
||||
// apply the transposition to the columns
|
||||
m_colsTranspositions.coeffRef(k) = biggest_col_index;
|
||||
|
@ -505,7 +503,7 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
|
|||
}
|
||||
|
||||
m_colsPermutation.setIdentity(PermIndexType(cols));
|
||||
for(PermIndexType k = 0; k < m_nonzero_pivots; ++k)
|
||||
for(PermIndexType k = 0; k < size/*m_nonzero_pivots*/; ++k)
|
||||
m_colsPermutation.applyTranspositionOnTheRight(k, PermIndexType(m_colsTranspositions.coeff(k)));
|
||||
|
||||
m_det_pq = (number_of_transpositions%2) ? -1 : 1;
|
||||
|
@ -555,13 +553,15 @@ struct solve_retval<ColPivHouseholderQR<_MatrixType>, Rhs>
|
|||
|
||||
} // end namespace internal
|
||||
|
||||
/** \returns the matrix Q as a sequence of householder transformations */
|
||||
/** \returns the matrix Q as a sequence of householder transformations.
|
||||
* You can extract the meaningful part only by using:
|
||||
* \code qr.householderQ().setLength(qr.nonzeroPivots()) \endcode*/
|
||||
template<typename MatrixType>
|
||||
typename ColPivHouseholderQR<MatrixType>::HouseholderSequenceType ColPivHouseholderQR<MatrixType>
|
||||
::householderQ() const
|
||||
{
|
||||
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
|
||||
return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate()).setLength(m_nonzero_pivots);
|
||||
return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate());
|
||||
}
|
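A usage sketch of the thin-Q extraction recommended in the updated householderQ() documentation (illustrative sizes, not part of the diff):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);
  A.col(3) = A.col(0) + A.col(1);                  // make the last column dependent
  Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);

  // householderQ() no longer truncates to the nonzero pivots by itself,
  // so restrict the sequence explicitly as suggested above.
  Eigen::MatrixXd Q = qr.householderQ().setLength(qr.nonzeroPivots());
  std::cout << "rank: " << qr.rank()
            << ", nonzero pivots: " << qr.nonzeroPivots() << "\n";
  std::cout << "||Q^T Q - I|| = "
            << (Q.transpose()*Q - Eigen::MatrixXd::Identity(6,6)).norm() << "\n";
  return 0;
}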
||||
|
||||
/** \return the column-pivoting Householder QR decomposition of \c *this.
|
||||
|
|
|
@ -63,12 +63,12 @@ ColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynami
|
|||
\
|
||||
m_nonzero_pivots = 0; \
|
||||
m_maxpivot = RealScalar(0);\
|
||||
m_colsPermutation.resize((int)cols); \
|
||||
m_colsPermutation.resize(cols); \
|
||||
m_colsPermutation.indices().setZero(); \
|
||||
\
|
||||
lapack_int lda = (lapack_int) m_qr.outerStride(), i; \
|
||||
lapack_int lda = m_qr.outerStride(), i; \
|
||||
lapack_int matrix_order = MKLCOLROW; \
|
||||
LAPACKE_##MKLPREFIX##geqp3( matrix_order, (lapack_int)rows, (lapack_int)cols, (MKLTYPE*)m_qr.data(), lda, (lapack_int*)m_colsPermutation.indices().data(), (MKLTYPE*)m_hCoeffs.data()); \
|
||||
LAPACKE_##MKLPREFIX##geqp3( matrix_order, rows, cols, (MKLTYPE*)m_qr.data(), lda, (lapack_int*)m_colsPermutation.indices().data(), (MKLTYPE*)m_hCoeffs.data()); \
|
||||
m_isInitialized = true; \
|
||||
m_maxpivot=m_qr.diagonal().cwiseAbs().maxCoeff(); \
|
||||
m_hCoeffs.adjointInPlace(); \
|
||||
|
|
|
@ -368,6 +368,12 @@ template<typename _MatrixType> class FullPivHouseholderQR
|
|||
RealScalar maxPivot() const { return m_maxpivot; }
|
||||
|
||||
protected:
|
||||
|
||||
static void check_template_parameters()
|
||||
{
|
||||
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
|
||||
}
|
||||
|
||||
MatrixType m_qr;
|
||||
HCoeffsType m_hCoeffs;
|
||||
IntDiagSizeVectorType m_rows_transpositions;
|
||||
|
@ -407,6 +413,8 @@ typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::logAbsDetermin
|
|||
template<typename MatrixType>
|
||||
FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
|
||||
{
|
||||
check_template_parameters();
|
||||
|
||||
using std::abs;
|
||||
Index rows = matrix.rows();
|
||||
Index cols = matrix.cols();
|
||||
|
|
|
@ -189,6 +189,12 @@ template<typename _MatrixType> class HouseholderQR
|
|||
const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
|
||||
|
||||
protected:
|
||||
|
||||
static void check_template_parameters()
|
||||
{
|
||||
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
|
||||
}
|
||||
|
||||
MatrixType m_qr;
|
||||
HCoeffsType m_hCoeffs;
|
||||
RowVectorType m_temp;
|
||||
|
@ -349,6 +355,8 @@ struct solve_retval<HouseholderQR<_MatrixType>, Rhs>
|
|||
template<typename MatrixType>
|
||||
HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType& matrix)
|
||||
{
|
||||
check_template_parameters();
|
||||
|
||||
Index rows = matrix.rows();
|
||||
Index cols = matrix.cols();
|
||||
Index size = (std::min)(rows,cols);
|
||||
|
|
|
@ -64,19 +64,13 @@ class SPQR
|
|||
typedef PermutationMatrix<Dynamic, Dynamic> PermutationType;
|
||||
public:
|
||||
SPQR()
|
||||
: m_isInitialized(false),
|
||||
m_ordering(SPQR_ORDERING_DEFAULT),
|
||||
m_allow_tol(SPQR_DEFAULT_TOL),
|
||||
m_tolerance (NumTraits<Scalar>::epsilon())
|
||||
: m_isInitialized(false), m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
|
||||
{
|
||||
cholmod_l_start(&m_cc);
|
||||
}
|
||||
|
||||
SPQR(const _MatrixType& matrix)
|
||||
: m_isInitialized(false),
|
||||
m_ordering(SPQR_ORDERING_DEFAULT),
|
||||
m_allow_tol(SPQR_DEFAULT_TOL),
|
||||
m_tolerance (NumTraits<Scalar>::epsilon())
|
||||
SPQR(const _MatrixType& matrix)
|
||||
: m_isInitialized(false), m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
|
||||
{
|
||||
cholmod_l_start(&m_cc);
|
||||
compute(matrix);
|
||||
|
@ -101,10 +95,26 @@ class SPQR
|
|||
if(m_isInitialized) SPQR_free();
|
||||
|
||||
MatrixType mat(matrix);
|
||||
|
||||
/* Compute the default threshold as in MatLab, see:
|
||||
* Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
|
||||
* Sparse QR Factorization, ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3
|
||||
*/
|
||||
RealScalar pivotThreshold = m_tolerance;
|
||||
if(m_useDefaultThreshold)
|
||||
{
|
||||
using std::max;
|
||||
RealScalar max2Norm = 0.0;
|
||||
for (int j = 0; j < mat.cols(); j++) max2Norm = (max)(max2Norm, mat.col(j).norm());
|
||||
if(max2Norm==RealScalar(0))
|
||||
max2Norm = RealScalar(1);
|
||||
pivotThreshold = 20 * (mat.rows() + mat.cols()) * max2Norm * NumTraits<RealScalar>::epsilon();
|
||||
}
|
||||
|
||||
cholmod_sparse A;
|
||||
A = viewAsCholmod(mat);
|
||||
Index col = matrix.cols();
|
||||
m_rank = SuiteSparseQR<Scalar>(m_ordering, m_tolerance, col, &A,
|
||||
m_rank = SuiteSparseQR<Scalar>(m_ordering, pivotThreshold, col, &A,
|
||||
&m_cR, &m_E, &m_H, &m_HPinv, &m_HTau, &m_cc);
|
||||
|
||||
if (!m_cR)
|
||||
|
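The default tolerance introduced above follows the SuiteSparseQR/MATLAB convention tol = 20 * (m + n) * max_j ||A.col(j)||_2 * eps. A stand-alone sketch of that computation (spqrDefaultThreshold is a hypothetical helper name, not Eigen API):

#include <Eigen/Sparse>
#include <algorithm>
#include <limits>

double spqrDefaultThreshold(const Eigen::SparseMatrix<double>& A)
{
  double max2Norm = 0.0;
  for(int j = 0; j < A.cols(); ++j)
    max2Norm = std::max(max2Norm, A.col(j).norm());   // largest column 2-norm
  if(max2Norm == 0.0) max2Norm = 1.0;                 // guard for the zero matrix
  return 20.0 * (A.rows() + A.cols()) * max2Norm
              * std::numeric_limits<double>::epsilon();
}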
@ -120,7 +130,7 @@ class SPQR
|
|||
/**
|
||||
* Get the number of rows of the input matrix and the Q matrix
|
||||
*/
|
||||
inline Index rows() const {return m_H->nrow; }
|
||||
inline Index rows() const {return m_cR->nrow; }
|
||||
|
||||
/**
|
||||
* Get the number of columns of the input matrix.
|
||||
|
@ -145,16 +155,25 @@ class SPQR
|
|||
{
|
||||
eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()");
|
||||
eigen_assert(b.cols()==1 && "This method is for vectors only");
|
||||
|
||||
|
||||
//Compute Q^T * b
|
||||
typename Dest::PlainObject y;
|
||||
typename Dest::PlainObject y, y2;
|
||||
y = matrixQ().transpose() * b;
|
||||
// Solves with the triangular matrix R
|
||||
|
||||
// Solves with the triangular matrix R
|
||||
Index rk = this->rank();
|
||||
y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView<Upper>().solve(y.topRows(rk));
|
||||
y.bottomRows(cols()-rk).setZero();
|
||||
y2 = y;
|
||||
y.resize((std::max)(cols(),Index(y.rows())),y.cols());
|
||||
y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView<Upper>().solve(y2.topRows(rk));
|
||||
|
||||
// Apply the column permutation
|
||||
dest.topRows(cols()) = colsPermutation() * y.topRows(cols());
|
||||
// colsPermutation() performs a copy of the permutation,
|
||||
// so let's apply it manually:
|
||||
for(Index i = 0; i < rk; ++i) dest.row(m_E[i]) = y.row(i);
|
||||
for(Index i = rk; i < cols(); ++i) dest.row(m_E[i]).setZero();
|
||||
|
||||
// y.bottomRows(y.rows()-rk).setZero();
|
||||
// dest = colsPermutation() * y.topRows(cols());
|
||||
|
||||
m_info = Success;
|
||||
}
|
||||
|
@ -197,7 +216,11 @@ class SPQR
|
|||
/// Set the fill-reducing ordering method to be used
|
||||
void setSPQROrdering(int ord) { m_ordering = ord;}
|
||||
/// Set the tolerance tol to treat columns with 2-norm < =tol as zero
|
||||
void setPivotThreshold(const RealScalar& tol) { m_tolerance = tol; }
|
||||
void setPivotThreshold(const RealScalar& tol)
|
||||
{
|
||||
m_useDefaultThreshold = false;
|
||||
m_tolerance = tol;
|
||||
}
|
||||
|
||||
/** \returns a pointer to the SPQR workspace */
|
||||
cholmod_common *cholmodCommon() const { return &m_cc; }
|
||||
|
@ -230,6 +253,7 @@ class SPQR
|
|||
mutable cholmod_dense *m_HTau; // The Householder coefficients
|
||||
mutable Index m_rank; // The rank of the matrix
|
||||
mutable cholmod_common m_cc; // Workspace and parameters
|
||||
bool m_useDefaultThreshold; // Use default threshold
|
||||
template<typename ,typename > friend struct SPQR_QProduct;
|
||||
};
|
||||
|
||||
|
|
|
@ -742,6 +742,11 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
|
|||
|
||||
private:
|
||||
void allocate(Index rows, Index cols, unsigned int computationOptions);
|
||||
|
||||
static void check_template_parameters()
|
||||
{
|
||||
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
|
||||
}
|
||||
|
||||
protected:
|
||||
MatrixUType m_matrixU;
|
||||
|
@ -818,6 +823,8 @@ template<typename MatrixType, int QRPreconditioner>
|
|||
JacobiSVD<MatrixType, QRPreconditioner>&
|
||||
JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
|
||||
{
|
||||
check_template_parameters();
|
||||
|
||||
using std::abs;
|
||||
allocate(matrix.rows(), matrix.cols(), computationOptions);
|
||||
|
||||
|
|
|
@ -57,6 +57,16 @@ public:
|
|||
inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols)
|
||||
: m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
|
||||
{}
|
||||
|
||||
inline const Scalar coeff(int row, int col) const
|
||||
{
|
||||
return m_matrix.coeff(row + IsRowMajor ? m_outerStart : 0, col +IsRowMajor ? 0 : m_outerStart);
|
||||
}
|
||||
|
||||
inline const Scalar coeff(int index) const
|
||||
{
|
||||
return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
|
||||
}
|
||||
|
||||
EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
|
||||
EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
|
||||
|
@ -226,6 +236,21 @@ public:
|
|||
else
|
||||
return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
|
||||
}
|
||||
|
||||
inline Scalar& coeffRef(int row, int col)
|
||||
{
|
||||
return m_matrix.const_cast_derived().coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
|
||||
}
|
||||
|
||||
inline const Scalar coeff(int row, int col) const
|
||||
{
|
||||
return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
|
||||
}
|
||||
|
||||
inline const Scalar coeff(int index) const
|
||||
{
|
||||
return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
|
||||
}
|
||||
|
||||
const Scalar& lastCoeff() const
|
||||
{
|
||||
|
@ -313,6 +338,16 @@ public:
|
|||
else
|
||||
return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
|
||||
}
|
||||
|
||||
inline const Scalar coeff(int row, int col) const
|
||||
{
|
||||
return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
|
||||
}
|
||||
|
||||
inline const Scalar coeff(int index) const
|
||||
{
|
||||
return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
|
||||
}
|
||||
|
||||
const Scalar& lastCoeff() const
|
||||
{
|
||||
|
@ -355,7 +390,8 @@ const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatri
|
|||
* is col-major (resp. row-major).
|
||||
*/
|
||||
template<typename Derived>
|
||||
Block<Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
|
||||
typename SparseMatrixBase<Derived>::InnerVectorsReturnType
|
||||
SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
|
||||
{
|
||||
return Block<Derived,Dynamic,Dynamic,true>(derived(),
|
||||
IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
|
||||
|
@ -367,7 +403,8 @@ Block<Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Inde
|
|||
* is col-major (resp. row-major). Read-only.
|
||||
*/
|
||||
template<typename Derived>
|
||||
const Block<const Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
|
||||
const typename SparseMatrixBase<Derived>::ConstInnerVectorsReturnType
|
||||
SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
|
||||
{
|
||||
return Block<const Derived,Dynamic,Dynamic,true>(derived(),
|
||||
IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
|
||||
|
@ -394,8 +431,8 @@ public:
|
|||
: m_matrix(xpr),
|
||||
m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
|
||||
m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
|
||||
m_blockRows(xpr.rows()),
|
||||
m_blockCols(xpr.cols())
|
||||
m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
|
||||
m_blockCols(BlockCols==1 ? 1 : xpr.cols())
|
||||
{}
|
||||
|
||||
/** Dynamic-size constructor
|
||||
|
@ -497,3 +534,4 @@ public:
|
|||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_SPARSE_BLOCK_H
|
||||
|
||||
|
|
|
@ -180,7 +180,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, R
|
|||
typename Res::Scalar tmp(0);
|
||||
for(LhsInnerIterator it(lhs,j); it ;++it)
|
||||
tmp += it.value() * rhs.coeff(it.index(),c);
|
||||
res.coeffRef(j,c) = alpha * tmp;
|
||||
res.coeffRef(j,c) += alpha * tmp;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -404,8 +404,10 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
|
|||
const ConstInnerVectorReturnType innerVector(Index outer) const;
|
||||
|
||||
// set of inner-vectors
|
||||
Block<Derived,Dynamic,Dynamic,true> innerVectors(Index outerStart, Index outerSize);
|
||||
const Block<const Derived,Dynamic,Dynamic,true> innerVectors(Index outerStart, Index outerSize) const;
|
||||
typedef Block<Derived,Dynamic,Dynamic,true> InnerVectorsReturnType;
|
||||
typedef Block<const Derived,Dynamic,Dynamic,true> ConstInnerVectorsReturnType;
|
||||
InnerVectorsReturnType innerVectors(Index outerStart, Index outerSize);
|
||||
const ConstInnerVectorsReturnType innerVectors(Index outerStart, Index outerSize) const;
|
||||
|
||||
/** \internal use operator= */
|
||||
template<typename DenseDerived>
|
||||
|
|
|
@ -158,6 +158,7 @@ class SparseVector
|
|||
|
||||
Index inner = IsColVector ? row : col;
|
||||
Index outer = IsColVector ? col : row;
|
||||
EIGEN_ONLY_USED_FOR_DEBUG(outer);
|
||||
eigen_assert(outer==0);
|
||||
return insert(inner);
|
||||
}
|
||||
|
|
|
@ -69,7 +69,7 @@ struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>
|
|||
for(int i=lhs.rows()-1 ; i>=0 ; --i)
|
||||
{
|
||||
Scalar tmp = other.coeff(i,col);
|
||||
Scalar l_ii = 0;
|
||||
Scalar l_ii(0);
|
||||
typename Lhs::InnerIterator it(lhs, i);
|
||||
while(it && it.index()<i)
|
||||
++it;
|
||||
|
|
|
@ -268,7 +268,8 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
|
|||
{
|
||||
if(it.index() == j)
|
||||
{
|
||||
det *= (std::abs)(it.value());
|
||||
using std::abs;
|
||||
det *= abs(it.value());
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -295,7 +296,8 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
|
|||
if(it.row() < j) continue;
|
||||
if(it.row() == j)
|
||||
{
|
||||
det += (std::log)((std::abs)(it.value()));
|
||||
using std::log; using std::abs;
|
||||
det += log(abs(it.value()));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -303,21 +305,64 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
|
|||
return det;
|
||||
}
|
||||
|
||||
/** \returns A number representing the sign of the determinant
|
||||
*
|
||||
* \sa absDeterminant(), logAbsDeterminant()
|
||||
*/
|
||||
Scalar signDeterminant()
|
||||
{
|
||||
eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
|
||||
return Scalar(m_detPermR);
|
||||
}
|
||||
/** \returns A number representing the sign of the determinant
|
||||
*
|
||||
* \sa absDeterminant(), logAbsDeterminant()
|
||||
*/
|
||||
Scalar signDeterminant()
|
||||
{
|
||||
eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
|
||||
// Initialize with the determinant of the row matrix
|
||||
Index det = 1;
|
||||
// Note that the diagonal blocks of U are stored in supernodes,
|
||||
// which are available in the L part :)
|
||||
for (Index j = 0; j < this->cols(); ++j)
|
||||
{
|
||||
for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
|
||||
{
|
||||
if(it.index() == j)
|
||||
{
|
||||
if(it.value()<0)
|
||||
det = -det;
|
||||
else if(it.value()==0)
|
||||
return 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return det * m_detPermR * m_detPermC;
|
||||
}
|
||||
|
||||
/** \returns The determinant of the matrix.
|
||||
*
|
||||
* \sa absDeterminant(), logAbsDeterminant()
|
||||
*/
|
||||
Scalar determinant()
|
||||
{
|
||||
eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
|
||||
// Initialize with the determinant of the row matrix
|
||||
Scalar det = Scalar(1.);
|
||||
// Note that the diagonal blocks of U are stored in supernodes,
|
||||
// which are available in the L part :)
|
||||
for (Index j = 0; j < this->cols(); ++j)
|
||||
{
|
||||
for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
|
||||
{
|
||||
if(it.index() == j)
|
||||
{
|
||||
det *= it.value();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return det * Scalar(m_detPermR * m_detPermC);
|
||||
}
|
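A usage sketch of the determinant accessors above, assuming the determinant() member added in this hunk; the value is assembled from the diagonal of U (stored in the supernodal L structure) and the signs of the row and column permutations:

#include <Eigen/Sparse>
#include <Eigen/SparseLU>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(3, 3);
  A.insert(0,0) = 2; A.insert(1,1) = 3; A.insert(2,2) = 4;
  A.insert(0,2) = 1;                  // upper triangular, so det(A) = 2*3*4 = 24
  A.makeCompressed();

  Eigen::SparseLU<Eigen::SparseMatrix<double> > lu(A);
  if(lu.info() == Eigen::Success)
  {
    std::cout << lu.determinant() << "\n";                       // expect 24
    std::cout << lu.signDeterminant() << " "
              << lu.logAbsDeterminant() << "\n";                 // expect 1 and log(24)
  }
  return 0;
}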
||||
|
||||
protected:
|
||||
// Functions
|
||||
void initperfvalues()
|
||||
{
|
||||
m_perfv.panel_size = 1;
|
||||
m_perfv.panel_size = 16;
|
||||
m_perfv.relax = 1;
|
||||
m_perfv.maxsuper = 128;
|
||||
m_perfv.rowblk = 16;
|
||||
|
@ -345,8 +390,8 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
|
|||
// values for performance
|
||||
internal::perfvalues<Index> m_perfv;
|
||||
RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot
|
||||
Index m_nnzL, m_nnzU; // Nonzeros in L and U factors
|
||||
Index m_detPermR; // Determinant of the coefficient matrix
|
||||
Index m_nnzL, m_nnzU; // Nonzeros in L and U factors
|
||||
Index m_detPermR, m_detPermC; // Determinants of the permutation matrices
|
||||
private:
|
||||
// Disable copy constructor
|
||||
SparseLU (const SparseLU& );
|
||||
|
@ -622,7 +667,8 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
|
|||
}
|
||||
|
||||
// Update the determinant of the row permutation matrix
|
||||
if (pivrow != jj) m_detPermR *= -1;
|
||||
// FIXME: the following test is not correct, we should probably take iperm_c into account and pivrow is not directly the row pivot.
|
||||
if (pivrow != jj) m_detPermR = -m_detPermR;
|
||||
|
||||
// Prune columns (0:jj-1) using column jj
|
||||
Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu);
|
||||
|
@ -637,10 +683,13 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
|
|||
jcol += panel_size; // Move to the next panel
|
||||
} // end for -- end elimination
|
||||
|
||||
m_detPermR = m_perm_r.determinant();
|
||||
m_detPermC = m_perm_c.determinant();
|
||||
|
||||
// Count the number of nonzeros in factors
|
||||
Base::countnz(n, m_nnzL, m_nnzU, m_glu);
|
||||
// Apply permutation to the L subscripts
|
||||
Base::fixupL(n, m_perm_r.indices(), m_glu);
|
||||
Base::fixupL(n, m_perm_r.indices(), m_glu);
|
||||
|
||||
// Create supernode matrix L
|
||||
m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup);
|
||||
|
@ -700,8 +749,8 @@ struct SparseLUMatrixUReturnType : internal::no_assignment_operator
|
|||
}
|
||||
else
|
||||
{
|
||||
Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
|
||||
Map< Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
|
||||
Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
|
||||
Map< Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
|
||||
U = A.template triangularView<Upper>().solve(U);
|
||||
}
|
||||
|
||||
|
|
|
@ -21,6 +21,8 @@ class SparseLUImpl
|
|||
{
|
||||
public:
|
||||
typedef Matrix<Scalar,Dynamic,1> ScalarVector;
|
||||
typedef Matrix<Scalar,Dynamic,Dynamic,ColMajor> ScalarMatrix;
|
||||
typedef Map<ScalarMatrix, 0, OuterStride<> > MappedMatrixBlock;
|
||||
typedef Matrix<Index,Dynamic,1> IndexVector;
|
||||
typedef typename ScalarVector::RealScalar RealScalar;
|
||||
typedef Ref<Matrix<Scalar,Dynamic,1> > BlockScalarVector;
|
||||
|
|
|
@ -153,8 +153,8 @@ Index SparseLUImpl<Scalar,Index>::memInit(Index m, Index n, Index annz, Index lw
|
|||
{
|
||||
Index& num_expansions = glu.num_expansions; //No memory expansions so far
|
||||
num_expansions = 0;
|
||||
glu.nzumax = glu.nzlumax = (std::min)(fillratio * annz / n, m) * n; // estimated number of nonzeros in U
|
||||
glu.nzlmax = (std::max)(Index(4), fillratio) * annz / 4; // estimated nnz in L factor
|
||||
glu.nzumax = glu.nzlumax = (std::min)(fillratio * (annz+1) / n, m) * n; // estimated number of nonzeros in U
|
||||
glu.nzlmax = (std::max)(Index(4), fillratio) * (annz+1) / 4; // estimated nnz in L factor
|
||||
// Return the estimated size to the user if necessary
|
||||
Index tempSpace;
|
||||
tempSpace = (2*panel_size + 4 + LUNoMarker) * m * sizeof(Index) + (panel_size + 1) * m * sizeof(Scalar);
|
||||
|
|
|
@ -236,7 +236,7 @@ void MappedSuperNodalMatrix<Scalar,Index>::solveInPlace( MatrixBase<Dest>&X) con
|
|||
Index n = X.rows();
|
||||
Index nrhs = X.cols();
|
||||
const Scalar * Lval = valuePtr(); // Nonzero values
|
||||
Matrix<Scalar,Dynamic,Dynamic> work(n, nrhs); // working vector
|
||||
Matrix<Scalar,Dynamic,Dynamic, ColMajor> work(n, nrhs); // working vector
|
||||
work.setZero();
|
||||
for (Index k = 0; k <= nsuper(); k ++)
|
||||
{
|
||||
|
@ -267,12 +267,12 @@ void MappedSuperNodalMatrix<Scalar,Index>::solveInPlace( MatrixBase<Dest>&X) con
|
|||
Index lda = colIndexPtr()[fsupc+1] - luptr;
|
||||
|
||||
// Triangular solve
|
||||
Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(lda) );
|
||||
Map< Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
|
||||
Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(lda) );
|
||||
Map< Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
|
||||
U = A.template triangularView<UnitLower>().solve(U);
|
||||
|
||||
// Matrix-vector product
|
||||
new (&A) Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > ( &(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
|
||||
new (&A) Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > ( &(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
|
||||
work.block(0, 0, nrow, nrhs) = A * U;
|
||||
|
||||
//Begin Scatter
|
||||
|
|
|
@ -162,11 +162,11 @@ Index SparseLUImpl<Scalar,Index>::column_bmod(const Index jcol, const Index nseg
|
|||
// points to the beginning of jcol in snode L\U(jsupno)
|
||||
ufirst = glu.xlusup(jcol) + d_fsupc;
|
||||
Index lda = glu.xlusup(jcol+1) - glu.xlusup(jcol);
|
||||
Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(glu.lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
|
||||
MappedMatrixBlock A( &(glu.lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
|
||||
VectorBlock<ScalarVector> u(glu.lusup, ufirst, nsupc);
|
||||
u = A.template triangularView<UnitLower>().solve(u);
|
||||
|
||||
new (&A) Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > ( &(glu.lusup.data()[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
|
||||
new (&A) MappedMatrixBlock ( &(glu.lusup.data()[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
|
||||
VectorBlock<ScalarVector> l(glu.lusup, ufirst+nsupc, nrow);
|
||||
l.noalias() -= A * u;
|
||||
|
||||
|
|
|
@ -56,7 +56,7 @@ EIGEN_DONT_INLINE void LU_kernel_bmod<SegSizeAtCompileTime>::run(const int segsi

// Dense triangular solve -- start effective triangle
luptr += lda * no_zeros + no_zeros;
// Form Eigen matrix and vector
Map<Matrix<Scalar,SegSizeAtCompileTime,SegSizeAtCompileTime>, 0, OuterStride<> > A( &(lusup.data()[luptr]), segsize, segsize, OuterStride<>(lda) );
Map<Matrix<Scalar,SegSizeAtCompileTime,SegSizeAtCompileTime, ColMajor>, 0, OuterStride<> > A( &(lusup.data()[luptr]), segsize, segsize, OuterStride<>(lda) );
Map<Matrix<Scalar,SegSizeAtCompileTime,1> > u(tempv.data(), segsize);

u = A.template triangularView<UnitLower>().solve(u);

@ -65,7 +65,7 @@ EIGEN_DONT_INLINE void LU_kernel_bmod<SegSizeAtCompileTime>::run(const int segsi

luptr += segsize;
const Index PacketSize = internal::packet_traits<Scalar>::size;
Index ldl = internal::first_multiple(nrow, PacketSize);
Map<Matrix<Scalar,Dynamic,SegSizeAtCompileTime>, 0, OuterStride<> > B( &(lusup.data()[luptr]), nrow, segsize, OuterStride<>(lda) );
Map<Matrix<Scalar,Dynamic,SegSizeAtCompileTime, ColMajor>, 0, OuterStride<> > B( &(lusup.data()[luptr]), nrow, segsize, OuterStride<>(lda) );
Index aligned_offset = internal::first_aligned(tempv.data()+segsize, PacketSize);
Index aligned_with_B_offset = (PacketSize-internal::first_aligned(B.data(), PacketSize))%PacketSize;
Map<Matrix<Scalar,Dynamic,1>, 0, OuterStride<> > l(tempv.data()+segsize+aligned_offset+aligned_with_B_offset, nrow, OuterStride<>(ldl) );
@ -102,7 +102,7 @@ void SparseLUImpl<Scalar,Index>::panel_bmod(const Index m, const Index w, const

if(nsupc >= 2)
{
Index ldu = internal::first_multiple<Index>(u_rows, PacketSize);
Map<Matrix<Scalar,Dynamic,Dynamic>, Aligned, OuterStride<> > U(tempv.data(), u_rows, u_cols, OuterStride<>(ldu));
Map<ScalarMatrix, Aligned, OuterStride<> > U(tempv.data(), u_rows, u_cols, OuterStride<>(ldu));

// gather U
Index u_col = 0;
@ -136,17 +136,17 @@ void SparseLUImpl<Scalar,Index>::panel_bmod(const Index m, const Index w, const

Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc);
no_zeros = (krep - u_rows + 1) - fsupc;
luptr += lda * no_zeros + no_zeros;
Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A(glu.lusup.data()+luptr, u_rows, u_rows, OuterStride<>(lda) );
MappedMatrixBlock A(glu.lusup.data()+luptr, u_rows, u_rows, OuterStride<>(lda) );
U = A.template triangularView<UnitLower>().solve(U);

// update
luptr += u_rows;
Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > B(glu.lusup.data()+luptr, nrow, u_rows, OuterStride<>(lda) );
MappedMatrixBlock B(glu.lusup.data()+luptr, nrow, u_rows, OuterStride<>(lda) );
eigen_assert(tempv.size()>w*ldu + nrow*w + 1);

Index ldl = internal::first_multiple<Index>(nrow, PacketSize);
Index offset = (PacketSize-internal::first_aligned(B.data(), PacketSize)) % PacketSize;
Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > L(tempv.data()+w*ldu+offset, nrow, u_cols, OuterStride<>(ldl));
MappedMatrixBlock L(tempv.data()+w*ldu+offset, nrow, u_cols, OuterStride<>(ldl));

L.setZero();
internal::sparselu_gemm<Scalar>(L.rows(), L.cols(), B.cols(), B.data(), B.outerStride(), U.data(), U.outerStride(), L.data(), L.outerStride());
@ -71,13 +71,14 @@ Index SparseLUImpl<Scalar,Index>::pivotL(const Index jcol, const RealScalar& dia

// Determine the largest abs numerical value for partial pivoting
Index diagind = iperm_c(jcol); // diagonal index
RealScalar pivmax = 0.0;
RealScalar pivmax(-1.0);
Index pivptr = nsupc;
Index diag = emptyIdxLU;
RealScalar rtemp;
Index isub, icol, itemp, k;
for (isub = nsupc; isub < nsupr; ++isub) {
rtemp = std::abs(lu_col_ptr[isub]);
using std::abs;
rtemp = abs(lu_col_ptr[isub]);
if (rtemp > pivmax) {
pivmax = rtemp;
pivptr = isub;

@ -86,8 +87,9 @@ Index SparseLUImpl<Scalar,Index>::pivotL(const Index jcol, const RealScalar& dia

}

// Test for singularity
if ( pivmax == 0.0 ) {
pivrow = lsub_ptr[pivptr];
if ( pivmax <= RealScalar(0.0) ) {
// if pivmax == -1, the column is structurally empty, otherwise it is only numerically zero
pivrow = pivmax < RealScalar(0.0) ? diagind : lsub_ptr[pivptr];
perm_r(pivrow) = jcol;
return (jcol+1);
}

@ -101,7 +103,8 @@ Index SparseLUImpl<Scalar,Index>::pivotL(const Index jcol, const RealScalar& dia

if (diag >= 0 )
{
// Diagonal element exists
rtemp = std::abs(lu_col_ptr[diag]);
using std::abs;
rtemp = abs(lu_col_ptr[diag]);
if (rtemp != 0.0 && rtemp >= thresh) pivptr = diag;
}
pivrow = lsub_ptr[pivptr];
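The sentinel introduced above (pivmax starts at -1 so that a structurally empty column can be told apart from a merely numerically zero one) and the diagonal-threshold test are easier to follow outside the supernodal storage. Here is a stand-alone sketch of the same threshold partial pivoting idea, with hypothetical names and a plain std::vector in place of the LU column:

\code
#include <cmath>
#include <vector>

// Pick a pivot row for one dense column: prefer the entry of largest magnitude,
// but keep the diagonal whenever it is within a fraction of that maximum.
int choose_pivot(const std::vector<double>& col, int diag_row, double diag_thresh_ratio)
{
  double pivmax = -1.0;                 // -1 flags a structurally empty column
  int pivrow = diag_row;
  for (int i = 0; i < int(col.size()); ++i) {
    double a = std::abs(col[i]);
    if (a > pivmax) { pivmax = a; pivrow = i; }
  }
  if (pivmax <= 0.0)                    // empty or numerically zero column:
    return diag_row;                    // the caller would flag the matrix as singular
  const double thresh = diag_thresh_ratio * pivmax;
  if (diag_row >= 0) {
    const double d = std::abs(col[diag_row]);
    if (d != 0.0 && d >= thresh)        // keep the diagonal to limit row swaps / fill-in
      pivrow = diag_row;
  }
  return pivrow;
}
\endcode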
@ -70,6 +70,43 @@ max

return (max)(Derived::PlainObject::Constant(rows(), cols(), other));
}
#define EIGEN_MAKE_CWISE_COMP_OP(OP, COMPARATOR) \
template<typename OtherDerived> \
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const OtherDerived> \
OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
{ \
return CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const OtherDerived>(derived(), other.derived()); \
}\
typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> > Cmp ## COMPARATOR ## ReturnType; \
typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject>, const Derived > RCmp ## COMPARATOR ## ReturnType; \
EIGEN_STRONG_INLINE const Cmp ## COMPARATOR ## ReturnType \
OP(const Scalar& s) const { \
return this->OP(Derived::PlainObject::Constant(rows(), cols(), s)); \
} \
friend EIGEN_STRONG_INLINE const RCmp ## COMPARATOR ## ReturnType \
OP(const Scalar& s, const Derived& d) { \
return Derived::PlainObject::Constant(d.rows(), d.cols(), s).OP(d); \
}

#define EIGEN_MAKE_CWISE_COMP_R_OP(OP, R_OP, RCOMPARATOR) \
template<typename OtherDerived> \
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_##RCOMPARATOR>, const OtherDerived, const Derived> \
OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
{ \
return CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_##RCOMPARATOR>, const OtherDerived, const Derived>(other.derived(), derived()); \
} \
\
inline const RCmp ## RCOMPARATOR ## ReturnType \
OP(const Scalar& s) const { \
return Derived::PlainObject::Constant(rows(), cols(), s).R_OP(*this); \
} \
friend inline const Cmp ## RCOMPARATOR ## ReturnType \
OP(const Scalar& s, const Derived& d) { \
return d.R_OP(Derived::PlainObject::Constant(d.rows(), d.cols(), s)); \
}
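As a quick usage sketch of what these macros expand to (illustrative only; it assumes the array-world operators generated above, including the new scalar overloads on either side of the comparison):

\code
#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

int main()
{
  ArrayXXf a(2,2), b(2,2);
  a << 1, 2,
       3, 4;
  b << 2, 2,
       1, 5;
  std::cout << (a <  b)   << "\n\n"; // coefficient-wise comparison, prints 1/0
  std::cout << (a >= 3.f) << "\n\n"; // array vs scalar (the member overload)
  std::cout << (3.f >= a) << "\n";   // scalar vs array (the friend overload)
}
\endcode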
/** \returns an expression of the coefficient-wise \< operator of *this and \a other
  *
  * Example: \include Cwise_less.cpp
@ -77,7 +114,7 @@ max

  *
  * \sa all(), any(), operator>(), operator<=()
  */
EIGEN_MAKE_CWISE_BINARY_OP(operator<,std::less)
EIGEN_MAKE_CWISE_COMP_OP(operator<, LT)

/** \returns an expression of the coefficient-wise \<= operator of *this and \a other
  *

@ -86,7 +123,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator<,std::less)

  *
  * \sa all(), any(), operator>=(), operator<()
  */
EIGEN_MAKE_CWISE_BINARY_OP(operator<=,std::less_equal)
EIGEN_MAKE_CWISE_COMP_OP(operator<=, LE)

/** \returns an expression of the coefficient-wise \> operator of *this and \a other
  *

@ -95,7 +132,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator<=,std::less_equal)

  *
  * \sa all(), any(), operator>=(), operator<()
  */
EIGEN_MAKE_CWISE_BINARY_OP(operator>,std::greater)
EIGEN_MAKE_CWISE_COMP_R_OP(operator>, operator<, LT)

/** \returns an expression of the coefficient-wise \>= operator of *this and \a other
  *

@ -104,7 +141,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator>,std::greater)

  *
  * \sa all(), any(), operator>(), operator<=()
  */
EIGEN_MAKE_CWISE_BINARY_OP(operator>=,std::greater_equal)
EIGEN_MAKE_CWISE_COMP_R_OP(operator>=, operator<=, LE)

/** \returns an expression of the coefficient-wise == operator of *this and \a other
  *

@ -118,7 +155,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator>=,std::greater_equal)

  *
  * \sa all(), any(), isApprox(), isMuchSmallerThan()
  */
EIGEN_MAKE_CWISE_BINARY_OP(operator==,std::equal_to)
EIGEN_MAKE_CWISE_COMP_OP(operator==, EQ)

/** \returns an expression of the coefficient-wise != operator of *this and \a other
  *

@ -132,7 +169,10 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator==,std::equal_to)

  *
  * \sa all(), any(), isApprox(), isMuchSmallerThan()
  */
EIGEN_MAKE_CWISE_BINARY_OP(operator!=,std::not_equal_to)
EIGEN_MAKE_CWISE_COMP_OP(operator!=, NEQ)

#undef EIGEN_MAKE_CWISE_COMP_OP
#undef EIGEN_MAKE_CWISE_COMP_R_OP

// scalar addition
@ -209,3 +249,5 @@ operator||(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
|
|||
THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);
|
||||
return CwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>(derived(),other.derived());
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -185,19 +185,3 @@ cube() const
|
|||
{
|
||||
return derived();
|
||||
}
|
||||
|
||||
#define EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(METHOD_NAME,FUNCTOR) \
|
||||
inline const CwiseUnaryOp<std::binder2nd<FUNCTOR<Scalar> >, const Derived> \
|
||||
METHOD_NAME(const Scalar& s) const { \
|
||||
return CwiseUnaryOp<std::binder2nd<FUNCTOR<Scalar> >, const Derived> \
|
||||
(derived(), std::bind2nd(FUNCTOR<Scalar>(), s)); \
|
||||
}
|
||||
|
||||
EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator==, std::equal_to)
|
||||
EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator!=, std::not_equal_to)
|
||||
EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator<, std::less)
|
||||
EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator<=, std::less_equal)
|
||||
EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator>, std::greater)
|
||||
EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator>=, std::greater_equal)
|
||||
|
||||
|
||||
|
|
|
@ -124,3 +124,20 @@ cwiseQuotient(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const

{
return CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
}

typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar,internal::cmp_EQ>, const Derived, const ConstantReturnType> CwiseScalarEqualReturnType;

/** \returns an expression of the coefficient-wise == operator of \c *this and a scalar \a s
  *
  * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
  * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
  * isMuchSmallerThan().
  *
  * \sa cwiseEqual(const MatrixBase<OtherDerived> &) const
  */
inline const CwiseScalarEqualReturnType
cwiseEqual(const Scalar& s) const
{
return CwiseScalarEqualReturnType(derived(), Derived::Constant(rows(), cols(), s), internal::scalar_cmp_op<Scalar,internal::cmp_EQ>());
}
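Since the documentation above warns against exact floating-point comparison, here is a small usage sketch contrasting cwiseEqual() with the fuzzy alternatives it points to (illustrative, standard Eigen API only):

\code
#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

int main()
{
  MatrixXd m = MatrixXd::Constant(2, 2, 0.1);
  MatrixXd s = m * 3.0;                                  // equals 0.3 only up to rounding
  // Exact comparison: may report zero matches because 3*0.1 != 0.3 in double precision.
  std::cout << s.cwiseEqual(0.3).count() << " exact matches\n";
  // Fuzzy comparisons are the recommended way to compare floating-point results.
  std::cout << s.isApprox(MatrixXd::Constant(2, 2, 0.3)) << "\n";
  std::cout << (s - MatrixXd::Constant(2, 2, 0.3)).isMuchSmallerThan(1.0) << "\n";
}
\endcode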
@ -50,18 +50,3 @@ cwiseSqrt() const { return derived(); }
|
|||
inline const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived>
|
||||
cwiseInverse() const { return derived(); }
|
||||
|
||||
/** \returns an expression of the coefficient-wise == operator of \c *this and a scalar \a s
|
||||
*
|
||||
* \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
|
||||
* In order to check for equality between two vectors or matrices with floating-point coefficients, it is
|
||||
* generally a far better idea to use a fuzzy comparison as provided by isApprox() and
|
||||
* isMuchSmallerThan().
|
||||
*
|
||||
* \sa cwiseEqual(const MatrixBase<OtherDerived> &) const
|
||||
*/
|
||||
inline const CwiseUnaryOp<std::binder1st<std::equal_to<Scalar> >, const Derived>
|
||||
cwiseEqual(const Scalar& s) const
|
||||
{
|
||||
return CwiseUnaryOp<std::binder1st<std::equal_to<Scalar> >,const Derived>
|
||||
(derived(), std::bind1st(std::equal_to<Scalar>(), s));
|
||||
}
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
|
||||
#include <iostream>
|
||||
|
||||
#if (defined __GNUC__)
|
||||
#if (defined __GNUC__) && (!defined __MINGW32__) && (!defined __CYGWIN__)
|
||||
#define EIGEN_WEAK_LINKING __attribute__ ((weak))
|
||||
#else
|
||||
#define EIGEN_WEAK_LINKING
|
||||
|
|
|
@ -322,22 +322,21 @@ macro(ei_get_compilerver VAR)
|
|||
endif()
|
||||
else()
|
||||
# on all other system we rely on ${CMAKE_CXX_COMPILER}
|
||||
# supporting a "--version" flag
|
||||
# supporting a "--version" or "/version" flag
|
||||
|
||||
# check whether the head command exists
|
||||
find_program(HEAD_EXE head NO_CMAKE_ENVIRONMENT_PATH NO_CMAKE_PATH NO_CMAKE_SYSTEM_PATH)
|
||||
if(HEAD_EXE)
|
||||
execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version
|
||||
COMMAND head -n 1
|
||||
OUTPUT_VARIABLE eigen_cxx_compiler_version_string OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
if(WIN32 AND ${CMAKE_CXX_COMPILER_ID} EQUAL "Intel")
|
||||
set(EIGEN_CXX_FLAG_VERSION "/version")
|
||||
else()
|
||||
execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version
|
||||
OUTPUT_VARIABLE eigen_cxx_compiler_version_string OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
string(REGEX REPLACE "[\n\r].*" "" eigen_cxx_compiler_version_string ${eigen_cxx_compiler_version_string})
|
||||
set(EIGEN_CXX_FLAG_VERSION "--version")
|
||||
endif()
|
||||
|
||||
execute_process(COMMAND ${CMAKE_CXX_COMPILER} ${EIGEN_CXX_FLAG_VERSION}
|
||||
OUTPUT_VARIABLE eigen_cxx_compiler_version_string OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
string(REGEX REPLACE "[\n\r].*" "" eigen_cxx_compiler_version_string ${eigen_cxx_compiler_version_string})
|
||||
|
||||
ei_get_compilerver_from_cxx_version_string("${eigen_cxx_compiler_version_string}" CNAME CVER)
|
||||
set(${VAR} "${CNAME}-${CVER}")
|
||||
|
||||
endif()
|
||||
endmacro(ei_get_compilerver)
|
||||
|
||||
|
|
|
@ -26,7 +26,7 @@ macro(_metis_check_version)
|
|||
string(REGEX MATCH "define[ \t]+METIS_VER_SUBMINOR[ \t]+([0-9]+)" _metis_subminor_version_match "${_metis_version_header}")
|
||||
set(METIS_SUBMINOR_VERSION "${CMAKE_MATCH_1}")
|
||||
if(NOT METIS_MAJOR_VERSION)
|
||||
message(WARNING "Could not determine Metis version. Assuming version 4.0.0")
|
||||
message(STATUS "Could not determine Metis version. Assuming version 4.0.0")
|
||||
set(METIS_VERSION 4.0.0)
|
||||
else()
|
||||
set(METIS_VERSION ${METIS_MAJOR_VERSION}.${METIS_MINOR_VERSION}.${METIS_SUBMINOR_VERSION})
|
||||
|
|
|
@ -91,7 +91,8 @@ add_custom_target(doc ALL
|
|||
COMMAND doxygen Doxyfile-unsupported
|
||||
COMMAND ${CMAKE_COMMAND} -E rename html eigen-doc
|
||||
COMMAND ${CMAKE_COMMAND} -E remove eigen-doc/eigen-doc.tgz
|
||||
COMMAND ${CMAKE_COMMAND} -E tar cfz eigen-doc/eigen-doc.tgz eigen-doc
|
||||
COMMAND ${CMAKE_COMMAND} -E tar cfz eigen-doc.tgz eigen-doc
|
||||
COMMAND ${CMAKE_COMMAND} -E rename eigen-doc.tgz eigen-doc/eigen-doc.tgz
|
||||
COMMAND ${CMAKE_COMMAND} -E rename eigen-doc html
|
||||
WORKING_DIRECTORY ${Eigen_BINARY_DIR}/doc)
|
||||
|
||||
|
|
|
@ -222,7 +222,7 @@ ALIASES = "only_for_vectors=This is only for vectors (either row-
|
|||
"note_about_using_kernel_to_study_multiple_solutions=If you need a complete analysis of the space of solutions, take the one solution obtained by this method and add to it elements of the kernel, as determined by kernel()." \
|
||||
"note_about_checking_solutions=This method just tries to find as good a solution as possible. If you want to check whether a solution exists or if it is accurate, just call this function to get a result and then compute the error of this result, or use MatrixBase::isApprox() directly, for instance like this: \code bool a_solution_exists = (A*result).isApprox(b, precision); \endcode This method avoids dividing by zero, so that the non-existence of a solution doesn't by itself mean that you'll get \c inf or \c nan values." \
|
||||
"note_try_to_help_rvo=This function returns the result by value. In order to make that efficient, it is implemented as just a return statement using a special constructor, hopefully allowing the compiler to perform a RVO (return value optimization)." \
|
||||
"nonstableyet=\warning This is not considered to be part of the stable public API yet. Changes may happen in future releases. See \ref Experimental \"Experimental parts of Eigen\"
|
||||
"nonstableyet=\warning This is not considered to be part of the stable public API yet. Changes may happen in future releases. See \ref Experimental \"Experimental parts of Eigen\""
|
||||
|
||||
ALIASES += "eigenAutoToc= "
|
||||
ALIASES += "eigenManualPage=\defgroup"
|
||||
|
@ -315,7 +315,7 @@ IDL_PROPERTY_SUPPORT = YES
|
|||
# member in the group (if any) for the other members of the group. By default
|
||||
# all members of a group must be documented explicitly.
|
||||
|
||||
DISTRIBUTE_GROUP_DOC = NO
|
||||
DISTRIBUTE_GROUP_DOC = YES
|
||||
|
||||
# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
|
||||
# the same type (for instance a group of public functions) to be put as a
|
||||
|
@ -365,7 +365,7 @@ TYPEDEF_HIDES_STRUCT = NO
|
|||
# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
|
||||
# corresponding to a cache size of 2^16 = 65536 symbols.
|
||||
|
||||
SYMBOL_CACHE_SIZE = 0
|
||||
# SYMBOL_CACHE_SIZE = 0
|
||||
|
||||
# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
|
||||
# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
|
||||
|
@ -562,7 +562,7 @@ GENERATE_BUGLIST = NO
|
|||
# disable (NO) the deprecated list. This list is created by putting
|
||||
# \deprecated commands in the documentation.
|
||||
|
||||
GENERATE_DEPRECATEDLIST= NO
|
||||
GENERATE_DEPRECATEDLIST= YES
|
||||
|
||||
# The ENABLED_SECTIONS tag can be used to enable conditional
|
||||
# documentation sections, marked by \if sectionname ... \endif.
|
||||
|
@ -1465,13 +1465,13 @@ XML_OUTPUT = xml
|
|||
# which can be used by a validating XML parser to check the
|
||||
# syntax of the XML files.
|
||||
|
||||
XML_SCHEMA =
|
||||
# XML_SCHEMA =
|
||||
|
||||
# The XML_DTD tag can be used to specify an XML DTD,
|
||||
# which can be used by a validating XML parser to check the
|
||||
# syntax of the XML files.
|
||||
|
||||
XML_DTD =
|
||||
# XML_DTD =
|
||||
|
||||
# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
|
||||
# dump the program listings (including syntax highlighting
|
||||
|
@ -1699,7 +1699,7 @@ DOT_NUM_THREADS = 0
|
|||
# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
|
||||
# directory containing the font.
|
||||
|
||||
DOT_FONTNAME = FreeSans
|
||||
DOT_FONTNAME =
|
||||
|
||||
# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
|
||||
# The default size is 10pt.
|
||||
|
|
|
@ -11,6 +11,7 @@ namespace Eigen {

- \subpage TopicCustomizingEigen
- \subpage TopicMultiThreading
- \subpage TopicUsingIntelMKL
- \subpage TopicPitfalls
- \subpage TopicTemplateKeyword
- \subpage UserManual_UnderstandingEigen
*/

@ -0,0 +1,38 @@

namespace Eigen {

/** \page TopicPitfalls Common pitfalls

\section TopicPitfalls_template_keyword Compilation error with template methods

See this \link TopicTemplateKeyword page \endlink.

\section TopicPitfalls_auto_keyword C++11 and the auto keyword

In short: do not use the auto keyword with Eigen's expressions, unless you are 100% sure about what you are doing. In particular, do not use the auto keyword as a replacement for a Matrix<> type. Here is an example:

\code
MatrixXd A, B;
auto C = A*B;
for(...) { ... w = C * v; ...}
\endcode

In this example, the type of C is not a MatrixXd but an abstract expression representing the matrix product and storing references to A and B. Therefore, the product A*B will be carried out multiple times, once per iteration of the for loop. Moreover, if the coefficients of A or B change during the iterations, then C will evaluate to different values.

Here is another example leading to a segfault:
\code
auto C = ((A+B).eval()).transpose();
// do something with C
\endcode
The problem is that eval() returns a temporary object (in this case a MatrixXd) which is then referenced by the Transpose<> expression. However, this temporary is deleted right after the first line, so that from then on the C expression references a dead object. The same issue can occur when subexpressions are automatically evaluated by Eigen, as in the following example:
\code
VectorXd u, v;
auto C = u + (A*v).normalized();
// do something with C
\endcode
where the normalized() method has to evaluate the expensive product A*v to avoid evaluating it twice. On the other hand, the following example is perfectly fine:
\code
auto C = (u + (A*v).normalized()).eval();
\endcode
In this case, C will be a regular VectorXd object.
*/
}
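A short companion sketch in the same spirit (not part of the page above): naming the result type, or calling eval() on the whole right-hand side, turns the expression into a concrete object and avoids the dangling-reference problems just described.

\code
#include <Eigen/Dense>
using namespace Eigen;

int main()
{
  MatrixXd A = MatrixXd::Random(3,3), B = MatrixXd::Random(3,3);
  MatrixXd C1 = A * B;            // explicit type: the product is evaluated once, right here
  auto     C2 = (A * B).eval();   // auto is fine once the right-hand side has been evaluated
  VectorXd v  = VectorXd::Random(3);
  VectorXd w1 = C1 * v;           // safe: C1 and C2 own their coefficients
  VectorXd w2 = C2 * v;
  return (w1 - w2).norm() < 1e-12 ? 0 : 1;
}
\endcode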
@ -17,7 +17,7 @@ You can control the number of thread that will be used using either the OpenMP A

Unless setNbThreads has been called, Eigen uses the number of threads specified by OpenMP. You can restore this behavior by calling \code setNbThreads(0); \endcode
You can query the number of threads that will be used with:
\code
n = Eigen::nbThreads(n);
n = Eigen::nbThreads( );
\endcode
You can disable Eigen's multi-threading at compile time by defining the EIGEN_DONT_PARALLELIZE preprocessor token.
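For completeness, a minimal sketch of the API described above (it assumes Eigen was compiled with OpenMP enabled, e.g. -fopenmp; without OpenMP these calls are harmless no-ops):

\code
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::setNbThreads(4);                   // request 4 threads for Eigen's parallel kernels
  std::cout << Eigen::nbThreads() << " threads will be used\n";

  Eigen::MatrixXd A = Eigen::MatrixXd::Random(512, 512);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(512, 512);
  Eigen::MatrixXd C = A * B;                // large products are parallelized

  Eigen::setNbThreads(0);                   // hand control back to OpenMP's settings
  std::cout << C.norm() << "\n";
}
\endcode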
@ -40,7 +40,8 @@ Since Eigen version 3.1 and later, users can benefit from built-in Intel MKL opt

<a href="http://eigen.tuxfamily.org/Counter/redirect_to_mkl.php"> Intel MKL </a> provides highly optimized multi-threaded mathematical routines for x86-compatible architectures.
Intel MKL is available on Linux, Mac and Windows for both Intel64 and IA32 architectures.

\warning Be aware that Intel® MKL is a proprietary software. It is the responsibility of the users to buy MKL licenses for their products. Moreover, the license of the user product has to allow linking to proprietary software that excludes any unmodified versions of the GPL.
\note
Intel® MKL is a proprietary software and it is the responsibility of users to buy or register for community (free) Intel MKL licenses for their products. Moreover, the license of the user product has to allow linking to proprietary software that excludes any unmodified versions of the GPL.

Using Intel MKL through Eigen is easy:
-# define the \c EIGEN_USE_MKL_ALL macro before including any Eigen's header
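A sketch of that first step (compiling and linking against MKL itself must follow Intel's link-line documentation and is omitted here):

\code
// Must come before the first Eigen include so that all MKL-backed code paths are enabled.
#define EIGEN_USE_MKL_ALL
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(1000, 1000);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(1000, 1000);
  Eigen::MatrixXd C = A * B;   // dispatched to MKL BLAS when EIGEN_USE_MKL_ALL is defined
  return C.size() > 0 ? 0 : 1;
}
\endcode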
@ -10,9 +10,10 @@ if(QT4_FOUND)
|
|||
target_link_libraries(Tutorial_sparse_example ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO} ${QT_QTCORE_LIBRARY} ${QT_QTGUI_LIBRARY})
|
||||
|
||||
add_custom_command(
|
||||
TARGET Tutorial_sparse_example
|
||||
POST_BUILD
|
||||
COMMAND Tutorial_sparse_example ARGS ${CMAKE_CURRENT_BINARY_DIR}/../html/Tutorial_sparse_example.jpeg
|
||||
TARGET Tutorial_sparse_example
|
||||
POST_BUILD
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/../html/
|
||||
COMMAND Tutorial_sparse_example ARGS ${CMAKE_CURRENT_BINARY_DIR}/../html/Tutorial_sparse_example.jpeg
|
||||
)
|
||||
|
||||
add_dependencies(all_examples Tutorial_sparse_example)
|
||||
|
|
|
@ -32,6 +32,17 @@ ei_add_failtest("ref_3")
|
|||
ei_add_failtest("ref_4")
|
||||
ei_add_failtest("ref_5")
|
||||
|
||||
ei_add_failtest("partialpivlu_int")
|
||||
ei_add_failtest("fullpivlu_int")
|
||||
ei_add_failtest("llt_int")
|
||||
ei_add_failtest("ldlt_int")
|
||||
ei_add_failtest("qr_int")
|
||||
ei_add_failtest("colpivqr_int")
|
||||
ei_add_failtest("fullpivqr_int")
|
||||
ei_add_failtest("jacobisvd_int")
|
||||
ei_add_failtest("eigensolver_int")
|
||||
ei_add_failtest("eigensolver_cplx")
|
||||
|
||||
if (EIGEN_FAILTEST_FAILURE_COUNT)
|
||||
message(FATAL_ERROR
|
||||
"${EIGEN_FAILTEST_FAILURE_COUNT} out of ${EIGEN_FAILTEST_COUNT} failtests FAILED. "
|
||||
|
|
|
@ -0,0 +1,14 @@
|
|||
#include "../Eigen/QR"
|
||||
|
||||
#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
|
||||
#define SCALAR int
|
||||
#else
|
||||
#define SCALAR float
|
||||
#endif
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
ColPivHouseholderQR<Matrix<SCALAR,Dynamic,Dynamic> > qr(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
|
||||
}
|
|
@ -0,0 +1,14 @@
|
|||
#include "../Eigen/Eigenvalues"
|
||||
|
||||
#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
|
||||
#define SCALAR std::complex<double>
|
||||
#else
|
||||
#define SCALAR float
|
||||
#endif
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
EigenSolver<Matrix<SCALAR,Dynamic,Dynamic> > eig(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
|
||||
}
|
|
@ -0,0 +1,14 @@
|
|||
#include "../Eigen/Eigenvalues"
|
||||
|
||||
#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
|
||||
#define SCALAR int
|
||||
#else
|
||||
#define SCALAR float
|
||||
#endif
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
EigenSolver<Matrix<SCALAR,Dynamic,Dynamic> > eig(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
|
||||
}
|
|
@ -0,0 +1,14 @@
|
|||
#include "../Eigen/LU"
|
||||
|
||||
#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
|
||||
#define SCALAR int
|
||||
#else
|
||||
#define SCALAR float
|
||||
#endif
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
FullPivLU<Matrix<SCALAR,Dynamic,Dynamic> > lu(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
|
||||
}
|
|
@ -0,0 +1,14 @@
|
|||
#include "../Eigen/QR"
|
||||
|
||||
#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
|
||||
#define SCALAR int
|
||||
#else
|
||||
#define SCALAR float
|
||||
#endif
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
FullPivHouseholderQR<Matrix<SCALAR,Dynamic,Dynamic> > qr(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
|
||||
}
|
|
@ -0,0 +1,14 @@
|
|||
#include "../Eigen/SVD"
|
||||
|
||||
#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
|
||||
#define SCALAR int
|
||||
#else
|
||||
#define SCALAR float
|
||||
#endif
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
JacobiSVD<Matrix<SCALAR,Dynamic,Dynamic> > qr(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
|
||||
}
|
|
@ -0,0 +1,14 @@
|
|||
#include "../Eigen/Cholesky"
|
||||
|
||||
#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
|
||||
#define SCALAR int
|
||||
#else
|
||||
#define SCALAR float
|
||||
#endif
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
LDLT<Matrix<SCALAR,Dynamic,Dynamic> > ldlt(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
|
||||
}
|
|
@ -0,0 +1,14 @@
|
|||
#include "../Eigen/Cholesky"
|
||||
|
||||
#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
|
||||
#define SCALAR int
|
||||
#else
|
||||
#define SCALAR float
|
||||
#endif
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
LLT<Matrix<SCALAR,Dynamic,Dynamic> > llt(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
|
||||
}
|
|
@ -0,0 +1,14 @@
|
|||
#include "../Eigen/LU"
|
||||
|
||||
#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
|
||||
#define SCALAR int
|
||||
#else
|
||||
#define SCALAR float
|
||||
#endif
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
PartialPivLU<Matrix<SCALAR,Dynamic,Dynamic> > lu(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
|
||||
}
|
|
@ -0,0 +1,14 @@
|
|||
#include "../Eigen/QR"
|
||||
|
||||
#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
|
||||
#define SCALAR int
|
||||
#else
|
||||
#define SCALAR float
|
||||
#endif
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
int main()
|
||||
{
|
||||
HouseholderQR<Matrix<SCALAR,Dynamic,Dynamic> > qr(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
|
||||
}
|
|
@ -0,0 +1,18 @@
|
|||
#include "../Eigen/Core"
|
||||
|
||||
#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
|
||||
#define CV_QUALIFIER const
|
||||
#else
|
||||
#define CV_QUALIFIER
|
||||
#endif
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
void call_ref(Ref<VectorXf> a) { }
|
||||
|
||||
int main()
|
||||
{
|
||||
VectorXf a(10);
|
||||
CV_QUALIFIER VectorXf& ac(a);
|
||||
call_ref(ac);
|
||||
}
|
|
@ -0,0 +1,15 @@
|
|||
#include "../Eigen/Core"
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
void call_ref(Ref<VectorXf> a) { }
|
||||
|
||||
int main()
|
||||
{
|
||||
MatrixXf A(10,10);
|
||||
#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
|
||||
call_ref(A.row(3));
|
||||
#else
|
||||
call_ref(A.col(3));
|
||||
#endif
|
||||
}
|
|
@ -0,0 +1,15 @@
|
|||
#include "../Eigen/Core"
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
|
||||
void call_ref(Ref<VectorXf> a) { }
|
||||
#else
|
||||
void call_ref(const Ref<const VectorXf> &a) { }
|
||||
#endif
|
||||
|
||||
int main()
|
||||
{
|
||||
VectorXf a(10);
|
||||
call_ref(a+a);
|
||||
}
|
|
@ -0,0 +1,15 @@
|
|||
#include "../Eigen/Core"
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
void call_ref(Ref<MatrixXf,0,OuterStride<> > a) {}
|
||||
|
||||
int main()
|
||||
{
|
||||
MatrixXf A(10,10);
|
||||
#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
|
||||
call_ref(A.transpose());
|
||||
#else
|
||||
call_ref(A);
|
||||
#endif
|
||||
}
|
|
@ -0,0 +1,16 @@
|
|||
#include "../Eigen/Core"
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
void call_ref(Ref<VectorXf> a) { }
|
||||
|
||||
int main()
|
||||
{
|
||||
VectorXf a(10);
|
||||
DenseBase<VectorXf> &ac(a);
|
||||
#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
|
||||
call_ref(ac);
|
||||
#else
|
||||
call_ref(ac.derived());
|
||||
#endif
|
||||
}
|
|
@ -283,3 +283,9 @@ mark_as_advanced(EIGEN_TEST_EIGEN2)
|
|||
if(EIGEN_TEST_EIGEN2)
|
||||
add_subdirectory(eigen2)
|
||||
endif()
|
||||
|
||||
|
||||
option(EIGEN_TEST_BUILD_DOCUMENTATION "Test building the doxygen documentation" OFF)
|
||||
IF(EIGEN_TEST_BUILD_DOCUMENTATION)
|
||||
add_dependencies(buildtests doc)
|
||||
ENDIF()
|
||||
|
|
|
@ -109,6 +109,8 @@ template<typename ArrayType> void comparisons(const ArrayType& m)
|
|||
VERIFY(! (m1 < m3).all() );
|
||||
VERIFY(! (m1 > m3).all() );
|
||||
}
|
||||
VERIFY(!(m1 > m2 && m1 < m2).any());
|
||||
VERIFY((m1 <= m2 || m1 >= m2).all());
|
||||
|
||||
// comparisons to scalar
|
||||
VERIFY( (m1 != (m1(r,c)+1) ).any() );
|
||||
|
|
|
@ -102,6 +102,7 @@ template<typename MatrixType> void comparisons(const MatrixType& m)
|
|||
VERIFY( (m1.array() > (m1(r,c)-1) ).any() );
|
||||
VERIFY( (m1.array() < (m1(r,c)+1) ).any() );
|
||||
VERIFY( (m1.array() == m1(r,c) ).any() );
|
||||
VERIFY( m1.cwiseEqual(m1(r,c)).any() );
|
||||
|
||||
// test Select
|
||||
VERIFY_IS_APPROX( (m1.array()<m2.array()).select(m1,m2), m1.cwiseMin(m2) );
|
||||
|
|
|
@ -12,13 +12,15 @@

template<typename T> void test_conjugate_gradient_T()
{
ConjugateGradient<SparseMatrix<T>, Lower> cg_colmajor_lower_diag;
ConjugateGradient<SparseMatrix<T>, Upper> cg_colmajor_upper_diag;
ConjugateGradient<SparseMatrix<T>, Lower > cg_colmajor_lower_diag;
ConjugateGradient<SparseMatrix<T>, Upper > cg_colmajor_upper_diag;
ConjugateGradient<SparseMatrix<T>, Lower|Upper> cg_colmajor_loup_diag;
ConjugateGradient<SparseMatrix<T>, Lower, IdentityPreconditioner> cg_colmajor_lower_I;
ConjugateGradient<SparseMatrix<T>, Upper, IdentityPreconditioner> cg_colmajor_upper_I;

CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_lower_diag) );
CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_upper_diag) );
CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_loup_diag) );
CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_lower_I) );
CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_upper_I) );
}
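The Lower|Upper variant exercised above targets a symmetric positive definite matrix with both triangles stored. A minimal stand-alone sketch (a hand-built 1D Laplacian serves as the test matrix):

\code
#include <Eigen/Sparse>
#include <iostream>
using namespace Eigen;

int main()
{
  // 1D Laplacian: tridiagonal, symmetric positive definite, both triangles stored.
  const int n = 100;
  SparseMatrix<double> A(n, n);
  for (int i = 0; i < n; ++i) {
    A.insert(i, i) = 2.0;
    if (i > 0)     A.insert(i, i - 1) = -1.0;
    if (i + 1 < n) A.insert(i, i + 1) = -1.0;
  }
  A.makeCompressed();
  VectorXd b = VectorXd::Ones(n);

  ConjugateGradient<SparseMatrix<double>, Lower|Upper> cg;  // use both stored triangles
  cg.compute(A);
  VectorXd x = cg.solve(b);
  std::cout << "#iterations: " << cg.iterations()
            << ", estimated error: " << cg.error() << "\n";
}
\endcode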
@ -172,6 +172,8 @@ void test_geo_alignedbox()
|
|||
CALL_SUBTEST_9( alignedbox(AlignedBox1i()) );
|
||||
CALL_SUBTEST_10( alignedbox(AlignedBox2i()) );
|
||||
CALL_SUBTEST_11( alignedbox(AlignedBox3i()) );
|
||||
|
||||
CALL_SUBTEST_14( alignedbox(AlignedBox<double,Dynamic>(4)) );
|
||||
}
|
||||
CALL_SUBTEST_12( specificTest1() );
|
||||
CALL_SUBTEST_13( specificTest2() );
|
||||
|
|
|
@ -100,7 +100,9 @@ template<typename MatrixType> void lu_invertible()
|
|||
LU.h
|
||||
*/
|
||||
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
|
||||
int size = internal::random<int>(1,EIGEN_TEST_MAX_SIZE);
|
||||
DenseIndex size = MatrixType::RowsAtCompileTime;
|
||||
if( size==Dynamic)
|
||||
size = internal::random<DenseIndex>(1,EIGEN_TEST_MAX_SIZE);
|
||||
|
||||
MatrixType m1(size, size), m2(size, size), m3(size, size);
|
||||
FullPivLU<MatrixType> lu;
|
||||
|
@ -122,6 +124,10 @@ template<typename MatrixType> void lu_invertible()
|
|||
m2 = lu.solve(m3);
|
||||
VERIFY_IS_APPROX(m3, m1*m2);
|
||||
VERIFY_IS_APPROX(m2, lu.inverse()*m3);
|
||||
|
||||
// Regression test for Bug 302
|
||||
MatrixType m4 = MatrixType::Random(size,size);
|
||||
VERIFY_IS_APPROX(lu.solve(m3*m4), lu.solve(m3)*m4);
|
||||
}
|
||||
|
||||
template<typename MatrixType> void lu_partial_piv()
|
||||
|
@ -171,6 +177,7 @@ void test_lu()
|
|||
{
|
||||
for(int i = 0; i < g_repeat; i++) {
|
||||
CALL_SUBTEST_1( lu_non_invertible<Matrix3f>() );
|
||||
CALL_SUBTEST_1( lu_invertible<Matrix3f>() );
|
||||
CALL_SUBTEST_1( lu_verify_assert<Matrix3f>() );
|
||||
|
||||
CALL_SUBTEST_2( (lu_non_invertible<Matrix<double, 4, 6> >()) );
|
||||
|
|
|
@ -114,6 +114,28 @@ template<typename PlainObjectType> void check_const_correctness(const PlainObjec
|
|||
VERIFY( !(Map<ConstPlainObjectType, Aligned>::Flags & LvalueBit) );
|
||||
}
|
||||
|
||||
template<typename Scalar>
|
||||
void map_not_aligned_on_scalar()
|
||||
{
|
||||
typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
|
||||
typedef typename MatrixType::Index Index;
|
||||
Index size = 11;
|
||||
Scalar* array1 = internal::aligned_new<Scalar>((size+1)*(size+1)+1);
|
||||
Scalar* array2 = reinterpret_cast<Scalar*>(sizeof(Scalar)/2+std::size_t(array1));
|
||||
Map<MatrixType,0,OuterStride<> > map2(array2, size, size, OuterStride<>(size+1));
|
||||
MatrixType m2 = MatrixType::Random(size,size);
|
||||
map2 = m2;
|
||||
VERIFY_IS_EQUAL(m2, map2);
|
||||
|
||||
typedef Matrix<Scalar,Dynamic,1> VectorType;
|
||||
Map<VectorType> map3(array2, size);
|
||||
MatrixType v3 = VectorType::Random(size);
|
||||
map3 = v3;
|
||||
VERIFY_IS_EQUAL(v3, map3);
|
||||
|
||||
internal::aligned_delete(array1, (size+1)*(size+1)+1);
|
||||
}
|
||||
|
||||
void test_mapped_matrix()
|
||||
{
|
||||
for(int i = 0; i < g_repeat; i++) {
|
||||
|
@ -137,5 +159,7 @@ void test_mapped_matrix()
|
|||
CALL_SUBTEST_8( map_static_methods(RowVector3d()) );
|
||||
CALL_SUBTEST_9( map_static_methods(VectorXcd(8)) );
|
||||
CALL_SUBTEST_10( map_static_methods(VectorXf(12)) );
|
||||
|
||||
CALL_SUBTEST_11( map_not_aligned_on_scalar<double>() );
|
||||
}
|
||||
}
|
||||
|
|