Merge remote-tracking branch 'origin/develop' into feature/ImuFactorPush2
Conflicts: gtsam/navigation/tests/testImuFactor.cpp
commit a03924eb85
@@ -131,7 +131,7 @@ endif()
 
 if(NOT (${Boost_VERSION} LESS 105600))
 message("Ignoring Boost restriction on optional lvalue assignment from rvalues")
-add_definitions(-DBOOST_OPTIONAL_ALLOW_BINDING_TO_RVALUES)
+add_definitions(-DBOOST_OPTIONAL_ALLOW_BINDING_TO_RVALUES -DBOOST_OPTIONAL_CONFIG_ALLOW_BINDING_TO_RVALUES)
 endif()
 
 ###############################################################################
@@ -34,19 +34,19 @@ if(NOT FIRST_PASS_DONE)
 set(CMAKE_MODULE_LINKER_FLAGS_PROFILING "${CMAKE_MODULE_LINKER_FLAGS_RELEASE}" CACHE STRING "Linker flags during profiling builds." FORCE)
 mark_as_advanced(CMAKE_C_FLAGS_PROFILING CMAKE_CXX_FLAGS_PROFILING CMAKE_EXE_LINKER_FLAGS_PROFILING CMAKE_SHARED_LINKER_FLAGS_PROFILING CMAKE_MODULE_LINKER_FLAGS_PROFILING)
 else()
-set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -fno-inline -Wall -DEIGEN_INITIALIZE_MATRICES_BY_NAN" CACHE STRING "Flags used by the compiler during debug builds." FORCE)
+set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -std=c11 -fno-inline -Wall -DEIGEN_INITIALIZE_MATRICES_BY_NAN" CACHE STRING "Flags used by the compiler during debug builds." FORCE)
-set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-inline -Wall -DEIGEN_INITIALIZE_MATRICES_BY_NAN" CACHE STRING "Flags used by the compiler during debug builds." FORCE)
+set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -std=c++11 -fno-inline -Wall -DEIGEN_INITIALIZE_MATRICES_BY_NAN" CACHE STRING "Flags used by the compiler during debug builds." FORCE)
-set(CMAKE_C_FLAGS_RELWITHDEBINFO "-g -O3 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during relwithdebinfo builds." FORCE)
+set(CMAKE_C_FLAGS_RELWITHDEBINFO "-std=c11 -g -O3 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during relwithdebinfo builds." FORCE)
-set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-g -O3 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during relwithdebinfo builds." FORCE)
+set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-std=c++11 -g -O3 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during relwithdebinfo builds." FORCE)
-set(CMAKE_C_FLAGS_RELEASE "-O3 -Wall -DNDEBUG -Wall" CACHE STRING "Flags used by the compiler during release builds." FORCE)
+set(CMAKE_C_FLAGS_RELEASE "-std=c11 -O3 -Wall -DNDEBUG -Wall" CACHE STRING "Flags used by the compiler during release builds." FORCE)
-set(CMAKE_CXX_FLAGS_RELEASE "-O3 -Wall -DNDEBUG -Wall" CACHE STRING "Flags used by the compiler during release builds." FORCE)
+set(CMAKE_CXX_FLAGS_RELEASE "-std=c++11 -O3 -Wall -DNDEBUG -Wall" CACHE STRING "Flags used by the compiler during release builds." FORCE)
 set(CMAKE_C_FLAGS_TIMING "${CMAKE_C_FLAGS_RELEASE} -DENABLE_TIMING" CACHE STRING "Flags used by the compiler during timing builds." FORCE)
 set(CMAKE_CXX_FLAGS_TIMING "${CMAKE_CXX_FLAGS_RELEASE} -DENABLE_TIMING" CACHE STRING "Flags used by the compiler during timing builds." FORCE)
 set(CMAKE_EXE_LINKER_FLAGS_TIMING "${CMAKE_EXE_LINKER_FLAGS_RELEASE}" CACHE STRING "Linker flags during timing builds." FORCE)
 set(CMAKE_SHARED_LINKER_FLAGS_TIMING "${CMAKE_EXE_LINKER_FLAGS_RELEASE}" CACHE STRING "Linker flags during timing builds." FORCE)
 mark_as_advanced(CMAKE_C_FLAGS_TIMING CMAKE_CXX_FLAGS_TIMING CMAKE_EXE_LINKER_FLAGS_TIMING CMAKE_SHARED_LINKER_FLAGS_TIMING)
-set(CMAKE_C_FLAGS_PROFILING "-g -O3 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during profiling builds." FORCE)
+set(CMAKE_C_FLAGS_PROFILING "-std=c11 -g -O3 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during profiling builds." FORCE)
-set(CMAKE_CXX_FLAGS_PROFILING "-g -O3 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during profiling builds." FORCE)
+set(CMAKE_CXX_FLAGS_PROFILING "-std=c++11 -g -O3 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during profiling builds." FORCE)
 set(CMAKE_EXE_LINKER_FLAGS_PROFILING "${CMAKE_EXE_LINKER_FLAGS_RELEASE}" CACHE STRING "Linker flags during profiling builds." FORCE)
 set(CMAKE_SHARED_LINKER_FLAGS_PROFILING "${CMAKE__LINKER_FLAGS_RELEASE}" CACHE STRING "Linker flags during profiling builds." FORCE)
 mark_as_advanced(CMAKE_C_FLAGS_PROFILING CMAKE_CXX_FLAGS_PROFILING CMAKE_EXE_LINKER_FLAGS_PROFILING CMAKE_SHARED_LINKER_FLAGS_PROFILING)
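For context, a minimal sketch of what the -DEIGEN_INITIALIZE_MATRICES_BY_NAN debug define used above buys you (example code, not part of the patch; assumes Eigen 3.2-era headers and a C++11 compiler, matching the -std=c++11 flag added here):

    // With the macro defined before any Eigen include, freshly allocated
    // coefficients are filled with NaN, so reads of uninitialized entries
    // are easy to spot in debug builds.
    #define EIGEN_INITIALIZE_MATRICES_BY_NAN
    #include <Eigen/Dense>
    #include <cmath>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd m(2, 2);            // entries deliberately left uninitialized
      std::cout << std::isnan(m(0, 0))    // expected to print 1 under this flag
                << std::endl;
      return 0;
    }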
@@ -1,4 +1,4 @@
 repo: 8a21fd850624c931e448cbcfb38168cb2717c790
-node: 10219c95fe653d4962aa9db4946f6fbea96dd740
+node: b30b87236a1b1552af32ac34075ee5696a9b5a33
 branch: 3.2
-tag: 3.2.4
+tag: 3.2.7
@@ -27,3 +27,6 @@ ffa86ffb557094721ca71dcea6aed2651b9fd610 3.2.0
 6b38706d90a9fe182e66ab88477b3dbde34b9f66 3.2.1
 1306d75b4a21891e59ff9bd96678882cf831e39f 3.2.2
 36fd1ba04c120cfdd90f3e4cede47f43b21d19ad 3.2.3
+10219c95fe653d4962aa9db4946f6fbea96dd740 3.2.4
+bdd17ee3b1b3a166cd5ec36dcad4fc1f3faf774a 3.2.5
+c58038c56923e0fd86de3ded18e03df442e66dfb 3.2.6
@@ -301,7 +301,7 @@ if(EIGEN_INCLUDE_INSTALL_DIR)
 )
 else()
 set(INCLUDE_INSTALL_DIR
-"${CMAKE_INSTALL_PREFIX}/include/eigen3"
+"include/eigen3"
 CACHE INTERNAL
 "The directory where we install the header files (internal)"
 )
@@ -404,7 +404,7 @@ if(cmake_generator_tolower MATCHES "makefile")
 message(STATUS "make install | Install to ${CMAKE_INSTALL_PREFIX}. To change that:")
 message(STATUS " | cmake . -DCMAKE_INSTALL_PREFIX=yourpath")
 message(STATUS " | Eigen headers will then be installed to:")
-message(STATUS " | ${INCLUDE_INSTALL_DIR}")
+message(STATUS " | ${CMAKE_INSTALL_PREFIX}/${INCLUDE_INSTALL_DIR}")
 message(STATUS " | To install Eigen headers to a separate location, do:")
 message(STATUS " | cmake . -DEIGEN_INCLUDE_INSTALL_DIR=yourpath")
 message(STATUS "make doc | Generate the API documentation, requires Doxygen & LaTeX")
@@ -123,7 +123,7 @@
 #undef bool
 #undef vector
 #undef pixel
-#elif defined __ARM_NEON__
+#elif defined __ARM_NEON
 #define EIGEN_VECTORIZE
 #define EIGEN_VECTORIZE_NEON
 #include <arm_neon.h>
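For context on the __ARM_NEON__ change, a hedged sketch of the portability point (the MY_HAS_NEON macro is illustrative, not from either tree): some older 32-bit ARM toolchains only predefine __ARM_NEON__, while AArch64 compilers following the ARM C Language Extensions predefine __ARM_NEON, so robust detection accepts both spellings.

    // Hypothetical project-side feature test covering both macro spellings.
    #if defined(__ARM_NEON) || defined(__ARM_NEON__)
    #  define MY_HAS_NEON 1
    #else
    #  define MY_HAS_NEON 0
    #endif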
@@ -14,7 +14,7 @@
 /**
 * \defgroup SparseCore_Module SparseCore module
 *
-* This module provides a sparse matrix representation, and basic associatd matrix manipulations
+* This module provides a sparse matrix representation, and basic associated matrix manipulations
 * and operations.
 *
 * See the \ref TutorialSparse "Sparse tutorial"
@@ -235,6 +235,11 @@ template<typename _MatrixType, int _UpLo> class LDLT
 }
 
 protected:
 
+static void check_template_parameters()
+{
+EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+}
+
 /** \internal
 * Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U.
@@ -434,6 +439,8 @@ template<typename MatrixType> struct LDLT_Traits<MatrixType,Upper>
 template<typename MatrixType, int _UpLo>
 LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
 {
+check_template_parameters();
+
 eigen_assert(a.rows()==a.cols());
 const Index size = a.rows();
 
@@ -457,7 +464,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
 */
 template<typename MatrixType, int _UpLo>
 template<typename Derived>
-LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename NumTraits<typename MatrixType::Scalar>::Real& sigma)
+LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename LDLT<MatrixType,_UpLo>::RealScalar& sigma)
 {
 const Index size = w.rows();
 if (m_isInitialized)
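For context, a minimal usage sketch of the guarded LDLT path (example code, not part of the patch; assumes Eigen 3.2-era headers): the new check_template_parameters() call turns an accidental integer-scalar decomposition into a compile-time error via EIGEN_STATIC_ASSERT_NON_INTEGER, while ordinary floating-point use is unchanged.

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
      A = A * A.transpose();                 // symmetric, so LDLT applies
      Eigen::LDLT<Eigen::MatrixXd> ldlt(A);  // fine: double is a non-integer Scalar
      // Computing an LDLT of an integer matrix type would now fail to compile.
      return 0;
    }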
@@ -174,6 +174,12 @@ template<typename _MatrixType, int _UpLo> class LLT
 LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1);
 
 protected:
+
+static void check_template_parameters()
+{
+EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+}
+
 /** \internal
 * Used to compute and store L
 * The strict upper part is not used and even not initialized.
@@ -283,7 +289,7 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
 return k;
 mat.coeffRef(k,k) = x = sqrt(x);
 if (k>0 && rs>0) A21.noalias() -= A20 * A10.adjoint();
-if (rs>0) A21 *= RealScalar(1)/x;
+if (rs>0) A21 /= x;
 }
 return -1;
 }
@@ -384,6 +390,8 @@ template<typename MatrixType> struct LLT_Traits<MatrixType,Upper>
 template<typename MatrixType, int _UpLo>
 LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const MatrixType& a)
 {
+check_template_parameters();
+
 eigen_assert(a.rows()==a.cols());
 const Index size = a.rows();
 m_matrix.resize(size, size);
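Likewise for LLT, a short usage sketch (not from the patch) of the rankUpdate() entry point that appears as context in these hunks: updating an existing Cholesky factor for A + sigma * w * w^T instead of refactorizing from scratch.

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
      A = A * A.transpose() + 4.0 * Eigen::MatrixXd::Identity(4, 4);  // make it SPD
      Eigen::LLT<Eigen::MatrixXd> llt(A);
      Eigen::VectorXd w = Eigen::VectorXd::Random(4);
      llt.rankUpdate(w, 1.0);   // factor now corresponds to A + 1.0 * w * w^T
      return 0;
    }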
@@ -60,7 +60,7 @@ template<> struct mkl_llt<EIGTYPE> \
 lda = m.outerStride(); \
 \
 info = LAPACKE_##MKLPREFIX##potrf( matrix_order, uplo, size, (MKLTYPE*)a, lda ); \
-info = (info==0) ? -1 : 1; \
+info = (info==0) ? -1 : info>0 ? info-1 : size; \
 return info; \
 } \
 }; \
@@ -78,7 +78,7 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
 {
 res.itype = CHOLMOD_INT;
 }
-else if (internal::is_same<_Index,UF_long>::value)
+else if (internal::is_same<_Index,SuiteSparse_long>::value)
 {
 res.itype = CHOLMOD_LONG;
 }
@@ -395,7 +395,7 @@ class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimpl
 CholmodSimplicialLLT(const MatrixType& matrix) : Base()
 {
 init();
-compute(matrix);
+Base::compute(matrix);
 }
 
 ~CholmodSimplicialLLT() {}
@@ -442,7 +442,7 @@ class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimp
 CholmodSimplicialLDLT(const MatrixType& matrix) : Base()
 {
 init();
-compute(matrix);
+Base::compute(matrix);
 }
 
 ~CholmodSimplicialLDLT() {}
@@ -487,7 +487,7 @@ class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSuper
 CholmodSupernodalLLT(const MatrixType& matrix) : Base()
 {
 init();
-compute(matrix);
+Base::compute(matrix);
 }
 
 ~CholmodSupernodalLLT() {}
@@ -534,7 +534,7 @@ class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecom
 CholmodDecomposition(const MatrixType& matrix) : Base()
 {
 init();
-compute(matrix);
+Base::compute(matrix);
 }
 
 ~CholmodDecomposition() {}
@@ -124,6 +124,21 @@ class Array
 }
 #endif
 
+#ifdef EIGEN_HAVE_RVALUE_REFERENCES
+Array(Array&& other)
+: Base(std::move(other))
+{
+Base::_check_template_params();
+if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic)
+Base::_set_noalias(other);
+}
+Array& operator=(Array&& other)
+{
+other.swap(*this);
+return *this;
+}
+#endif
+
 /** Constructs a vector or row-vector with given dimension. \only_for_vectors
 *
 * Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
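A minimal sketch of what the new move constructor and move assignment enable once EIGEN_HAVE_RVALUE_REFERENCES is active (example code, not part of the patch; assumes a C++11 compiler):

    #include <Eigen/Dense>
    #include <utility>

    Eigen::ArrayXd make_big()
    {
      Eigen::ArrayXd a = Eigen::ArrayXd::LinSpaced(1000000, 0.0, 1.0);
      return a;                  // moved (or elided) rather than deep-copied
    }

    int main()
    {
      Eigen::ArrayXd a = make_big();
      Eigen::ArrayXd b;
      b = std::move(a);          // the new operator= swaps buffers, no reallocation
      return 0;
    }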
@@ -46,9 +46,6 @@ template<typename Derived> class ArrayBase
 
 typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl;
 
-using internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
-typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;
-
 typedef typename internal::traits<Derived>::StorageKind StorageKind;
 typedef typename internal::traits<Derived>::Index Index;
 typedef typename internal::traits<Derived>::Scalar Scalar;
@@ -56,6 +53,7 @@ template<typename Derived> class ArrayBase
 typedef typename NumTraits<Scalar>::Real RealScalar;
 
 typedef DenseBase<Derived> Base;
+using Base::operator*;
 using Base::RowsAtCompileTime;
 using Base::ColsAtCompileTime;
 using Base::SizeAtCompileTime;
@@ -439,19 +439,26 @@ struct assign_impl<Derived1, Derived2, SliceVectorizedTraversal, NoUnrolling, Ve
 typedef typename Derived1::Index Index;
 static inline void run(Derived1 &dst, const Derived2 &src)
 {
-typedef packet_traits<typename Derived1::Scalar> PacketTraits;
+typedef typename Derived1::Scalar Scalar;
+typedef packet_traits<Scalar> PacketTraits;
 enum {
 packetSize = PacketTraits::size,
 alignable = PacketTraits::AlignedOnScalar,
-dstAlignment = alignable ? Aligned : int(assign_traits<Derived1,Derived2>::DstIsAligned) ,
+dstIsAligned = assign_traits<Derived1,Derived2>::DstIsAligned,
+dstAlignment = alignable ? Aligned : int(dstIsAligned),
 srcAlignment = assign_traits<Derived1,Derived2>::JointAlignment
 };
+const Scalar *dst_ptr = &dst.coeffRef(0,0);
+if((!bool(dstIsAligned)) && (size_t(dst_ptr) % sizeof(Scalar))>0)
+{
+// the pointer is not aligend-on scalar, so alignment is not possible
+return assign_impl<Derived1,Derived2,DefaultTraversal,NoUnrolling>::run(dst, src);
+}
 const Index packetAlignedMask = packetSize - 1;
 const Index innerSize = dst.innerSize();
 const Index outerSize = dst.outerSize();
 const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0;
-Index alignedStart = ((!alignable) || assign_traits<Derived1,Derived2>::DstIsAligned) ? 0
-: internal::first_aligned(&dst.coeffRef(0,0), innerSize);
+Index alignedStart = ((!alignable) || bool(dstIsAligned)) ? 0 : internal::first_aligned(dst_ptr, innerSize);
 
 for(Index outer = 0; outer < outerSize; ++outer)
 {
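The early-out added above hinges on the destination pointer being aligned at least on the scalar size; a standalone illustration of that test (a hedged sketch, not Eigen internals):

    #include <cstdint>
    #include <iostream>

    template <typename Scalar>
    bool scalar_aligned(const Scalar* p)
    {
      // True when the address is a multiple of sizeof(Scalar); only then can a
      // vectorized path meaningfully search for a packet-aligned starting index.
      return reinterpret_cast<std::uintptr_t>(p) % sizeof(Scalar) == 0;
    }

    int main()
    {
      double buf[4] = {0.0, 0.0, 0.0, 0.0};
      std::cout << scalar_aligned(buf) << std::endl;   // expected: 1
      return 0;
    }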
@@ -66,8 +66,9 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprTyp
 : ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime)
 : int(traits<XprType>::MaxColsAtCompileTime),
 XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0,
-IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
-: (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
+IsDense = is_same<StorageKind,Dense>::value,
+IsRowMajor = (IsDense&&MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
+: (IsDense&&MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
 : XprTypeIsRowMajor,
 HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor),
 InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
@@ -1,154 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_COMMAINITIALIZER_H
-#define EIGEN_COMMAINITIALIZER_H
-
-namespace Eigen {
-
-/** \class CommaInitializer
-* \ingroup Core_Module
-*
-* \brief Helper class used by the comma initializer operator
-*
-* This class is internally used to implement the comma initializer feature. It is
-* the return type of MatrixBase::operator<<, and most of the time this is the only
-* way it is used.
-*
-* \sa \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished()
-*/
-template<typename XprType>
-struct CommaInitializer
-{
-typedef typename XprType::Scalar Scalar;
-typedef typename XprType::Index Index;
-
-inline CommaInitializer(XprType& xpr, const Scalar& s)
-: m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1)
-{
-m_xpr.coeffRef(0,0) = s;
-}
-
-template<typename OtherDerived>
-inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other)
-: m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows())
-{
-m_xpr.block(0, 0, other.rows(), other.cols()) = other;
-}
-
-/* Copy/Move constructor which transfers ownership. This is crucial in
-* absence of return value optimization to avoid assertions during destruction. */
-// FIXME in C++11 mode this could be replaced by a proper RValue constructor
-inline CommaInitializer(const CommaInitializer& o)
-: m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) {
-// Mark original object as finished. In absence of R-value references we need to const_cast:
-const_cast<CommaInitializer&>(o).m_row = m_xpr.rows();
-const_cast<CommaInitializer&>(o).m_col = m_xpr.cols();
-const_cast<CommaInitializer&>(o).m_currentBlockRows = 0;
-}
-
-/* inserts a scalar value in the target matrix */
-CommaInitializer& operator,(const Scalar& s)
-{
-if (m_col==m_xpr.cols())
-{
-m_row+=m_currentBlockRows;
-m_col = 0;
-m_currentBlockRows = 1;
-eigen_assert(m_row<m_xpr.rows()
-&& "Too many rows passed to comma initializer (operator<<)");
-}
-eigen_assert(m_col<m_xpr.cols()
-&& "Too many coefficients passed to comma initializer (operator<<)");
-eigen_assert(m_currentBlockRows==1);
-m_xpr.coeffRef(m_row, m_col++) = s;
-return *this;
-}
-
-/* inserts a matrix expression in the target matrix */
-template<typename OtherDerived>
-CommaInitializer& operator,(const DenseBase<OtherDerived>& other)
-{
-if(other.cols()==0 || other.rows()==0)
-return *this;
-if (m_col==m_xpr.cols())
-{
-m_row+=m_currentBlockRows;
-m_col = 0;
-m_currentBlockRows = other.rows();
-eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows()
-&& "Too many rows passed to comma initializer (operator<<)");
-}
-eigen_assert(m_col<m_xpr.cols()
-&& "Too many coefficients passed to comma initializer (operator<<)");
-eigen_assert(m_currentBlockRows==other.rows());
-if (OtherDerived::SizeAtCompileTime != Dynamic)
-m_xpr.template block<OtherDerived::RowsAtCompileTime != Dynamic ? OtherDerived::RowsAtCompileTime : 1,
-OtherDerived::ColsAtCompileTime != Dynamic ? OtherDerived::ColsAtCompileTime : 1>
-(m_row, m_col) = other;
-else
-m_xpr.block(m_row, m_col, other.rows(), other.cols()) = other;
-m_col += other.cols();
-return *this;
-}
-
-inline ~CommaInitializer()
-{
-eigen_assert((m_row+m_currentBlockRows) == m_xpr.rows()
-&& m_col == m_xpr.cols()
-&& "Too few coefficients passed to comma initializer (operator<<)");
-}
-
-/** \returns the built matrix once all its coefficients have been set.
-* Calling finished is 100% optional. Its purpose is to write expressions
-* like this:
-* \code
-* quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished());
-* \endcode
-*/
-inline XprType& finished() { return m_xpr; }
-
-XprType& m_xpr; // target expression
-Index m_row; // current row id
-Index m_col; // current col id
-Index m_currentBlockRows; // current block height
-};
-
-/** \anchor MatrixBaseCommaInitRef
-* Convenient operator to set the coefficients of a matrix.
-*
-* The coefficients must be provided in a row major order and exactly match
-* the size of the matrix. Otherwise an assertion is raised.
-*
-* Example: \include MatrixBase_set.cpp
-* Output: \verbinclude MatrixBase_set.out
-*
-* \note According the c++ standard, the argument expressions of this comma initializer are evaluated in arbitrary order.
-*
-* \sa CommaInitializer::finished(), class CommaInitializer
-*/
-template<typename Derived>
-inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s)
-{
-return CommaInitializer<Derived>(*static_cast<Derived*>(this), s);
-}
-
-/** \sa operator<<(const Scalar&) */
-template<typename Derived>
-template<typename OtherDerived>
-inline CommaInitializer<Derived>
-DenseBase<Derived>::operator<<(const DenseBase<OtherDerived>& other)
-{
-return CommaInitializer<Derived>(*static_cast<Derived *>(this), other);
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_COMMAINITIALIZER_H
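For reference, a sketch of the user-facing comma-initialization API that this removed helper implements (example code, not part of the patch; it assumes the facility is still provided elsewhere in the updated tree):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d m;
      m << 1, 2, 3,
           4, 5, 6,
           7, 8, 9;                  // coefficients are given in row-major order
      std::cout << m << std::endl;   // an assertion fires if too few or too many are given
      return 0;
    }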
@@ -81,7 +81,8 @@ struct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
 )
 ),
 Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
-CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + functor_traits<BinaryOp>::Cost
+Cost0 = EIGEN_ADD_COST(LhsCoeffReadCost,RhsCoeffReadCost),
+CoeffReadCost = EIGEN_ADD_COST(Cost0,functor_traits<BinaryOp>::Cost)
 };
 };
 } // end namespace internal
@@ -47,7 +47,7 @@ struct traits<CwiseUnaryOp<UnaryOp, XprType> >
 Flags = _XprTypeNested::Flags & (
 HereditaryBits | LinearAccessBit | AlignedBit
 | (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
-CoeffReadCost = _XprTypeNested::CoeffReadCost + functor_traits<UnaryOp>::Cost
+CoeffReadCost = EIGEN_ADD_COST(_XprTypeNested::CoeffReadCost, functor_traits<UnaryOp>::Cost)
 };
 };
 }
@@ -41,14 +41,13 @@ static inline void check_DenseIndex_is_signed() {
 template<typename Derived> class DenseBase
 #ifndef EIGEN_PARSED_BY_DOXYGEN
 : public internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
-typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>
+typename NumTraits<typename internal::traits<Derived>::Scalar>::Real,
+DenseCoeffsBase<Derived> >
 #else
 : public DenseCoeffsBase<Derived>
 #endif // not EIGEN_PARSED_BY_DOXYGEN
 {
 public:
-using internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
-typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;
 
 class InnerIterator;
 
@@ -63,8 +62,9 @@ template<typename Derived> class DenseBase
 typedef typename internal::traits<Derived>::Scalar Scalar;
 typedef typename internal::packet_traits<Scalar>::type PacketScalar;
 typedef typename NumTraits<Scalar>::Real RealScalar;
+typedef internal::special_scalar_op_base<Derived,Scalar,RealScalar, DenseCoeffsBase<Derived> > Base;
 
-typedef DenseCoeffsBase<Derived> Base;
+using Base::operator*;
 using Base::derived;
 using Base::const_cast_derived;
 using Base::rows;
@@ -183,10 +183,6 @@ template<typename Derived> class DenseBase
 /** \returns the number of nonzero coefficients which is in practice the number
 * of stored coefficients. */
 inline Index nonZeros() const { return size(); }
-/** \returns true if either the number of rows or the number of columns is equal to 1.
-* In other words, this function returns
-* \code rows()==1 || cols()==1 \endcode
-* \sa rows(), cols(), IsVectorAtCompileTime. */
 
 /** \returns the outer size.
 *
@@ -266,11 +262,13 @@ template<typename Derived> class DenseBase
 template<typename OtherDerived>
 Derived& operator=(const ReturnByValue<OtherDerived>& func);
 
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-/** Copies \a other into *this without evaluating other. \returns a reference to *this. */
+/** \internal Copies \a other into *this without evaluating other. \returns a reference to *this. */
 template<typename OtherDerived>
 Derived& lazyAssign(const DenseBase<OtherDerived>& other);
-#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+/** \internal Evaluates \a other into *this. \returns a reference to *this. */
+template<typename OtherDerived>
+Derived& lazyAssign(const ReturnByValue<OtherDerived>& other);
 
 CommaInitializer<Derived> operator<< (const Scalar& s);
 
@@ -122,33 +122,41 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
 {
 internal::plain_array<T,Size,_Options> m_data;
 public:
-inline DenseStorage() {}
-inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+DenseStorage() {}
+DenseStorage(internal::constructor_without_unaligned_array_assert)
 : m_data(internal::constructor_without_unaligned_array_assert()) {}
-inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
-inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
-static inline DenseIndex rows(void) {return _Rows;}
-static inline DenseIndex cols(void) {return _Cols;}
-inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
-inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
-inline const T *data() const { return m_data.array; }
-inline T *data() { return m_data.array; }
+DenseStorage(const DenseStorage& other) : m_data(other.m_data) {}
+DenseStorage& operator=(const DenseStorage& other)
+{
+if (this != &other) m_data = other.m_data;
+return *this;
+}
+DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
+void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
+static DenseIndex rows(void) {return _Rows;}
+static DenseIndex cols(void) {return _Cols;}
+void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
+void resize(DenseIndex,DenseIndex,DenseIndex) {}
+const T *data() const { return m_data.array; }
+T *data() { return m_data.array; }
 };
 
 // null matrix
 template<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0, _Rows, _Cols, _Options>
 {
 public:
-inline DenseStorage() {}
-inline DenseStorage(internal::constructor_without_unaligned_array_assert) {}
-inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
-inline void swap(DenseStorage& ) {}
-static inline DenseIndex rows(void) {return _Rows;}
-static inline DenseIndex cols(void) {return _Cols;}
-inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
-inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
-inline const T *data() const { return 0; }
-inline T *data() { return 0; }
+DenseStorage() {}
+DenseStorage(internal::constructor_without_unaligned_array_assert) {}
+DenseStorage(const DenseStorage&) {}
+DenseStorage& operator=(const DenseStorage&) { return *this; }
+DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
+void swap(DenseStorage& ) {}
+static DenseIndex rows(void) {return _Rows;}
+static DenseIndex cols(void) {return _Cols;}
+void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
+void resize(DenseIndex,DenseIndex,DenseIndex) {}
+const T *data() const { return 0; }
+T *data() { return 0; }
 };
 
 // more specializations for null matrices; these are necessary to resolve ambiguities
@@ -168,18 +176,29 @@ template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic
 DenseIndex m_rows;
 DenseIndex m_cols;
 public:
-inline DenseStorage() : m_rows(0), m_cols(0) {}
-inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+DenseStorage() : m_rows(0), m_cols(0) {}
+DenseStorage(internal::constructor_without_unaligned_array_assert)
 : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
-inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) : m_rows(nbRows), m_cols(nbCols) {}
-inline void swap(DenseStorage& other)
+DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows), m_cols(other.m_cols) {}
+DenseStorage& operator=(const DenseStorage& other)
+{
+if (this != &other)
+{
+m_data = other.m_data;
+m_rows = other.m_rows;
+m_cols = other.m_cols;
+}
+return *this;
+}
+DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) : m_rows(nbRows), m_cols(nbCols) {}
+void swap(DenseStorage& other)
 { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
-inline DenseIndex rows() const {return m_rows;}
-inline DenseIndex cols() const {return m_cols;}
-inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
-inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
-inline const T *data() const { return m_data.array; }
-inline T *data() { return m_data.array; }
+DenseIndex rows() const {return m_rows;}
+DenseIndex cols() const {return m_cols;}
+void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
+void resize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
+const T *data() const { return m_data.array; }
+T *data() { return m_data.array; }
 };
 
 // dynamic-size matrix with fixed-size storage and fixed width
@@ -188,17 +207,27 @@ template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Si
 internal::plain_array<T,Size,_Options> m_data;
 DenseIndex m_rows;
 public:
-inline DenseStorage() : m_rows(0) {}
-inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+DenseStorage() : m_rows(0) {}
+DenseStorage(internal::constructor_without_unaligned_array_assert)
 : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {}
-inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {}
-inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
-inline DenseIndex rows(void) const {return m_rows;}
-inline DenseIndex cols(void) const {return _Cols;}
-inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
-inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
-inline const T *data() const { return m_data.array; }
-inline T *data() { return m_data.array; }
+DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows) {}
+DenseStorage& operator=(const DenseStorage& other)
+{
+if (this != &other)
+{
+m_data = other.m_data;
+m_rows = other.m_rows;
+}
+return *this;
+}
+DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {}
+void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
+DenseIndex rows(void) const {return m_rows;}
+DenseIndex cols(void) const {return _Cols;}
+void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
+void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
+const T *data() const { return m_data.array; }
+T *data() { return m_data.array; }
 };
 
 // dynamic-size matrix with fixed-size storage and fixed height
@@ -207,17 +236,27 @@ template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Si
 internal::plain_array<T,Size,_Options> m_data;
 DenseIndex m_cols;
 public:
-inline DenseStorage() : m_cols(0) {}
-inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+DenseStorage() : m_cols(0) {}
+DenseStorage(internal::constructor_without_unaligned_array_assert)
 : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {}
-inline DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {}
-inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
-inline DenseIndex rows(void) const {return _Rows;}
-inline DenseIndex cols(void) const {return m_cols;}
-inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
-inline void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
-inline const T *data() const { return m_data.array; }
-inline T *data() { return m_data.array; }
+DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_cols(other.m_cols) {}
+DenseStorage& operator=(const DenseStorage& other)
+{
+if (this != &other)
+{
+m_data = other.m_data;
+m_cols = other.m_cols;
+}
+return *this;
+}
+DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {}
+void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
+DenseIndex rows(void) const {return _Rows;}
+DenseIndex cols(void) const {return m_cols;}
+void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
+void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
+const T *data() const { return m_data.array; }
+T *data() { return m_data.array; }
 };
 
 // purely dynamic matrix.
@@ -227,18 +266,35 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
 DenseIndex m_rows;
 DenseIndex m_cols;
 public:
-inline DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
-inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
+DenseStorage(internal::constructor_without_unaligned_array_assert)
 : m_data(0), m_rows(0), m_cols(0) {}
-inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
+DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
 : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows), m_cols(nbCols)
 { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
-inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
-inline void swap(DenseStorage& other)
+#ifdef EIGEN_HAVE_RVALUE_REFERENCES
+DenseStorage(DenseStorage&& other)
+: m_data(std::move(other.m_data))
+, m_rows(std::move(other.m_rows))
+, m_cols(std::move(other.m_cols))
+{
+other.m_data = nullptr;
+}
+DenseStorage& operator=(DenseStorage&& other)
+{
+using std::swap;
+swap(m_data, other.m_data);
+swap(m_rows, other.m_rows);
+swap(m_cols, other.m_cols);
+return *this;
+}
+#endif
+~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
+void swap(DenseStorage& other)
 { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
-inline DenseIndex rows(void) const {return m_rows;}
-inline DenseIndex cols(void) const {return m_cols;}
-inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
+DenseIndex rows(void) const {return m_rows;}
+DenseIndex cols(void) const {return m_cols;}
+void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
 {
 m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols);
 m_rows = nbRows;
@@ -258,8 +314,11 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
 m_rows = nbRows;
 m_cols = nbCols;
 }
-inline const T *data() const { return m_data; }
-inline T *data() { return m_data; }
+const T *data() const { return m_data; }
+T *data() { return m_data; }
+private:
+DenseStorage(const DenseStorage&);
+DenseStorage& operator=(const DenseStorage&);
 };
 
 // matrix with dynamic width and fixed height (so that matrix has dynamic size).
@@ -268,15 +327,30 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
 T *m_data;
 DenseIndex m_cols;
 public:
-inline DenseStorage() : m_data(0), m_cols(0) {}
-inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
-inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
+DenseStorage() : m_data(0), m_cols(0) {}
+DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
+DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
 { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
-inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
-inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
-static inline DenseIndex rows(void) {return _Rows;}
-inline DenseIndex cols(void) const {return m_cols;}
-inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols)
+#ifdef EIGEN_HAVE_RVALUE_REFERENCES
+DenseStorage(DenseStorage&& other)
+: m_data(std::move(other.m_data))
+, m_cols(std::move(other.m_cols))
+{
+other.m_data = nullptr;
+}
+DenseStorage& operator=(DenseStorage&& other)
+{
+using std::swap;
+swap(m_data, other.m_data);
+swap(m_cols, other.m_cols);
+return *this;
+}
+#endif
+~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
+void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
+static DenseIndex rows(void) {return _Rows;}
+DenseIndex cols(void) const {return m_cols;}
+void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols)
 {
 m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);
 m_cols = nbCols;
@@ -294,8 +368,11 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
 }
 m_cols = nbCols;
 }
-inline const T *data() const { return m_data; }
-inline T *data() { return m_data; }
+const T *data() const { return m_data; }
+T *data() { return m_data; }
+private:
+DenseStorage(const DenseStorage&);
+DenseStorage& operator=(const DenseStorage&);
 };
 
 // matrix with dynamic height and fixed width (so that matrix has dynamic size).
@@ -304,15 +381,30 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
 T *m_data;
 DenseIndex m_rows;
 public:
-inline DenseStorage() : m_data(0), m_rows(0) {}
-inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
-inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
+DenseStorage() : m_data(0), m_rows(0) {}
+DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
+DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
 { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
-inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
-inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
-inline DenseIndex rows(void) const {return m_rows;}
-static inline DenseIndex cols(void) {return _Cols;}
-inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex)
+#ifdef EIGEN_HAVE_RVALUE_REFERENCES
+DenseStorage(DenseStorage&& other)
+: m_data(std::move(other.m_data))
+, m_rows(std::move(other.m_rows))
+{
+other.m_data = nullptr;
+}
+DenseStorage& operator=(DenseStorage&& other)
+{
+using std::swap;
+swap(m_data, other.m_data);
+swap(m_rows, other.m_rows);
+return *this;
+}
+#endif
+~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
+void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
+DenseIndex rows(void) const {return m_rows;}
+static DenseIndex cols(void) {return _Cols;}
+void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex)
 {
 m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);
 m_rows = nbRows;
@@ -330,8 +422,11 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
 }
 m_rows = nbRows;
 }
-inline const T *data() const { return m_data; }
-inline T *data() { return m_data; }
+const T *data() const { return m_data; }
+T *data() { return m_data; }
+private:
+DenseStorage(const DenseStorage&);
+DenseStorage& operator=(const DenseStorage&);
 };
 
 } // end namespace Eigen
@@ -34,8 +34,9 @@ struct traits<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
 _Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && (_ScalarAccessOnDiag || (bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))),
 _LinearAccessMask = (RowsAtCompileTime==1 || ColsAtCompileTime==1) ? LinearAccessBit : 0,
 
-Flags = ((HereditaryBits|_LinearAccessMask) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0) | AlignedBit,//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit),
+Flags = ((HereditaryBits|_LinearAccessMask|AlignedBit) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0),//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit),
-CoeffReadCost = NumTraits<Scalar>::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost
+Cost0 = EIGEN_ADD_COST(NumTraits<Scalar>::MulCost, MatrixType::CoeffReadCost),
+CoeffReadCost = EIGEN_ADD_COST(Cost0,DiagonalType::DiagonalVectorType::CoeffReadCost)
 };
 };
 }
@@ -259,6 +259,47 @@ template<> struct functor_traits<scalar_boolean_or_op> {
 };
 };

+/** \internal
+  * \brief Template functors for comparison of two scalars
+  * \todo Implement packet-comparisons
+  */
+template<typename Scalar, ComparisonName cmp> struct scalar_cmp_op;
+
+template<typename Scalar, ComparisonName cmp>
+struct functor_traits<scalar_cmp_op<Scalar, cmp> > {
+  enum {
+    Cost = NumTraits<Scalar>::AddCost,
+    PacketAccess = false
+  };
+};
+
+template<ComparisonName Cmp, typename Scalar>
+struct result_of<scalar_cmp_op<Scalar, Cmp>(Scalar,Scalar)> {
+  typedef bool type;
+};
+
+
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_EQ> {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+  EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a==b;}
+};
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_LT> {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+  EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a<b;}
+};
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_LE> {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+  EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a<=b;}
+};
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_UNORD> {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+  EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return !(a<=b || b<=a);}
+};
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_NEQ> {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+  EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a!=b;}
+};
+
 // unary functors:

 /** \internal
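Aside (not part of the diff): the comparison functors added above are ordinary stateless binary callables, so the pattern can be exercised with plain standard C++. The sketch below re-declares a similar functor locally instead of using Eigen's internal one, purely to illustrate how such a functor is applied element-wise.

#include <algorithm>
#include <iostream>
#include <vector>

// Minimal stand-in for the scalar comparison functor pattern.
template <typename Scalar>
struct less_cmp {
  bool operator()(const Scalar& a, const Scalar& b) const { return a < b; }
};

int main() {
  std::vector<double> a = {1.0, 5.0, 3.0};
  std::vector<double> b = {2.0, 4.0, 3.0};
  std::vector<char> mask(a.size());
  // Element-wise comparison: the role scalar_cmp_op plays inside an Eigen expression.
  std::transform(a.begin(), a.end(), b.begin(), mask.begin(), less_cmp<double>());
  for (char m : mask) std::cout << int(m) << ' ';   // prints: 1 0 0
  std::cout << '\n';
}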
@@ -257,7 +257,7 @@ template<typename Lhs, typename Rhs>
 class GeneralProduct<Lhs, Rhs, OuterProduct>
   : public ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs>
 {
-    template<typename T> struct IsRowMajor : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};
+    template<typename T> struct is_row_major : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};

   public:
     EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)

@@ -281,22 +281,22 @@ class GeneralProduct<Lhs, Rhs, OuterProduct>

     template<typename Dest>
     inline void evalTo(Dest& dest) const {
-      internal::outer_product_selector_run(*this, dest, set(), IsRowMajor<Dest>());
+      internal::outer_product_selector_run(*this, dest, set(), is_row_major<Dest>());
     }

     template<typename Dest>
     inline void addTo(Dest& dest) const {
-      internal::outer_product_selector_run(*this, dest, add(), IsRowMajor<Dest>());
+      internal::outer_product_selector_run(*this, dest, add(), is_row_major<Dest>());
     }

     template<typename Dest>
     inline void subTo(Dest& dest) const {
-      internal::outer_product_selector_run(*this, dest, sub(), IsRowMajor<Dest>());
+      internal::outer_product_selector_run(*this, dest, sub(), is_row_major<Dest>());
     }

     template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
     {
-      internal::outer_product_selector_run(*this, dest, adds(alpha), IsRowMajor<Dest>());
+      internal::outer_product_selector_run(*this, dest, adds(alpha), is_row_major<Dest>());
     }
 };
@@ -123,7 +123,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
       return internal::ploadt<PacketScalar, LoadMode>(m_data + index * innerStride());
     }

-    inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
+    explicit inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
     {
       EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
       checkSanity();

@@ -157,7 +157,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
                            internal::inner_stride_at_compile_time<Derived>::ret==1),
                           PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
       eigen_assert(EIGEN_IMPLIES(internal::traits<Derived>::Flags&AlignedBit, (size_t(m_data) % 16) == 0)
-                   && "data is not aligned");
+                   && "input pointer is not aligned on a 16 byte boundary");
     }

     PointerType m_data;
@@ -294,7 +294,7 @@ struct hypot_impl
     RealScalar _x = abs(x);
     RealScalar _y = abs(y);
     RealScalar p = (max)(_x, _y);
-    if(p==RealScalar(0)) return 0;
+    if(p==RealScalar(0)) return RealScalar(0);
     RealScalar q = (min)(_x, _y);
     RealScalar qp = q/p;
     return p * sqrt(RealScalar(1) + qp*qp);
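Aside (not part of the diff): the routine above uses the rearrangement p*sqrt(1 + (q/p)^2) instead of sqrt(x^2 + y^2) so that squaring the inputs never overflows. A standalone sketch, with values chosen only to make the difference visible:

#include <algorithm>
#include <cmath>
#include <cstdio>

// Overflow-safe hypotenuse, the same "scale by the larger magnitude" trick used above.
double safe_hypot(double x, double y) {
  double ax = std::fabs(x), ay = std::fabs(y);
  double p = std::max(ax, ay), q = std::min(ax, ay);
  if (p == 0.0) return 0.0;
  double qp = q / p;
  return p * std::sqrt(1.0 + qp * qp);
}

int main() {
  // Naive sqrt(x*x + y*y) overflows to inf here; the rearranged form does not.
  std::printf("naive: %g\n", std::sqrt(3e200 * 3e200 + 4e200 * 4e200));
  std::printf("safe : %g\n", safe_hypot(3e200, 4e200));   // 5e200
}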
@@ -211,6 +211,21 @@ class Matrix
       : Base(internal::constructor_without_unaligned_array_assert())
     { Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED }

+#ifdef EIGEN_HAVE_RVALUE_REFERENCES
+    Matrix(Matrix&& other)
+      : Base(std::move(other))
+    {
+      Base::_check_template_params();
+      if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic)
+        Base::_set_noalias(other);
+    }
+    Matrix& operator=(Matrix&& other)
+    {
+      other.swap(*this);
+      return *this;
+    }
+#endif
+
     /** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors
       *
       * Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
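Aside (not part of the diff): the constructor and assignment operator added above give matrices C++11 move semantics. A small usage sketch, assuming a C++11 compiler and an Eigen build where EIGEN_HAVE_RVALUE_REFERENCES ends up defined; the helper function is made up for illustration:

#include <Eigen/Dense>
#include <iostream>
#include <utility>

// Hypothetical helper: builds a large dynamic-size matrix locally and returns it.
Eigen::MatrixXd make_big(int n) {
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(n, n);
  return m;  // with the move constructor above, the heap buffer can be stolen instead of copied
}

int main() {
  Eigen::MatrixXd a = make_big(1000);
  Eigen::MatrixXd b = std::move(a);   // move assignment: swaps storage with a
  std::cout << b.rows() << "x" << b.cols() << std::endl;
}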
@@ -159,13 +159,11 @@ template<typename Derived> class MatrixBase
     template<typename OtherDerived>
     Derived& operator=(const ReturnByValue<OtherDerived>& other);

-#ifndef EIGEN_PARSED_BY_DOXYGEN
     template<typename ProductDerived, typename Lhs, typename Rhs>
     Derived& lazyAssign(const ProductBase<ProductDerived, Lhs,Rhs>& other);

     template<typename MatrixPower, typename Lhs, typename Rhs>
     Derived& lazyAssign(const MatrixPowerProduct<MatrixPower, Lhs,Rhs>& other);
-#endif // not EIGEN_PARSED_BY_DOXYGEN

     template<typename OtherDerived>
     Derived& operator+=(const MatrixBase<OtherDerived>& other);

@@ -442,6 +440,15 @@ template<typename Derived> class MatrixBase
     template<typename OtherScalar>
     void applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j);

+///////// SparseCore module /////////
+
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE const typename SparseMatrixBase<OtherDerived>::template CwiseProductDenseReturnType<Derived>::Type
+    cwiseProduct(const SparseMatrixBase<OtherDerived> &other) const
+    {
+      return other.cwiseProduct(derived());
+    }
+
 ///////// MatrixFunctions module /////////

     typedef typename internal::stem_function<Scalar>::type StemFunction;
@@ -250,6 +250,35 @@ class PermutationBase : public EigenBase<Derived>
     template<typename Other> friend
     inline PlainPermutationType operator*(const Transpose<PermutationBase<Other> >& other, const PermutationBase& perm)
     { return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); }

+    /** \returns the determinant of the permutation matrix, which is either 1 or -1 depending on the parity of the permutation.
+      *
+      * This function is O(\c n) procedure allocating a buffer of \c n booleans.
+      */
+    Index determinant() const
+    {
+      Index res = 1;
+      Index n = size();
+      Matrix<bool,RowsAtCompileTime,1,0,MaxRowsAtCompileTime> mask(n);
+      mask.fill(false);
+      Index r = 0;
+      while(r < n)
+      {
+        // search for the next seed
+        while(r<n && mask[r]) r++;
+        if(r>=n)
+          break;
+        // we got one, let's follow it until we are back to the seed
+        Index k0 = r++;
+        mask.coeffRef(k0) = true;
+        for(Index k=indices().coeff(k0); k!=k0; k=indices().coeff(k))
+        {
+          mask.coeffRef(k) = true;
+          res = -res;
+        }
+      }
+      return res;
+    }
+
   protected:
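Aside (not part of the diff): the determinant above is just the parity of the permutation, computed by walking its cycles; each cycle of length L contributes (-1)^(L-1). A standalone sketch of the same idea on a plain index vector:

#include <cstdio>
#include <vector>

// Cycle-walking parity: mirrors the algorithm added to PermutationBase::determinant().
int permutation_determinant(const std::vector<int>& perm) {
  int res = 1;
  std::vector<bool> mask(perm.size(), false);
  for (std::size_t r = 0; r < perm.size(); ++r) {
    if (mask[r]) continue;                     // already part of a visited cycle
    mask[r] = true;
    for (int k = perm[r]; k != static_cast<int>(r); k = perm[k]) {
      mask[k] = true;
      res = -res;                              // one sign flip per extra element in the cycle
    }
  }
  return res;
}

int main() {
  std::printf("%d\n", permutation_determinant({1, 2, 0}));  // 3-cycle, even permutation: +1
  std::printf("%d\n", permutation_determinant({1, 0, 2}));  // single swap, odd permutation: -1
}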
@@ -437,6 +437,36 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
     }
 #endif

+#ifdef EIGEN_HAVE_RVALUE_REFERENCES
+    PlainObjectBase(PlainObjectBase&& other)
+      : m_storage( std::move(other.m_storage) )
+    {
+    }
+
+    PlainObjectBase& operator=(PlainObjectBase&& other)
+    {
+      using std::swap;
+      swap(m_storage, other.m_storage);
+      return *this;
+    }
+#endif
+
+    /** Copy constructor */
+    EIGEN_STRONG_INLINE PlainObjectBase(const PlainObjectBase& other)
+      : m_storage()
+    {
+      _check_template_params();
+      lazyAssign(other);
+    }
+
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE PlainObjectBase(const DenseBase<OtherDerived> &other)
+      : m_storage()
+    {
+      _check_template_params();
+      lazyAssign(other);
+    }
+
     EIGEN_STRONG_INLINE PlainObjectBase(Index a_size, Index nbRows, Index nbCols)
       : m_storage(a_size, nbRows, nbCols)
     {

@@ -573,6 +603,8 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
                   : (rows() == other.rows() && cols() == other.cols())))
         && "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined");
       EIGEN_ONLY_USED_FOR_DEBUG(other);
+      if(this->size()==0)
+        resizeLike(other);
 #else
       resizeLike(other);
 #endif
@@ -247,8 +247,9 @@ struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
   }
 };

-template<typename Func, typename Derived>
-struct redux_impl<Func, Derived, SliceVectorizedTraversal, NoUnrolling>
+// NOTE: for SliceVectorizedTraversal we simply bypass unrolling
+template<typename Func, typename Derived, int Unrolling>
+struct redux_impl<Func, Derived, SliceVectorizedTraversal, Unrolling>
 {
   typedef typename Derived::Scalar Scalar;
   typedef typename packet_traits<Scalar>::type PacketScalar;
@@ -108,7 +108,8 @@ struct traits<Ref<_PlainObjectType, _Options, _StrideType> >
       OuterStrideMatch = Derived::IsVectorAtCompileTime
         || int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime),
       AlignmentMatch = (_Options!=Aligned) || ((PlainObjectType::Flags&AlignedBit)==0) || ((traits<Derived>::Flags&AlignedBit)==AlignedBit),
-      MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch
+      ScalarTypeMatch = internal::is_same<typename PlainObjectType::Scalar, typename Derived::Scalar>::value,
+      MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch && ScalarTypeMatch
     };
     typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
 };

@@ -187,9 +188,11 @@ protected:
 template<typename PlainObjectType, int Options, typename StrideType> class Ref
   : public RefBase<Ref<PlainObjectType, Options, StrideType> >
 {
+  private:
     typedef internal::traits<Ref> Traits;
     template<typename Derived>
-    inline Ref(const PlainObjectBase<Derived>& expr);
+    inline Ref(const PlainObjectBase<Derived>& expr,
+               typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0);
   public:

     typedef RefBase<Ref> Base;

@@ -198,13 +201,15 @@ template<typename PlainObjectType, int Options, typename StrideType> class Ref

     #ifndef EIGEN_PARSED_BY_DOXYGEN
     template<typename Derived>
-    inline Ref(PlainObjectBase<Derived>& expr)
+    inline Ref(PlainObjectBase<Derived>& expr,
+               typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
     {
       EIGEN_STATIC_ASSERT(static_cast<bool>(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
       Base::construct(expr.derived());
     }
     template<typename Derived>
-    inline Ref(const DenseBase<Derived>& expr)
+    inline Ref(const DenseBase<Derived>& expr,
+               typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
     #else
     template<typename Derived>
     inline Ref(DenseBase<Derived>& expr)

@@ -231,13 +236,23 @@ template<typename TPlainObjectType, int Options, typename StrideType> class Ref<
     EIGEN_DENSE_PUBLIC_INTERFACE(Ref)

     template<typename Derived>
-    inline Ref(const DenseBase<Derived>& expr)
+    inline Ref(const DenseBase<Derived>& expr,
+               typename internal::enable_if<bool(Traits::template match<Derived>::ScalarTypeMatch),Derived>::type* = 0)
     {
       // std::cout << match_helper<Derived>::HasDirectAccess << "," << match_helper<Derived>::OuterStrideMatch << "," << match_helper<Derived>::InnerStrideMatch << "\n";
       // std::cout << int(StrideType::OuterStrideAtCompileTime) << " - " << int(Derived::OuterStrideAtCompileTime) << "\n";
       // std::cout << int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n";
       construct(expr.derived(), typename Traits::template match<Derived>::type());
     }

+    inline Ref(const Ref& other) : Base(other) {
+      // copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy
+    }
+
+    template<typename OtherRef>
+    inline Ref(const RefBase<OtherRef>& other) {
+      construct(other.derived(), typename Traits::template match<OtherRef>::type());
+    }
+
   protected:
@@ -72,6 +72,8 @@ template<typename Derived> class ReturnByValue
     const Unusable& coeff(Index,Index) const { return *reinterpret_cast<const Unusable*>(this); }
     Unusable& coeffRef(Index) { return *reinterpret_cast<Unusable*>(this); }
     Unusable& coeffRef(Index,Index) { return *reinterpret_cast<Unusable*>(this); }
+    template<int LoadMode> Unusable& packet(Index) const;
+    template<int LoadMode> Unusable& packet(Index, Index) const;
 #endif
 };

@@ -83,6 +85,15 @@ Derived& DenseBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
   return derived();
 }

+template<typename Derived>
+template<typename OtherDerived>
+Derived& DenseBase<Derived>::lazyAssign(const ReturnByValue<OtherDerived>& other)
+{
+  other.evalTo(derived());
+  return derived();
+}
+
+
 } // end namespace Eigen

 #endif // EIGEN_RETURNBYVALUE_H
@@ -180,15 +180,9 @@ inline Derived& DenseBase<Derived>::operator*=(const Scalar& other)
 template<typename Derived>
 inline Derived& DenseBase<Derived>::operator/=(const Scalar& other)
 {
-  typedef typename internal::conditional<NumTraits<Scalar>::IsInteger,
-                                         internal::scalar_quotient_op<Scalar>,
-                                         internal::scalar_product_op<Scalar> >::type BinOp;
   typedef typename Derived::PlainObject PlainObject;
-  SelfCwiseBinaryOp<BinOp, Derived, typename PlainObject::ConstantReturnType> tmp(derived());
-  Scalar actual_other;
-  if(NumTraits<Scalar>::IsInteger) actual_other = other;
-  else actual_other = Scalar(1)/other;
-  tmp = PlainObject::Constant(rows(),cols(), actual_other);
+  SelfCwiseBinaryOp<internal::scalar_quotient_op<Scalar>, Derived, typename PlainObject::ConstantReturnType> tmp(derived());
+  tmp = PlainObject::Constant(rows(),cols(), other);
   return derived();
 }
@@ -384,6 +384,7 @@ template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
   a_lo = vget_low_s32(a);
   a_hi = vget_high_s32(a);
   max = vpmax_s32(a_lo, a_hi);
+  max = vpmax_s32(max, max);

   return vget_lane_s32(max, 0);
 }
@@ -126,7 +126,7 @@ Packet4f pexp<Packet4f>(const Packet4f& _x)
   _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
   _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);

-  Packet4f tmp = _mm_setzero_ps(), fx;
+  Packet4f tmp, fx;
   Packet4i emm0;

   // clamp x

@@ -195,7 +195,7 @@ Packet2d pexp<Packet2d>(const Packet2d& _x)
   _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
   static const __m128i p4i_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0);

-  Packet2d tmp = _mm_setzero_pd(), fx;
+  Packet2d tmp, fx;
   Packet4i emm0;

   // clamp x

@@ -279,7 +279,7 @@ Packet4f psin<Packet4f>(const Packet4f& _x)
   _EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f);
   _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI

-  Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, sign_bit, y;
+  Packet4f xmm1, xmm2, xmm3, sign_bit, y;

   Packet4i emm0, emm2;
   sign_bit = x;

@@ -378,7 +378,7 @@ Packet4f pcos<Packet4f>(const Packet4f& _x)
   _EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f);
   _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI

-  Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, y;
+  Packet4f xmm1, xmm2, xmm3, y;
   Packet4i emm0, emm2;

   x = pabs(x);
@@ -134,7 +134,7 @@ class CoeffBasedProduct
   };

   typedef internal::product_coeff_impl<CanVectorizeInner ? InnerVectorizedTraversal : DefaultTraversal,
-                                       Unroll ? (InnerSize==0 ? 0 : InnerSize-1) : Dynamic,
+                                       Unroll ? InnerSize : Dynamic,
                                        _LhsNested, _RhsNested, Scalar> ScalarCoeffImpl;

   typedef CoeffBasedProduct<LhsNested,RhsNested,NestByRefBit> LazyCoeffBasedProductType;

@@ -185,7 +185,7 @@ class CoeffBasedProduct
   {
     PacketScalar res;
     internal::product_packet_impl<Flags&RowMajorBit ? RowMajor : ColMajor,
-                                  Unroll ? (InnerSize==0 ? 0 : InnerSize-1) : Dynamic,
+                                  Unroll ? InnerSize : Dynamic,
                                   _LhsNested, _RhsNested, PacketScalar, LoadMode>
       ::run(row, col, m_lhs, m_rhs, res);
     return res;

@@ -243,7 +243,17 @@ struct product_coeff_impl<DefaultTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
   static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
   {
     product_coeff_impl<DefaultTraversal, UnrollingIndex-1, Lhs, Rhs, RetScalar>::run(row, col, lhs, rhs, res);
-    res += lhs.coeff(row, UnrollingIndex) * rhs.coeff(UnrollingIndex, col);
+    res += lhs.coeff(row, UnrollingIndex-1) * rhs.coeff(UnrollingIndex-1, col);
+  }
+};
+
+template<typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl<DefaultTraversal, 1, Lhs, Rhs, RetScalar>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
+  {
+    res = lhs.coeff(row, 0) * rhs.coeff(0, col);
   }
 };

@@ -251,9 +261,9 @@ template<typename Lhs, typename Rhs, typename RetScalar>
 struct product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
 {
   typedef typename Lhs::Index Index;
-  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
+  static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, RetScalar &res)
   {
-    res = lhs.coeff(row, 0) * rhs.coeff(0, col);
+    res = RetScalar(0);
   }
 };

@@ -293,6 +303,16 @@ struct product_coeff_vectorized_unroller<0, Lhs, Rhs, Packet>
   }
 };

+template<typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl<InnerVectorizedTraversal, 0, Lhs, Rhs, RetScalar>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, RetScalar &res)
+  {
+    res = 0;
+  }
+};
+
 template<int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
 struct product_coeff_impl<InnerVectorizedTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
 {

@@ -302,8 +322,7 @@ struct product_coeff_impl<InnerVectorizedTraversal, UnrollingIndex, Lhs, Rhs, Re
   static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
   {
     Packet pres;
-    product_coeff_vectorized_unroller<UnrollingIndex+1-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, pres);
-    product_coeff_impl<DefaultTraversal,UnrollingIndex,Lhs,Rhs,RetScalar>::run(row, col, lhs, rhs, res);
+    product_coeff_vectorized_unroller<UnrollingIndex-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, pres);
     res = predux(pres);
   }
 };

@@ -371,7 +390,7 @@ struct product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
   static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
   {
     product_packet_impl<RowMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, res);
-    res = pmadd(pset1<Packet>(lhs.coeff(row, UnrollingIndex)), rhs.template packet<LoadMode>(UnrollingIndex, col), res);
+    res = pmadd(pset1<Packet>(lhs.coeff(row, UnrollingIndex-1)), rhs.template packet<LoadMode>(UnrollingIndex-1, col), res);
   }
 };
@@ -382,12 +401,12 @@ struct product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
   static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
   {
     product_packet_impl<ColMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, res);
-    res = pmadd(lhs.template packet<LoadMode>(row, UnrollingIndex), pset1<Packet>(rhs.coeff(UnrollingIndex, col)), res);
+    res = pmadd(lhs.template packet<LoadMode>(row, UnrollingIndex-1), pset1<Packet>(rhs.coeff(UnrollingIndex-1, col)), res);
   }
 };

 template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
-struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
+struct product_packet_impl<RowMajor, 1, Lhs, Rhs, Packet, LoadMode>
 {
   typedef typename Lhs::Index Index;
   static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)

@@ -397,7 +416,7 @@ struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
 };

 template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
-struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
+struct product_packet_impl<ColMajor, 1, Lhs, Rhs, Packet, LoadMode>
 {
   typedef typename Lhs::Index Index;
   static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)

@@ -406,16 +425,35 @@ struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
   }
 };

+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Packet &res)
+  {
+    res = pset1<Packet>(0);
+  }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Packet &res)
+  {
+    res = pset1<Packet>(0);
+  }
+};
+
 template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
 struct product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
 {
   typedef typename Lhs::Index Index;
   static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res)
   {
-    eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix");
-    res = pmul(pset1<Packet>(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
-    for(Index i = 1; i < lhs.cols(); ++i)
-      res = pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
+    res = pset1<Packet>(0);
+    for(Index i = 0; i < lhs.cols(); ++i)
+      res = pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
   }
 };

@@ -425,10 +463,9 @@ struct product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
   typedef typename Lhs::Index Index;
   static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res)
   {
-    eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix");
-    res = pmul(lhs.template packet<LoadMode>(row, 0), pset1<Packet>(rhs.coeff(0, col)));
-    for(Index i = 1; i < lhs.cols(); ++i)
-      res = pmadd(lhs.template packet<LoadMode>(row, i), pset1<Packet>(rhs.coeff(i, col)), res);
+    res = pset1<Packet>(0);
+    for(Index i = 0; i < lhs.cols(); ++i)
+      res = pmadd(lhs.template packet<LoadMode>(row, i), pset1<Packet>(rhs.coeff(i, col)), res);
   }
 };
@@ -125,19 +125,22 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpos
   if(transpose)
     std::swap(rows,cols);

-  Index blockCols = (cols / threads) & ~Index(0x3);
-  Index blockRows = (rows / threads) & ~Index(0x7);

   GemmParallelInfo<Index>* info = new GemmParallelInfo<Index>[threads];

-  #pragma omp parallel for schedule(static,1) num_threads(threads)
-  for(Index i=0; i<threads; ++i)
+  #pragma omp parallel num_threads(threads)
   {
+    Index i = omp_get_thread_num();
+    // Note that the actual number of threads might be lower than the number of request ones.
+    Index actual_threads = omp_get_num_threads();
+
+    Index blockCols = (cols / actual_threads) & ~Index(0x3);
+    Index blockRows = (rows / actual_threads) & ~Index(0x7);
+
     Index r0 = i*blockRows;
-    Index actualBlockRows = (i+1==threads) ? rows-r0 : blockRows;
+    Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;

     Index c0 = i*blockCols;
-    Index actualBlockCols = (i+1==threads) ? cols-c0 : blockCols;
+    Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;

     info[i].rhs_start = c0;
     info[i].rhs_length = actualBlockCols;
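Aside (not part of the diff): the rewrite above switches from an `omp parallel for` over the requested thread count to a bare `omp parallel` region in which each thread sizes its own block from omp_get_num_threads(), which may be smaller than the count requested. A standalone sketch of that partitioning pattern (compile with OpenMP enabled, e.g. -fopenmp):

#include <omp.h>
#include <cstdio>
#include <vector>

int main() {
  const int rows = 1000;
  std::vector<int> owner(rows, -1);

#pragma omp parallel
  {
    int i = omp_get_thread_num();
    int actual_threads = omp_get_num_threads();   // may be fewer than requested
    int blockRows = rows / actual_threads;
    int r0 = i * blockRows;
    int r1 = (i + 1 == actual_threads) ? rows : r0 + blockRows;
    for (int r = r0; r < r1; ++r) owner[r] = i;   // each thread claims its own rows
  }

  std::printf("row 0 -> thread %d, last row -> thread %d\n", owner[0], owner[rows - 1]);
}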
@@ -109,7 +109,7 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \
   /* Non-square case - doesn't fit to MKL ?TRMM. Fall to default triangular product or call MKL ?GEMM*/ \
     if (rows != depth) { \
 \
-      int nthr = mkl_domain_get_max_threads(MKL_BLAS); \
+      int nthr = mkl_domain_get_max_threads(EIGEN_MKL_DOMAIN_BLAS); \
 \
       if (((nthr==1) && (((std::max)(rows,depth)-diagSize)/(double)diagSize < 0.5))) { \
       /* Most likely no benefit to call TRMM or GEMM from MKL*/ \

@@ -223,7 +223,7 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \
   /* Non-square case - doesn't fit to MKL ?TRMM. Fall to default triangular product or call MKL ?GEMM*/ \
     if (cols != depth) { \
 \
-      int nthr = mkl_domain_get_max_threads(MKL_BLAS); \
+      int nthr = mkl_domain_get_max_threads(EIGEN_MKL_DOMAIN_BLAS); \
 \
       if ((nthr==1) && (((std::max)(cols,depth)-diagSize)/(double)diagSize < 0.5)) { \
       /* Most likely no benefit to call TRMM or GEMM from MKL*/ \
@@ -302,9 +302,12 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conj
             for (Index i=0; i<actual_mc; ++i)
               r[i] -= a[i] * b;
           }
-          Scalar b = (Mode & UnitDiag) ? Scalar(1) : Scalar(1)/conj(rhs(j,j));
-          for (Index i=0; i<actual_mc; ++i)
-            r[i] *= b;
+          if((Mode & UnitDiag)==0)
+          {
+            Scalar b = conj(rhs(j,j));
+            for (Index i=0; i<actual_mc; ++i)
+              r[i] /= b;
+          }
         }

         // pack the just computed part of lhs to A
@@ -433,6 +433,19 @@ struct MatrixXpr {};
 /** The type used to identify an array expression */
 struct ArrayXpr {};

+namespace internal {
+  /** \internal
+    * Constants for comparison functors
+    */
+  enum ComparisonName {
+    cmp_EQ = 0,
+    cmp_LT = 1,
+    cmp_LE = 2,
+    cmp_UNORD = 3,
+    cmp_NEQ = 4
+  };
+}
+
 } // end namespace Eigen

 #endif // EIGEN_CONSTANTS_H
@@ -235,6 +235,9 @@ template<typename Scalar> class Rotation2D;
 template<typename Scalar> class AngleAxis;
 template<typename Scalar,int Dim> class Translation;

+// Sparse module:
+template<typename Derived> class SparseMatrixBase;
+
 #ifdef EIGEN2_SUPPORT
 template<typename Derived, int _Dim> class eigen2_RotationBase;
 template<typename Lhs, typename Rhs> class eigen2_Cross;
@@ -76,6 +76,38 @@
 #include <mkl_lapacke.h>
 #define EIGEN_MKL_VML_THRESHOLD 128

+/* MKL_DOMAIN_BLAS, etc are defined only in 10.3 update 7 */
+/* MKL_BLAS, etc are not defined in 11.2 */
+#ifdef MKL_DOMAIN_ALL
+#define EIGEN_MKL_DOMAIN_ALL MKL_DOMAIN_ALL
+#else
+#define EIGEN_MKL_DOMAIN_ALL MKL_ALL
+#endif
+
+#ifdef MKL_DOMAIN_BLAS
+#define EIGEN_MKL_DOMAIN_BLAS MKL_DOMAIN_BLAS
+#else
+#define EIGEN_MKL_DOMAIN_BLAS MKL_BLAS
+#endif
+
+#ifdef MKL_DOMAIN_FFT
+#define EIGEN_MKL_DOMAIN_FFT MKL_DOMAIN_FFT
+#else
+#define EIGEN_MKL_DOMAIN_FFT MKL_FFT
+#endif
+
+#ifdef MKL_DOMAIN_VML
+#define EIGEN_MKL_DOMAIN_VML MKL_DOMAIN_VML
+#else
+#define EIGEN_MKL_DOMAIN_VML MKL_VML
+#endif
+
+#ifdef MKL_DOMAIN_PARDISO
+#define EIGEN_MKL_DOMAIN_PARDISO MKL_DOMAIN_PARDISO
+#else
+#define EIGEN_MKL_DOMAIN_PARDISO MKL_PARDISO
+#endif
+
 namespace Eigen {

 typedef std::complex<double> dcomplex;
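Aside (not part of the diff): the block above is a compatibility shim that maps Eigen-side names onto whichever MKL spelling the installed headers provide. A minimal standalone sketch of the same fallback pattern, with made-up macro names:

// Newer releases of a hypothetical library define LIB_FEATURE_V2; older ones only LIB_FEATURE.
#ifdef LIB_FEATURE_V2
#  define MY_FEATURE LIB_FEATURE_V2
#else
#  define MY_FEATURE LIB_FEATURE
#endif
// Client code uses MY_FEATURE everywhere and compiles against either library version.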
@@ -13,7 +13,7 @@

 #define EIGEN_WORLD_VERSION 3
 #define EIGEN_MAJOR_VERSION 2
-#define EIGEN_MINOR_VERSION 4
+#define EIGEN_MINOR_VERSION 7

 #define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
                                        (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
@@ -96,6 +96,20 @@
 #define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
 #endif

+// A Clang feature extension to determine compiler features.
+// We use it to determine 'cxx_rvalue_references'
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+// Do we support r-value references?
+#if (__has_feature(cxx_rvalue_references) || \
+     defined(__GXX_EXPERIMENTAL_CXX0X__) || \
+     (defined(_MSC_VER) && _MSC_VER >= 1600))
+  #define EIGEN_HAVE_RVALUE_REFERENCES
+#endif
+
+
 // Cross compiler wrapper around LLVM's __has_builtin
 #ifdef __has_builtin
 # define EIGEN_HAS_BUILTIN(x) __has_builtin(x)
@@ -278,6 +292,7 @@ namespace Eigen {
 #error Please tell me what is the equivalent of __attribute__((aligned(n))) for your compiler
 #endif

+#define EIGEN_ALIGN8 EIGEN_ALIGN_TO_BOUNDARY(8)
 #define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)

 #if EIGEN_ALIGN_STATICALLY
@@ -332,8 +347,11 @@ namespace Eigen {
 }
 #endif

-#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
-  EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
+/** \internal
+ * \brief Macro to manually inherit assignment operators.
+ * This is necessary, because the implicitly defined assignment operator gets deleted when a custom operator= is defined.
+ */
+#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)

 /**
  * Just a side note. Commenting within defines works only by documenting
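Aside (not part of the diff): the macro documented above re-imports base-class assignment operators into a derived class. The standalone sketch below shows the underlying C++ issue: a derived operator= hides the base-class overloads, so they have to be pulled back in with a using-declaration (the class and member names are made up):

#include <iostream>

struct Base {
  int value = 0;
  Base& operator=(int v) { value = v; return *this; }   // assignment from a scalar
};

struct Derived : Base {
  using Base::operator=;   // same role as EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
  Derived& operator=(const Derived& other) { value = other.value; return *this; }
};

int main() {
  Derived d;
  d = 42;                  // finds Base::operator=(int) only because of the using-declaration
  std::cout << d.value << "\n";
}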
@@ -405,6 +423,8 @@ namespace Eigen {
 #define EIGEN_SIZE_MAX(a,b) (((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
                            : ((int)a >= (int)b) ? (int)a : (int)b)

+#define EIGEN_ADD_COST(a,b) int(a)==Dynamic || int(b)==Dynamic ? Dynamic : int(a)+int(b)
+
 #define EIGEN_LOGICAL_XOR(a,b) (((a) || (b)) && !((a) && (b)))

 #define EIGEN_IMPLIES(a,b) (!(a) || (b))
@@ -523,7 +523,7 @@ template<typename T> struct smart_copy_helper<T,false> {
 // you can overwrite Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA
 // to the appropriate stack allocation function
 #ifndef EIGEN_ALLOCA
-  #if (defined __linux__)
+  #if (defined __linux__) || (defined __APPLE__) || (defined alloca)
     #define EIGEN_ALLOCA alloca
   #elif defined(_MSC_VER)
     #define EIGEN_ALLOCA _alloca
@@ -366,17 +366,17 @@ struct dense_xpr_base<Derived, ArrayXpr>

 /** \internal Helper base class to add a scalar multiple operator
   * overloads for complex types */
-template<typename Derived,typename Scalar,typename OtherScalar,
+template<typename Derived, typename Scalar, typename OtherScalar, typename BaseType,
          bool EnableIt = !is_same<Scalar,OtherScalar>::value >
-struct special_scalar_op_base : public DenseCoeffsBase<Derived>
+struct special_scalar_op_base : public BaseType
 {
   // dummy operator* so that the
   // "using special_scalar_op_base::operator*" compiles
   void operator*() const;
 };

-template<typename Derived,typename Scalar,typename OtherScalar>
-struct special_scalar_op_base<Derived,Scalar,OtherScalar,true> : public DenseCoeffsBase<Derived>
+template<typename Derived,typename Scalar,typename OtherScalar, typename BaseType>
+struct special_scalar_op_base<Derived,Scalar,OtherScalar,BaseType,true> : public BaseType
 {
   const CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
   operator*(const OtherScalar& scalar) const
@@ -234,6 +234,12 @@ template<typename _MatrixType> class ComplexEigenSolver
     }

   protected:

+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+    }
+
     EigenvectorType m_eivec;
     EigenvalueType m_eivalues;
     ComplexSchur<MatrixType> m_schur;

@@ -251,6 +257,8 @@ template<typename MatrixType>
 ComplexEigenSolver<MatrixType>&
 ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
 {
+  check_template_parameters();
+
   // this code is inspired from Jampack
   eigen_assert(matrix.cols() == matrix.rows());
@@ -298,6 +298,13 @@ template<typename _MatrixType> class EigenSolver
     void doComputeEigenvectors();

   protected:

+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+      EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
+    }
+
     MatrixType m_eivec;
     EigenvalueType m_eivalues;
     bool m_isInitialized;

@@ -364,6 +371,8 @@ template<typename MatrixType>
 EigenSolver<MatrixType>&
 EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
 {
+  check_template_parameters();
+
   using std::sqrt;
   using std::abs;
   eigen_assert(matrix.cols() == matrix.rows());
@@ -263,6 +263,13 @@ template<typename _MatrixType> class GeneralizedEigenSolver
     }

   protected:

+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+      EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
+    }
+
     MatrixType m_eivec;
     ComplexVectorType m_alphas;
     VectorType m_betas;

@@ -290,6 +297,8 @@ template<typename MatrixType>
 GeneralizedEigenSolver<MatrixType>&
 GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors)
 {
+  check_template_parameters();
+
   using std::sqrt;
   using std::abs;
   eigen_assert(A.cols() == A.rows() && B.cols() == A.rows() && B.cols() == B.rows());
@@ -240,10 +240,10 @@ namespace Eigen {
           m_S.coeffRef(i,j) = Scalar(0.0);
           m_S.rightCols(dim-j-1).applyOnTheLeft(i-1,i,G.adjoint());
           m_T.rightCols(dim-i+1).applyOnTheLeft(i-1,i,G.adjoint());
+          // update Q
+          if (m_computeQZ)
+            m_Q.applyOnTheRight(i-1,i,G);
         }
-        // update Q
-        if (m_computeQZ)
-          m_Q.applyOnTheRight(i-1,i,G);
         // kill T(i,i-1)
         if(m_T.coeff(i,i-1)!=Scalar(0))
         {

@@ -251,10 +251,10 @@ namespace Eigen {
           m_T.coeffRef(i,i-1) = Scalar(0.0);
           m_S.applyOnTheRight(i,i-1,G);
           m_T.topRows(i).applyOnTheRight(i,i-1,G);
+          // update Z
+          if (m_computeQZ)
+            m_Z.applyOnTheLeft(i,i-1,G.adjoint());
         }
-        // update Z
-        if (m_computeQZ)
-          m_Z.applyOnTheLeft(i,i-1,G.adjoint());
       }
     }
   }
@@ -234,7 +234,7 @@ template<typename _MatrixType> class RealSchur
     typedef Matrix<Scalar,3,1> Vector3s;

     Scalar computeNormOfT();
-    Index findSmallSubdiagEntry(Index iu, const Scalar& norm);
+    Index findSmallSubdiagEntry(Index iu);
     void splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift);
     void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
     void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);

@@ -286,7 +286,7 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMa
   {
     while (iu >= 0)
     {
-      Index il = findSmallSubdiagEntry(iu, norm);
+      Index il = findSmallSubdiagEntry(iu);

       // Check for convergence
       if (il == iu) // One root found

@@ -343,16 +343,14 @@ inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()

 /** \internal Look for single small sub-diagonal element and returns its index */
 template<typename MatrixType>
-inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, const Scalar& norm)
+inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu)
 {
   using std::abs;
   Index res = iu;
   while (res > 0)
   {
     Scalar s = abs(m_matT.coeff(res-1,res-1)) + abs(m_matT.coeff(res,res));
-    if (s == 0.0)
-      s = norm;
-    if (abs(m_matT.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
+    if (abs(m_matT.coeff(res,res-1)) <= NumTraits<Scalar>::epsilon() * s)
       break;
     res--;
   }

@@ -457,9 +455,7 @@ inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const V
       const Scalar lhs = m_matT.coeff(im,im-1) * (abs(v.coeff(1)) + abs(v.coeff(2)));
       const Scalar rhs = v.coeff(0) * (abs(m_matT.coeff(im-1,im-1)) + abs(Tmm) + abs(m_matT.coeff(im+1,im+1)));
       if (abs(lhs) < NumTraits<Scalar>::epsilon() * rhs)
-      {
         break;
-      }
     }
   }
@@ -80,6 +80,8 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
     /** \brief Scalar type for matrices of type \p _MatrixType. */
     typedef typename MatrixType::Scalar Scalar;
     typedef typename MatrixType::Index Index;
+
+    typedef Matrix<Scalar,Size,Size,ColMajor,MaxColsAtCompileTime,MaxColsAtCompileTime> EigenvectorsType;
 
     /** \brief Real scalar type for \p _MatrixType.
      *
@@ -225,7 +227,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
      *
      * \sa eigenvalues()
      */
-    const MatrixType& eigenvectors() const
+    const EigenvectorsType& eigenvectors() const
     {
       eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
       eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
@@ -351,7 +353,12 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
     #endif // EIGEN2_SUPPORT
 
   protected:
-    MatrixType m_eivec;
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+    }
+
+    EigenvectorsType m_eivec;
     RealVectorType m_eivalues;
     typename TridiagonalizationType::SubDiagonalType m_subdiag;
     ComputationInfo m_info;
@@ -376,7 +383,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
  * "implicit symmetric QR step with Wilkinson shift"
  */
 namespace internal {
-template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
+template<typename RealScalar, typename Scalar, typename Index>
 static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n);
 }
 
@@ -384,6 +391,8 @@ template<typename MatrixType>
 SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
 ::compute(const MatrixType& matrix, int options)
 {
+  check_template_parameters();
+
   using std::abs;
   eigen_assert(matrix.cols() == matrix.rows());
   eigen_assert((options&~(EigVecMask|GenEigMask))==0
@@ -406,7 +415,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
 
   // declare some aliases
   RealVectorType& diag = m_eivalues;
-  MatrixType& mat = m_eivec;
+  EigenvectorsType& mat = m_eivec;
 
   // map the matrix coefficients to [-1:1] to avoid over- and underflow.
   mat = matrix.template triangularView<Lower>();
@@ -442,7 +451,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
     while (start>0 && m_subdiag[start-1]!=0)
       start--;
 
-    internal::tridiagonal_qr_step<MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor>(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n);
+    internal::tridiagonal_qr_step(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n);
   }
 
   if (iter <= m_maxIterations * n)
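For reference, a minimal usage sketch of the iterative solver path touched above (my example, not part of the patch; the matrix values are illustrative and Eigen 3.2-era headers are assumed):

    #include <Eigen/Dense>
    #include <iostream>
    int main()
    {
      Eigen::Matrix3d A;
      A << 2, 1, 0,
           1, 2, 1,
           0, 1, 2;                                          // symmetric test matrix (illustrative)
      Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> es(A);  // compute() now starts with check_template_parameters()
      if (es.info() != Eigen::Success) return 1;
      std::cout << es.eigenvalues().transpose() << "\n";     // eigenvalues sorted in increasing order
      std::cout << es.eigenvectors() << "\n";                // now returned as EigenvectorsType
      return 0;
    }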
@@ -490,7 +499,13 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
   typedef typename SolverType::MatrixType MatrixType;
   typedef typename SolverType::RealVectorType VectorType;
   typedef typename SolverType::Scalar Scalar;
+  typedef typename MatrixType::Index Index;
+  typedef typename SolverType::EigenvectorsType EigenvectorsType;
 
+  /** \internal
+   * Computes the roots of the characteristic polynomial of \a m.
+   * For numerical stability m.trace() should be near zero and to avoid over- or underflow m should be normalized.
+   */
   static inline void computeRoots(const MatrixType& m, VectorType& roots)
   {
     using std::sqrt;
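For the record, the closed form that computeRoots() below relies on is the standard trigonometric (Viete) solution of the cubic; in my notation, which mirrors a_over_3 and half_b and is a restatement rather than part of the patch, the three roots of the shifted, scaled characteristic polynomial are

    \left\{\; \frac{c_2}{3} + 2\rho\cos\!\left(\theta + \frac{2\pi k}{3}\right) \;:\; k = 0,1,2 \right\},
    \qquad \rho = \sqrt{a}, \qquad
    \theta = \frac{1}{3}\,\mathrm{atan2}\!\left(\sqrt{a^{3} - b^{2}},\, b\right).

Because theta lies in [0, pi/3], cos(theta + 2*pi/3) <= cos(theta - 2*pi/3) <= cos(theta), so assigning the three terms in that order yields roots(0) <= roots(1) <= roots(2) without the explicit sorting pass that the hunk below removes.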
@@ -510,158 +525,123 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
     // Construct the parameters used in classifying the roots of the equation
     // and in solving the equation for the roots in closed form.
     Scalar c2_over_3 = c2*s_inv3;
-    Scalar a_over_3 = (c1 - c2*c2_over_3)*s_inv3;
-    if (a_over_3 > Scalar(0))
+    Scalar a_over_3 = (c2*c2_over_3 - c1)*s_inv3;
+    if(a_over_3<Scalar(0))
       a_over_3 = Scalar(0);
 
     Scalar half_b = Scalar(0.5)*(c0 + c2_over_3*(Scalar(2)*c2_over_3*c2_over_3 - c1));
 
-    Scalar q = half_b*half_b + a_over_3*a_over_3*a_over_3;
-    if (q > Scalar(0))
+    Scalar q = a_over_3*a_over_3*a_over_3 - half_b*half_b;
+    if(q<Scalar(0))
       q = Scalar(0);
 
     // Compute the eigenvalues by solving for the roots of the polynomial.
-    Scalar rho = sqrt(-a_over_3);
-    Scalar theta = atan2(sqrt(-q),half_b)*s_inv3;
+    Scalar rho = sqrt(a_over_3);
+    Scalar theta = atan2(sqrt(q),half_b)*s_inv3;  // since sqrt(q) > 0, atan2 is in [0, pi] and theta is in [0, pi/3]
     Scalar cos_theta = cos(theta);
     Scalar sin_theta = sin(theta);
-    roots(0) = c2_over_3 + Scalar(2)*rho*cos_theta;
-    roots(1) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta);
-    roots(2) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta);
-
-    // Sort in increasing order.
-    if (roots(0) >= roots(1))
-      std::swap(roots(0),roots(1));
-    if (roots(1) >= roots(2))
-    {
-      std::swap(roots(1),roots(2));
-      if (roots(0) >= roots(1))
-        std::swap(roots(0),roots(1));
-    }
+    // roots are already sorted, since cos is monotonically decreasing on [0, pi]
+    roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); // == 2*rho*cos(theta+2pi/3)
+    roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); // == 2*rho*cos(theta+ pi/3)
+    roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta;
   }
 
+  static inline bool extract_kernel(MatrixType& mat, Ref<VectorType> res, Ref<VectorType> representative)
+  {
+    using std::abs;
+    Index i0;
+    // Find non-zero column i0 (by construction, there must exist a non zero coefficient on the diagonal):
+    mat.diagonal().cwiseAbs().maxCoeff(&i0);
+    // mat.col(i0) is a good candidate for an orthogonal vector to the current eigenvector,
+    // so let's save it:
+    representative = mat.col(i0);
+    Scalar n0, n1;
+    VectorType c0, c1;
+    n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm();
+    n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm();
+    if(n0>n1) res = c0/std::sqrt(n0);
+    else      res = c1/std::sqrt(n1);
+
+    return true;
+  }
+
   static inline void run(SolverType& solver, const MatrixType& mat, int options)
   {
-    using std::sqrt;
     eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows());
     eigen_assert((options&~(EigVecMask|GenEigMask))==0
               && (options&EigVecMask)!=EigVecMask
               && "invalid option parameter");
     bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
 
-    MatrixType& eivecs = solver.m_eivec;
+    EigenvectorsType& eivecs = solver.m_eivec;
     VectorType& eivals = solver.m_eivalues;
 
-    // map the matrix coefficients to [-1:1] to avoid over- and underflow.
-    Scalar scale = mat.cwiseAbs().maxCoeff();
-    MatrixType scaledMat = mat / scale;
+    // Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.
+    Scalar shift = mat.trace() / Scalar(3);
+    // TODO Avoid this copy. Currently it is necessary to suppress bogus values when determining maxCoeff and for computing the eigenvectors later
+    MatrixType scaledMat = mat.template selfadjointView<Lower>();
+    scaledMat.diagonal().array() -= shift;
+    Scalar scale = scaledMat.cwiseAbs().maxCoeff();
+    if(scale > 0) scaledMat /= scale;   // TODO for scale==0 we could save the remaining operations
 
     // compute the eigenvalues
     computeRoots(scaledMat,eivals);
 
-    // compute the eigen vectors
+    // compute the eigenvectors
     if(computeEigenvectors)
     {
-      Scalar safeNorm2 = Eigen::NumTraits<Scalar>::epsilon();
       if((eivals(2)-eivals(0))<=Eigen::NumTraits<Scalar>::epsilon())
       {
+        // All three eigenvalues are numerically the same
         eivecs.setIdentity();
       }
       else
       {
-        scaledMat = scaledMat.template selfadjointView<Lower>();
         MatrixType tmp;
         tmp = scaledMat;
 
+        // Compute the eigenvector of the most distinct eigenvalue
         Scalar d0 = eivals(2) - eivals(1);
         Scalar d1 = eivals(1) - eivals(0);
-        int k =  d0 > d1 ? 2 : 0;
-        d0 = d0 > d1 ? d0 : d1;
-
-        tmp.diagonal().array () -= eivals(k);
-        VectorType cross;
-        Scalar n;
-        n = (cross = tmp.row(0).cross(tmp.row(1))).squaredNorm();
-
-        if(n>safeNorm2)
-        {
-          eivecs.col(k) = cross / sqrt(n);
-        }
-        else
-        {
-          n = (cross = tmp.row(0).cross(tmp.row(2))).squaredNorm();
-
-          if(n>safeNorm2)
-          {
-            eivecs.col(k) = cross / sqrt(n);
-          }
-          else
-          {
-            n = (cross = tmp.row(1).cross(tmp.row(2))).squaredNorm();
-
-            if(n>safeNorm2)
-            {
-              eivecs.col(k) = cross / sqrt(n);
-            }
-            else
-            {
-              // the input matrix and/or the eigenvaues probably contains some inf/NaN,
-              // => exit
-              // scale back to the original size.
-              eivals *= scale;
-
-              solver.m_info = NumericalIssue;
-              solver.m_isInitialized = true;
-              solver.m_eigenvectorsOk = computeEigenvectors;
-              return;
-            }
-          }
-        }
-
-        tmp = scaledMat;
-        tmp.diagonal().array() -= eivals(1);
-
-        if(d0<=Eigen::NumTraits<Scalar>::epsilon())
-        {
-          eivecs.col(1) = eivecs.col(k).unitOrthogonal();
-        }
-        else
-        {
-          n = (cross = eivecs.col(k).cross(tmp.row(0))).squaredNorm();
-          if(n>safeNorm2)
-          {
-            eivecs.col(1) = cross / sqrt(n);
-          }
-          else
-          {
-            n = (cross = eivecs.col(k).cross(tmp.row(1))).squaredNorm();
-            if(n>safeNorm2)
-              eivecs.col(1) = cross / sqrt(n);
-            else
-            {
-              n = (cross = eivecs.col(k).cross(tmp.row(2))).squaredNorm();
-              if(n>safeNorm2)
-                eivecs.col(1) = cross / sqrt(n);
-              else
-              {
-                // we should never reach this point,
-                // if so the last two eigenvalues are likely to be very close to each other
-                eivecs.col(1) = eivecs.col(k).unitOrthogonal();
-              }
-            }
-          }
-
-          // make sure that eivecs[1] is orthogonal to eivecs[2]
-          // FIXME: this step should not be needed
-          Scalar d = eivecs.col(1).dot(eivecs.col(k));
-          eivecs.col(1) = (eivecs.col(1) - d * eivecs.col(k)).normalized();
-        }
-
-        eivecs.col(k==2 ? 0 : 2) = eivecs.col(k).cross(eivecs.col(1)).normalized();
+        Index k(0), l(2);
+        if(d0 > d1)
+        {
+          std::swap(k,l);
+          d0 = d1;
+        }
+
+        // Compute the eigenvector of index k
+        {
+          tmp.diagonal().array () -= eivals(k);
+          // By construction, 'tmp' is of rank 2, and its kernel corresponds to the respective eigenvector.
+          extract_kernel(tmp, eivecs.col(k), eivecs.col(l));
+        }
+
+        // Compute eigenvector of index l
+        if(d0<=2*Eigen::NumTraits<Scalar>::epsilon()*d1)
+        {
+          // If d0 is too small, then the two other eigenvalues are numerically the same,
+          // and thus we only have to ortho-normalize the near orthogonal vector we saved above.
+          eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l))*eivecs.col(l);
+          eivecs.col(l).normalize();
+        }
+        else
+        {
+          tmp = scaledMat;
+          tmp.diagonal().array () -= eivals(l);
+
+          VectorType dummy;
+          extract_kernel(tmp, eivecs.col(l), dummy);
+        }
+
+        // Compute last eigenvector from the other two
+        eivecs.col(1) = eivecs.col(2).cross(eivecs.col(0)).normalized();
       }
     }
 
     // Rescale back to the original size.
     eivals *= scale;
+    eivals.array() += shift;
 
     solver.m_info = Success;
     solver.m_isInitialized = true;
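A small sketch of the direct 3x3 path exercised above; computeDirect() is the public entry point that ends up in this specialization (my example, values illustrative):

    #include <Eigen/Dense>
    int main()
    {
      Eigen::Matrix3f A;
      A << 4, 1, 0,
           1, 3, 1,
           0, 1, 2;                        // symmetric, illustrative values
      Eigen::SelfAdjointEigenSolver<Eigen::Matrix3f> es;
      es.computeDirect(A);                 // closed-form roots + kernel extraction as above
      // es.eigenvalues() is sorted; es.eigenvectors() has unit, mutually orthogonal columns.
      return es.info() == Eigen::Success ? 0 : 1;
    }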
@@ -675,11 +655,12 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
   typedef typename SolverType::MatrixType MatrixType;
   typedef typename SolverType::RealVectorType VectorType;
   typedef typename SolverType::Scalar Scalar;
+  typedef typename SolverType::EigenvectorsType EigenvectorsType;
 
   static inline void computeRoots(const MatrixType& m, VectorType& roots)
   {
     using std::sqrt;
-    const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*m(1,0)*m(1,0));
+    const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0)));
     const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));
     roots(0) = t1 - t0;
     roots(1) = t1 + t0;
@@ -688,13 +669,15 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
   static inline void run(SolverType& solver, const MatrixType& mat, int options)
   {
     using std::sqrt;
+    using std::abs;
+
     eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());
     eigen_assert((options&~(EigVecMask|GenEigMask))==0
               && (options&EigVecMask)!=EigVecMask
               && "invalid option parameter");
     bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
 
-    MatrixType& eivecs = solver.m_eivec;
+    EigenvectorsType& eivecs = solver.m_eivec;
     VectorType& eivals = solver.m_eivalues;
 
     // map the matrix coefficients to [-1:1] to avoid over- and underflow.
@@ -708,22 +691,29 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
     // compute the eigen vectors
     if(computeEigenvectors)
     {
-      scaledMat.diagonal().array () -= eivals(1);
-      Scalar a2 = numext::abs2(scaledMat(0,0));
-      Scalar c2 = numext::abs2(scaledMat(1,1));
-      Scalar b2 = numext::abs2(scaledMat(1,0));
-      if(a2>c2)
+      if((eivals(1)-eivals(0))<=abs(eivals(1))*Eigen::NumTraits<Scalar>::epsilon())
       {
-        eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
-        eivecs.col(1) /= sqrt(a2+b2);
+        eivecs.setIdentity();
       }
       else
       {
-        eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
-        eivecs.col(1) /= sqrt(c2+b2);
-      }
+        scaledMat.diagonal().array () -= eivals(1);
+        Scalar a2 = numext::abs2(scaledMat(0,0));
+        Scalar c2 = numext::abs2(scaledMat(1,1));
+        Scalar b2 = numext::abs2(scaledMat(1,0));
+        if(a2>c2)
+        {
+          eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
+          eivecs.col(1) /= sqrt(a2+b2);
+        }
+        else
+        {
+          eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
+          eivecs.col(1) /= sqrt(c2+b2);
+        }
 
       eivecs.col(0) << eivecs.col(1).unitOrthogonal();
+      }
     }
 
     // Rescale back to the original size.
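For the 2x2 specialization above, t0 and t1 are just the textbook closed form (a restatement, not from the patch): the eigenvalues of a self-adjoint [[a, b], [conj(b), c]] are

    \lambda_{\pm} = \frac{a + c}{2} \pm \sqrt{\left(\frac{a - c}{2}\right)^{2} + |b|^{2}},

i.e. roots(0) = t1 - t0 and roots(1) = t1 + t0. Switching from m(1,0)*m(1,0) to numext::abs2(m(1,0)) is what makes the |b|^2 term correct for complex scalars as well.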
@@ -746,7 +736,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
 }
 
 namespace internal {
-template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
+template<typename RealScalar, typename Scalar, typename Index>
 static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
 {
   using std::abs;
@@ -798,8 +788,7 @@ static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index sta
     // apply the givens rotation to the unit matrix Q = Q * G
     if (matrixQ)
     {
-      // FIXME if StorageOrder == RowMajor this operation is not very efficient
-      Map<Matrix<Scalar,Dynamic,Dynamic,StorageOrder> > q(matrixQ,n,n);
+      Map<Matrix<Scalar,Dynamic,Dynamic,ColMajor> > q(matrixQ,n,n);
       q.applyOnTheRight(k,k+1,rot);
     }
   }
@@ -19,10 +19,12 @@ namespace Eigen {
  *
  * \brief An axis aligned box
  *
- * \param _Scalar the type of the scalar coefficients
- * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+ * \tparam _Scalar the type of the scalar coefficients
+ * \tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
  *
  * This class represents an axis aligned box as a pair of the minimal and maximal corners.
+ * \warning The result of most methods is undefined when applied to an empty box. You can check for empty boxes using isEmpty().
+ * \sa alignedboxtypedefs
  */
 template <typename _Scalar, int _AmbientDim>
 class AlignedBox
@@ -40,18 +42,21 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
   /** Define constants to name the corners of a 1D, 2D or 3D axis aligned bounding box */
   enum CornerType
   {
-    /** 1D names */
+    /** 1D names @{ */
     Min=0, Max=1,
+    /** @} */
 
-    /** Added names for 2D */
+    /** Identifier for 2D corner @{ */
     BottomLeft=0, BottomRight=1,
     TopLeft=2, TopRight=3,
+    /** @} */
 
-    /** Added names for 3D */
+    /** Identifier for 3D corner @{ */
     BottomLeftFloor=0, BottomRightFloor=1,
     TopLeftFloor=2, TopRightFloor=3,
     BottomLeftCeil=4, BottomRightCeil=5,
     TopLeftCeil=6, TopRightCeil=7
+    /** @} */
   };
 
 
@@ -63,34 +68,33 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
   inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim)
   { setEmpty(); }
 
-  /** Constructs a box with extremities \a _min and \a _max. */
+  /** Constructs a box with extremities \a _min and \a _max.
+   * \warning If either component of \a _min is larger than the same component of \a _max, the constructed box is empty. */
   template<typename OtherVectorType1, typename OtherVectorType2>
   inline AlignedBox(const OtherVectorType1& _min, const OtherVectorType2& _max) : m_min(_min), m_max(_max) {}
 
   /** Constructs a box containing a single point \a p. */
   template<typename Derived>
-  inline explicit AlignedBox(const MatrixBase<Derived>& a_p)
-  {
-    typename internal::nested<Derived,2>::type p(a_p.derived());
-    m_min = p;
-    m_max = p;
-  }
+  inline explicit AlignedBox(const MatrixBase<Derived>& p) : m_min(p), m_max(m_min)
+  { }
 
   ~AlignedBox() {}
 
   /** \returns the dimension in which the box holds */
   inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size() : Index(AmbientDimAtCompileTime); }
 
-  /** \deprecated use isEmpty */
+  /** \deprecated use isEmpty() */
   inline bool isNull() const { return isEmpty(); }
 
-  /** \deprecated use setEmpty */
+  /** \deprecated use setEmpty() */
   inline void setNull() { setEmpty(); }
 
-  /** \returns true if the box is empty. */
+  /** \returns true if the box is empty.
+   * \sa setEmpty */
   inline bool isEmpty() const { return (m_min.array() > m_max.array()).any(); }
 
-  /** Makes \c *this an empty box. */
+  /** Makes \c *this an empty box.
+   * \sa isEmpty */
   inline void setEmpty()
   {
     m_min.setConstant( ScalarTraits::highest() );
@@ -159,7 +163,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
    * a uniform distribution */
   inline VectorType sample() const
   {
-    VectorType r;
+    VectorType r(dim());
     for(Index d=0; d<dim(); ++d)
     {
       if(!ScalarTraits::IsInteger)
@@ -175,27 +179,34 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
 
   /** \returns true if the point \a p is inside the box \c *this. */
   template<typename Derived>
-  inline bool contains(const MatrixBase<Derived>& a_p) const
+  inline bool contains(const MatrixBase<Derived>& p) const
   {
-    typename internal::nested<Derived,2>::type p(a_p.derived());
-    return (m_min.array()<=p.array()).all() && (p.array()<=m_max.array()).all();
+    typename internal::nested<Derived,2>::type p_n(p.derived());
+    return (m_min.array()<=p_n.array()).all() && (p_n.array()<=m_max.array()).all();
   }
 
   /** \returns true if the box \a b is entirely inside the box \c *this. */
   inline bool contains(const AlignedBox& b) const
   { return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); }
 
-  /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
+  /** \returns true if the box \a b is intersecting the box \c *this.
+   * \sa intersection, clamp */
+  inline bool intersects(const AlignedBox& b) const
+  { return (m_min.array()<=(b.max)().array()).all() && ((b.min)().array()<=m_max.array()).all(); }
+
+  /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this.
+   * \sa extend(const AlignedBox&) */
   template<typename Derived>
-  inline AlignedBox& extend(const MatrixBase<Derived>& a_p)
+  inline AlignedBox& extend(const MatrixBase<Derived>& p)
   {
-    typename internal::nested<Derived,2>::type p(a_p.derived());
-    m_min = m_min.cwiseMin(p);
-    m_max = m_max.cwiseMax(p);
+    typename internal::nested<Derived,2>::type p_n(p.derived());
+    m_min = m_min.cwiseMin(p_n);
+    m_max = m_max.cwiseMax(p_n);
     return *this;
   }
 
-  /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */
+  /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this.
+   * \sa merged, extend(const MatrixBase&) */
   inline AlignedBox& extend(const AlignedBox& b)
   {
     m_min = m_min.cwiseMin(b.m_min);
@@ -203,7 +214,9 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
     return *this;
   }
 
-  /** Clamps \c *this by the box \a b and returns a reference to \c *this. */
+  /** Clamps \c *this by the box \a b and returns a reference to \c *this.
+   * \note If the boxes don't intersect, the resulting box is empty.
+   * \sa intersection(), intersects() */
   inline AlignedBox& clamp(const AlignedBox& b)
   {
     m_min = m_min.cwiseMax(b.m_min);
@@ -211,11 +224,15 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
     return *this;
   }
 
-  /** Returns an AlignedBox that is the intersection of \a b and \c *this */
+  /** Returns an AlignedBox that is the intersection of \a b and \c *this
+   * \note If the boxes don't intersect, the resulting box is empty.
+   * \sa intersects(), clamp, contains() */
   inline AlignedBox intersection(const AlignedBox& b) const
   {return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); }
 
-  /** Returns an AlignedBox that is the union of \a b and \c *this */
+  /** Returns an AlignedBox that is the union of \a b and \c *this.
+   * \note Merging with an empty box may result in a box bigger than \c *this.
+   * \sa extend(const AlignedBox&) */
   inline AlignedBox merged(const AlignedBox& b) const
   { return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); }
 
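Usage sketch for the box predicates documented above; intersects() is the newly added query (my example, values illustrative):

    #include <Eigen/Geometry>
    using Eigen::AlignedBox3d;
    using Eigen::Vector3d;
    AlignedBox3d a(Vector3d(0,0,0), Vector3d(2,2,2));
    AlignedBox3d b(Vector3d(1,1,1), Vector3d(3,3,3));
    bool overlap = a.intersects(b);          // true: the boxes share the cube [1,2]^3
    AlignedBox3d c = a.intersection(b);      // that shared cube; an empty box if they were disjoint
    a.extend(Vector3d(-1,-1,-1));            // grow a so it contains the point
    bool empty = AlignedBox3d().isEmpty();   // a default-constructed box starts out empty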
@@ -231,20 +248,20 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
 
   /** \returns the squared distance between the point \a p and the box \c *this,
    * and zero if \a p is inside the box.
-   * \sa exteriorDistance()
+   * \sa exteriorDistance(const MatrixBase&), squaredExteriorDistance(const AlignedBox&)
    */
   template<typename Derived>
-  inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& a_p) const;
+  inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& p) const;
 
   /** \returns the squared distance between the boxes \a b and \c *this,
    * and zero if the boxes intersect.
-   * \sa exteriorDistance()
+   * \sa exteriorDistance(const AlignedBox&), squaredExteriorDistance(const MatrixBase&)
    */
   inline Scalar squaredExteriorDistance(const AlignedBox& b) const;
 
   /** \returns the distance between the point \a p and the box \c *this,
    * and zero if \a p is inside the box.
-   * \sa squaredExteriorDistance()
+   * \sa squaredExteriorDistance(const MatrixBase&), exteriorDistance(const AlignedBox&)
    */
   template<typename Derived>
   inline NonInteger exteriorDistance(const MatrixBase<Derived>& p) const
@@ -252,7 +269,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
 
   /** \returns the distance between the boxes \a b and \c *this,
    * and zero if the boxes intersect.
-   * \sa squaredExteriorDistance()
+   * \sa squaredExteriorDistance(const AlignedBox&), exteriorDistance(const MatrixBase&)
    */
   inline NonInteger exteriorDistance(const AlignedBox& b) const
   { using std::sqrt; return sqrt(NonInteger(squaredExteriorDistance(b))); }
 
@@ -131,7 +131,7 @@ public:
     m_angle = Scalar(other.angle());
   }
 
-  static inline const AngleAxis Identity() { return AngleAxis(0, Vector3::UnitX()); }
+  static inline const AngleAxis Identity() { return AngleAxis(Scalar(0), Vector3::UnitX()); }
 
   /** \returns \c true if \c *this is approximately equal to \a other, within the precision
    * determined by \a prec.
@@ -165,8 +165,8 @@ AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived
   Scalar n2 = q.vec().squaredNorm();
   if (n2 < NumTraits<Scalar>::dummy_precision()*NumTraits<Scalar>::dummy_precision())
   {
-    m_angle = 0;
-    m_axis << 1, 0, 0;
+    m_angle = Scalar(0);
+    m_axis << Scalar(1), Scalar(0), Scalar(0);
   }
   else
   {
@@ -79,7 +79,7 @@ template<typename MatrixType,int _Direction> class Homogeneous
   {
     if( (int(Direction)==Vertical   && row==m_matrix.rows())
      || (int(Direction)==Horizontal && col==m_matrix.cols()))
-      return 1;
+      return Scalar(1);
     return m_matrix.coeff(row, col);
   }
 
@@ -102,11 +102,11 @@ public:
   /** \returns a quaternion representing an identity rotation
    * \sa MatrixBase::Identity()
    */
-  static inline Quaternion<Scalar> Identity() { return Quaternion<Scalar>(1, 0, 0, 0); }
+  static inline Quaternion<Scalar> Identity() { return Quaternion<Scalar>(Scalar(1), Scalar(0), Scalar(0), Scalar(0)); }
 
   /** \sa QuaternionBase::Identity(), MatrixBase::setIdentity()
    */
-  inline QuaternionBase& setIdentity() { coeffs() << 0, 0, 0, 1; return *this; }
+  inline QuaternionBase& setIdentity() { coeffs() << Scalar(0), Scalar(0), Scalar(0), Scalar(1); return *this; }
 
   /** \returns the squared norm of the quaternion's coefficients
    * \sa QuaternionBase::norm(), MatrixBase::squaredNorm()
@@ -161,7 +161,7 @@ public:
   { return coeffs().isApprox(other.coeffs(), prec); }
 
   /** return the result vector of \a v through the rotation*/
-  EIGEN_STRONG_INLINE Vector3 _transformVector(Vector3 v) const;
+  EIGEN_STRONG_INLINE Vector3 _transformVector(const Vector3& v) const;
 
   /** \returns \c *this with scalar type casted to \a NewScalarType
    *
@@ -231,7 +231,7 @@ class Quaternion : public QuaternionBase<Quaternion<_Scalar,_Options> >
 public:
   typedef _Scalar Scalar;
 
-  EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Quaternion)
+  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Quaternion)
   using Base::operator*=;
 
   typedef typename internal::traits<Quaternion>::Coefficients Coefficients;
@@ -341,7 +341,7 @@ class Map<const Quaternion<_Scalar>, _Options >
   public:
     typedef _Scalar Scalar;
     typedef typename internal::traits<Map>::Coefficients Coefficients;
-    EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map)
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
    using Base::operator*=;
 
    /** Constructs a Mapped Quaternion object from the pointer \a coeffs
@@ -378,7 +378,7 @@ class Map<Quaternion<_Scalar>, _Options >
   public:
     typedef _Scalar Scalar;
     typedef typename internal::traits<Map>::Coefficients Coefficients;
-    EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map)
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
    using Base::operator*=;
 
    /** Constructs a Mapped Quaternion object from the pointer \a coeffs
@@ -461,7 +461,7 @@ EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator*= (const Quaterni
  */
 template <class Derived>
 EIGEN_STRONG_INLINE typename QuaternionBase<Derived>::Vector3
-QuaternionBase<Derived>::_transformVector(Vector3 v) const
+QuaternionBase<Derived>::_transformVector(const Vector3& v) const
 {
   // Note that this algorithm comes from the optimization by hand
   // of the conversion to a Matrix followed by a Matrix/Vector product.
@@ -637,7 +637,7 @@ inline Quaternion<typename internal::traits<Derived>::Scalar> QuaternionBase<Der
 {
   // FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite() ??
   Scalar n2 = this->squaredNorm();
-  if (n2 > 0)
+  if (n2 > Scalar(0))
     return Quaternion<Scalar>(conjugate().coeffs() / n2);
   else
   {
@@ -667,12 +667,10 @@ template <class OtherDerived>
 inline typename internal::traits<Derived>::Scalar
 QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& other) const
 {
-  using std::acos;
+  using std::atan2;
   using std::abs;
-  Scalar d = abs(this->dot(other));
-  if (d>=Scalar(1))
-    return Scalar(0);
-  return Scalar(2) * acos(d);
+  Quaternion<Scalar> d = (*this) * other.conjugate();
+  return Scalar(2) * atan2( d.vec().norm(), abs(d.w()) );
 }
 
 
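A quick check of the angularDistance() rewrite above: the atan2 form keeps precision for nearly identical rotations, where acos of a dot product that rounds to 1 collapses to zero (my example, not from the patch):

    #include <Eigen/Geometry>
    #include <iostream>
    int main()
    {
      using namespace Eigen;
      Quaterniond qa(AngleAxisd(0.3, Vector3d::UnitZ()));
      Quaterniond qb(AngleAxisd(0.3 + 1e-9, Vector3d::UnitZ()));
      std::cout << qa.angularDistance(qb) << "\n";   // roughly 1e-9 with the atan2 version
      return 0;
    }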
@@ -712,7 +710,7 @@ QuaternionBase<Derived>::slerp(const Scalar& t, const QuaternionBase<OtherDerive
     scale0 = sin( ( Scalar(1) - t ) * theta) / sinTheta;
     scale1 = sin( ( t * theta) ) / sinTheta;
   }
-  if(d<0) scale1 = -scale1;
+  if(d<Scalar(0)) scale1 = -scale1;
 
   return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs());
 }
@@ -65,10 +65,10 @@ class DiagonalPreconditioner
       {
         typename MatType::InnerIterator it(mat,j);
         while(it && it.index()!=j) ++it;
-        if(it && it.index()==j)
+        if(it && it.index()==j && it.value()!=Scalar(0))
           m_invdiag(j) = Scalar(1)/it.value();
         else
-          m_invdiag(j) = 0;
+          m_invdiag(j) = Scalar(1);
       }
       m_isInitialized = true;
       return *this;
@@ -151,20 +151,7 @@ struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
  * \endcode
  *
  * By default the iterations start with x=0 as an initial guess of the solution.
- * One can control the start using the solveWithGuess() method. Here is a step by
- * step execution example starting with a random guess and printing the evolution
- * of the estimated error:
- * * \code
- * x = VectorXd::Random(n);
- * solver.setMaxIterations(1);
- * int i = 0;
- * do {
- *   x = solver.solveWithGuess(b,x);
- *   std::cout << i << " : " << solver.error() << std::endl;
- *   ++i;
- * } while (solver.info()!=Success && i<100);
- * \endcode
- * Note that such a step by step excution is slightly slower.
+ * One can control the start using the solveWithGuess() method.
  *
  * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
  */
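The step-by-step example removed from the comment above is still a useful pattern; a hedged sketch of it (assumes an assembled SparseMatrix<double> A and VectorXd b, plus <Eigen/IterativeLinearSolvers> and <iostream>):

    Eigen::BiCGSTAB<Eigen::SparseMatrix<double> > solver;
    solver.compute(A);
    Eigen::VectorXd x = Eigen::VectorXd::Random(A.cols());
    solver.setMaxIterations(1);
    int i = 0;
    do {
      x = solver.solveWithGuess(b, x);                        // advance by one maxIterations budget per call
      std::cout << i << " : " << solver.error() << std::endl;
      ++i;
    } while (solver.info() != Eigen::Success && i < 100);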
@@ -199,7 +186,8 @@ public:
    * this class becomes invalid. Call compute() to update it with the new
    * matrix A, or modify a copy of A.
    */
-  BiCGSTAB(const MatrixType& A) : Base(A) {}
+  template<typename MatrixDerived>
+  explicit BiCGSTAB(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}
 
   ~BiCGSTAB() {}
 
@@ -112,9 +112,9 @@ struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
  * This class allows to solve for A.x = b sparse linear problems using a conjugate gradient algorithm.
  * The sparse matrix A must be selfadjoint. The vectors x and b can be either dense or sparse.
  *
- * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
- * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
- *               or Upper. Default is Lower.
+ * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix.
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower,
+ *               Upper, or Lower|Upper in which the full matrix entries will be considered. Default is Lower.
  * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
  *
  * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
@@ -137,20 +137,7 @@ struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
  * \endcode
  *
  * By default the iterations start with x=0 as an initial guess of the solution.
- * One can control the start using the solveWithGuess() method. Here is a step by
- * step execution example starting with a random guess and printing the evolution
- * of the estimated error:
- * * \code
- * x = VectorXd::Random(n);
- * cg.setMaxIterations(1);
- * int i = 0;
- * do {
- *   x = cg.solveWithGuess(b,x);
- *   std::cout << i << " : " << cg.error() << std::endl;
- *   ++i;
- * } while (cg.info()!=Success && i<100);
- * \endcode
- * Note that such a step by step excution is slightly slower.
+ * One can control the start using the solveWithGuess() method.
  *
  * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
  */
@@ -189,7 +176,8 @@ public:
    * this class becomes invalid. Call compute() to update it with the new
    * matrix A, or modify a copy of A.
    */
-  ConjugateGradient(const MatrixType& A) : Base(A) {}
+  template<typename MatrixDerived>
+  explicit ConjugateGradient(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}
 
   ~ConjugateGradient() {}
 
@@ -213,6 +201,10 @@ public:
   template<typename Rhs,typename Dest>
   void _solveWithGuess(const Rhs& b, Dest& x) const
   {
+    typedef typename internal::conditional<UpLo==(Lower|Upper),
+                                           const MatrixType&,
+                                           SparseSelfAdjointView<const MatrixType, UpLo>
+                                          >::type MatrixWrapperType;
     m_iterations = Base::maxIterations();
     m_error = Base::m_tolerance;
 
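Sketch of what the Lower|Upper branch above enables: when the full symmetric matrix is stored, it is used as-is instead of being wrapped in a SparseSelfAdjointView (assumes a symmetric, fully populated SparseMatrix<double> A and a VectorXd b; my example):

    Eigen::ConjugateGradient<Eigen::SparseMatrix<double>, Eigen::Lower|Eigen::Upper> cg;
    cg.compute(A);                     // MatrixWrapperType resolves to const MatrixType&
    Eigen::VectorXd x = cg.solve(b);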
@@ -222,8 +214,7 @@ public:
       m_error = Base::m_tolerance;
 
       typename Dest::ColXpr xj(x,j);
-      internal::conjugate_gradient(mp_matrix->template selfadjointView<UpLo>(), b.col(j), xj,
-                                   Base::m_preconditioner, m_iterations, m_error);
+      internal::conjugate_gradient(MatrixWrapperType(*mp_matrix), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error);
     }
 
     m_isInitialized = true;
@@ -234,7 +225,7 @@ public:
   template<typename Rhs,typename Dest>
   void _solve(const Rhs& b, Dest& x) const
   {
-    x.setOnes();
+    x.setZero();
     _solveWithGuess(b,x);
   }
 
@@ -150,7 +150,6 @@ class IncompleteLUT : internal::noncopyable
     {
       analyzePattern(amat);
       factorize(amat);
-      m_isInitialized = m_factorizationIsOk;
       return *this;
     }
 
@@ -160,7 +159,7 @@ class IncompleteLUT : internal::noncopyable
     template<typename Rhs, typename Dest>
     void _solve(const Rhs& b, Dest& x) const
     {
      x = m_Pinv * b;
      x = m_lu.template triangularView<UnitLower>().solve(x);
      x = m_lu.template triangularView<Upper>().solve(x);
      x = m_P * x;
@@ -223,18 +222,29 @@ void IncompleteLUT<Scalar>::analyzePattern(const _MatrixType& amat)
 {
   // Compute the Fill-reducing permutation
+  // Since ILUT does not perform any numerical pivoting,
+  // it is highly preferable to keep the diagonal through symmetric permutations.
+#ifndef EIGEN_MPL2_ONLY
+  // To this end, let's symmetrize the pattern and perform AMD on it.
   SparseMatrix<Scalar,ColMajor, Index> mat1 = amat;
   SparseMatrix<Scalar,ColMajor, Index> mat2 = amat.transpose();
-  // Symmetrize the pattern
   // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
   // on the other hand for a really non-symmetric pattern, mat2*mat1 should be prefered...
   SparseMatrix<Scalar,ColMajor, Index> AtA = mat2 + mat1;
-  AtA.prune(keep_diag());
-  internal::minimum_degree_ordering<Scalar, Index>(AtA, m_P);  // Then compute the AMD ordering...
-  m_Pinv  = m_P.inverse(); // ... and the inverse permutation
+  AMDOrdering<Index> ordering;
+  ordering(AtA,m_P);
+  m_Pinv  = m_P.inverse(); // cache the inverse permutation
+#else
+  // If AMD is not available, (MPL2-only), then let's use the slower COLAMD routine.
+  SparseMatrix<Scalar,ColMajor, Index> mat1 = amat;
+  COLAMDOrdering<Index> ordering;
+  ordering(mat1,m_Pinv);
+  m_P = m_Pinv.inverse();
+#endif
 
   m_analysisIsOk = true;
+  m_factorizationIsOk = false;
+  m_isInitialized = false;
 }
 
 template <typename Scalar>
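Usage sketch for the reordered ILUT above as a preconditioner; the AMD-or-COLAMD choice happens inside analyzePattern() (assumes SparseMatrix<double> A and VectorXd b; my example, not part of the patch):

    Eigen::BiCGSTAB<Eigen::SparseMatrix<double>, Eigen::IncompleteLUT<double> > solver;
    solver.preconditioner().setDroptol(1e-4);     // optional ILUT tuning
    solver.preconditioner().setFillfactor(10);
    solver.compute(A);                            // analyzePattern() + factorize() for A and the ILUT
    Eigen::VectorXd x = solver.solve(b);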
@@ -442,6 +452,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
   m_lu.makeCompressed();
 
   m_factorizationIsOk = true;
+  m_isInitialized = m_factorizationIsOk;
   m_info = Success;
 }
 
@@ -49,10 +49,11 @@ public:
    * this class becomes invalid. Call compute() to update it with the new
    * matrix A, or modify a copy of A.
    */
-  IterativeSolverBase(const MatrixType& A)
+  template<typename InputDerived>
+  IterativeSolverBase(const EigenBase<InputDerived>& A)
   {
     init();
-    compute(A);
+    compute(A.derived());
   }
 
   ~IterativeSolverBase() {}
@@ -62,9 +63,11 @@ public:
    * Currently, this function mostly call analyzePattern on the preconditioner. In the future
    * we might, for instance, implement column reodering for faster matrix vector products.
    */
-  Derived& analyzePattern(const MatrixType& A)
+  template<typename InputDerived>
+  Derived& analyzePattern(const EigenBase<InputDerived>& A)
   {
-    m_preconditioner.analyzePattern(A);
+    grabInput(A.derived());
+    m_preconditioner.analyzePattern(*mp_matrix);
     m_isInitialized = true;
     m_analysisIsOk = true;
     m_info = Success;
@@ -80,11 +83,12 @@ public:
    * this class becomes invalid. Call compute() to update it with the new
    * matrix A, or modify a copy of A.
    */
-  Derived& factorize(const MatrixType& A)
+  template<typename InputDerived>
+  Derived& factorize(const EigenBase<InputDerived>& A)
   {
+    grabInput(A.derived());
     eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
-    mp_matrix = &A;
-    m_preconditioner.factorize(A);
+    m_preconditioner.factorize(*mp_matrix);
     m_factorizationIsOk = true;
     m_info = Success;
     return derived();
@@ -100,10 +104,11 @@ public:
    * this class becomes invalid. Call compute() to update it with the new
    * matrix A, or modify a copy of A.
    */
-  Derived& compute(const MatrixType& A)
+  template<typename InputDerived>
+  Derived& compute(const EigenBase<InputDerived>& A)
   {
-    mp_matrix = &A;
-    m_preconditioner.compute(A);
+    grabInput(A.derived());
+    m_preconditioner.compute(*mp_matrix);
     m_isInitialized = true;
     m_analysisIsOk = true;
     m_factorizationIsOk = true;
@@ -212,6 +217,28 @@ public:
   }
 
 protected:
+
+  template<typename InputDerived>
+  void grabInput(const EigenBase<InputDerived>& A)
+  {
+    // we const cast to prevent the creation of a MatrixType temporary by the compiler.
+    grabInput_impl(A.const_cast_derived());
+  }
+
+  template<typename InputDerived>
+  void grabInput_impl(const EigenBase<InputDerived>& A)
+  {
+    m_copyMatrix = A;
+    mp_matrix = &m_copyMatrix;
+  }
+
+  void grabInput_impl(MatrixType& A)
+  {
+    if(MatrixType::RowsAtCompileTime==Dynamic && MatrixType::ColsAtCompileTime==Dynamic)
+      m_copyMatrix.resize(0,0);
+    mp_matrix = &A;
+  }
+
   void init()
   {
     m_isInitialized = false;
@@ -220,6 +247,7 @@ protected:
     m_maxIterations = -1;
     m_tolerance = NumTraits<Scalar>::epsilon();
   }
+  MatrixType m_copyMatrix;
   const MatrixType* mp_matrix;
   Preconditioner m_preconditioner;
 
@@ -374,6 +374,12 @@ template<typename _MatrixType> class FullPivLU
 inline Index cols() const { return m_lu.cols(); }

 protected:

+static void check_template_parameters()
+{
+EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+}
+
 MatrixType m_lu;
 PermutationPType m_p;
 PermutationQType m_q;

@@ -418,6 +424,8 @@ FullPivLU<MatrixType>::FullPivLU(const MatrixType& matrix)
 template<typename MatrixType>
 FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const MatrixType& matrix)
 {
+check_template_parameters();
+
 // the permutations are stored as int indices, so just to be sure:
 eigen_assert(matrix.rows()<=NumTraits<int>::highest() && matrix.cols()<=NumTraits<int>::highest());
@@ -171,6 +171,12 @@ template<typename _MatrixType> class PartialPivLU
 inline Index cols() const { return m_lu.cols(); }

 protected:

+static void check_template_parameters()
+{
+EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+}
+
 MatrixType m_lu;
 PermutationType m_p;
 TranspositionType m_rowsTranspositions;

@@ -386,6 +392,8 @@ void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, t
 template<typename MatrixType>
 PartialPivLU<MatrixType>& PartialPivLU<MatrixType>::compute(const MatrixType& matrix)
 {
+check_template_parameters();
+
 // the row permutation is stored as int indices, so just to be sure:
 eigen_assert(matrix.rows()<NumTraits<int>::highest());
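The check_template_parameters() guards added to the LU classes above (and to the QR and SVD classes further down) are compile-time checks that the Scalar type is not an integer. A hedged standalone equivalent using C++11 static_assert and std::is_integral; the names here are illustrative, not Eigen's macro machinery.

#include <type_traits>

template<typename Scalar>
struct DecompositionSketch {
  static void check_template_parameters() {
    // Mirrors the intent of EIGEN_STATIC_ASSERT_NON_INTEGER:
    // LU/QR/SVD need a field, integer scalars are rejected at compile time.
    static_assert(!std::is_integral<Scalar>::value,
                  "this decomposition requires a non-integral scalar type");
  }
  void compute() { check_template_parameters(); /* ... factorization ... */ }
};

int main() {
  DecompositionSketch<double> ok; ok.compute();    // compiles
  // DecompositionSketch<int> bad; bad.compute();  // would fail at compile time
  return 0;
}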
@@ -137,22 +137,27 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
 degree[i] = len[i]; // degree of node i
 }
 mark = internal::cs_wclear<Index>(0, 0, w, n); /* clear w */
-elen[n] = -2; /* n is a dead element */
-Cp[n] = -1; /* n is a root of assembly tree */
-w[n] = 0; /* n is a dead element */

 /* --- Initialize degree lists ------------------------------------------ */
 for(i = 0; i < n; i++)
 {
+bool has_diag = false;
+for(p = Cp[i]; p<Cp[i+1]; ++p)
+if(Ci[p]==i)
+{
+has_diag = true;
+break;
+}
+
 d = degree[i];
-if(d == 0) /* node i is empty */
+if(d == 1 && has_diag) /* node i is empty */
 {
 elen[i] = -2; /* element i is dead */
 nel++;
 Cp[i] = -1; /* i is a root of assembly tree */
 w[i] = 0;
 }
-else if(d > dense) /* node i is dense */
+else if(d > dense || !has_diag) /* node i is dense or has no structural diagonal element */
 {
 nv[i] = 0; /* absorb i into element n */
 elen[i] = -1; /* node i is dead */

@@ -168,6 +173,10 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
 }
 }

+elen[n] = -2; /* n is a dead element */
+Cp[n] = -1; /* n is a root of assembly tree */
+w[n] = 0; /* n is a dead element */
+
 while (nel < n) /* while (selecting pivots) do */
 {
 /* --- Select node of minimum approximate degree -------------------- */
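The new has_diag scan above walks column i of the compressed-column arrays Cp/Ci to see whether a structural diagonal entry exists, so columns without one go to the dense/dead bucket instead of being misclassified as empty. A small self-contained sketch of that scan over hypothetical CSC arrays:

#include <vector>

// Returns true if column i of a CSC matrix (column pointers cp, row indices ci)
// stores an explicit (structural) diagonal entry. Plain containers, not Eigen's internals.
bool hasStructuralDiagonal(const std::vector<int>& cp, const std::vector<int>& ci, int i) {
  for (int p = cp[i]; p < cp[i + 1]; ++p)
    if (ci[p] == i)
      return true;
  return false;
}

int main() {
  std::vector<int> cp = {0, 2, 3};   // 2x2 matrix: two entries in column 0, one in column 1
  std::vector<int> ci = {0, 1, 1};
  return (hasStructuralDiagonal(cp, ci, 0) && hasStructuralDiagonal(cp, ci, 1)) ? 0 : 1;
}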
@@ -384,6 +384,12 @@ template<typename _MatrixType> class ColPivHouseholderQR
 }

 protected:

+static void check_template_parameters()
+{
+EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+}
+
 MatrixType m_qr;
 HCoeffsType m_hCoeffs;
 PermutationType m_colsPermutation;

@@ -422,6 +428,8 @@ typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::logAbsDetermina
 template<typename MatrixType>
 ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
 {
+check_template_parameters();
+
 using std::abs;
 Index rows = matrix.rows();
 Index cols = matrix.cols();

@@ -463,20 +471,10 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
 // we store that back into our table: it can't hurt to correct our table.
 m_colSqNorms.coeffRef(biggest_col_index) = biggest_col_sq_norm;

-// if the current biggest column is smaller than epsilon times the initial biggest column,
-// terminate to avoid generating nan/inf values.
-// Note that here, if we test instead for "biggest == 0", we get a failure every 1000 (or so)
-// repetitions of the unit test, with the result of solve() filled with large values of the order
-// of 1/(size*epsilon).
-if(biggest_col_sq_norm < threshold_helper * RealScalar(rows-k))
-{
+// Track the number of meaningful pivots but do not stop the decomposition to make
+// sure that the initial matrix is properly reproduced. See bug 941.
+if(m_nonzero_pivots==size && biggest_col_sq_norm < threshold_helper * RealScalar(rows-k))
 m_nonzero_pivots = k;
-m_hCoeffs.tail(size-k).setZero();
-m_qr.bottomRightCorner(rows-k,cols-k)
-.template triangularView<StrictlyLower>()
-.setZero();
-break;
-}

 // apply the transposition to the columns
 m_colsTranspositions.coeffRef(k) = biggest_col_index;

@@ -505,7 +503,7 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
 }

 m_colsPermutation.setIdentity(PermIndexType(cols));
-for(PermIndexType k = 0; k < m_nonzero_pivots; ++k)
+for(PermIndexType k = 0; k < size/*m_nonzero_pivots*/; ++k)
 m_colsPermutation.applyTranspositionOnTheRight(k, PermIndexType(m_colsTranspositions.coeff(k)));

 m_det_pq = (number_of_transpositions%2) ? -1 : 1;

@@ -555,13 +553,15 @@ struct solve_retval<ColPivHouseholderQR<_MatrixType>, Rhs>

 } // end namespace internal

-/** \returns the matrix Q as a sequence of householder transformations */
+/** \returns the matrix Q as a sequence of householder transformations.
+* You can extract the meaningful part only by using:
+* \code qr.householderQ().setLength(qr.nonzeroPivots()) \endcode*/
 template<typename MatrixType>
 typename ColPivHouseholderQR<MatrixType>::HouseholderSequenceType ColPivHouseholderQR<MatrixType>
 ::householderQ() const
 {
 eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
-return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate()).setLength(m_nonzero_pivots);
+return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate());
 }

 /** \return the column-pivoting Householder QR decomposition of \c *this.

@@ -63,12 +63,12 @@ ColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynami
 \
 m_nonzero_pivots = 0; \
 m_maxpivot = RealScalar(0);\
-m_colsPermutation.resize((int)cols); \
+m_colsPermutation.resize(cols); \
 m_colsPermutation.indices().setZero(); \
 \
-lapack_int lda = (lapack_int) m_qr.outerStride(), i; \
+lapack_int lda = m_qr.outerStride(), i; \
 lapack_int matrix_order = MKLCOLROW; \
-LAPACKE_##MKLPREFIX##geqp3( matrix_order, (lapack_int)rows, (lapack_int)cols, (MKLTYPE*)m_qr.data(), lda, (lapack_int*)m_colsPermutation.indices().data(), (MKLTYPE*)m_hCoeffs.data()); \
+LAPACKE_##MKLPREFIX##geqp3( matrix_order, rows, cols, (MKLTYPE*)m_qr.data(), lda, (lapack_int*)m_colsPermutation.indices().data(), (MKLTYPE*)m_hCoeffs.data()); \
 m_isInitialized = true; \
 m_maxpivot=m_qr.diagonal().cwiseAbs().maxCoeff(); \
 m_hCoeffs.adjointInPlace(); \
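Since householderQ() above no longer truncates to the nonzero pivots itself, the updated doc comment tells callers to do it at the call site. A short usage sketch with Eigen's public ColPivHouseholderQR API (the matrix contents are arbitrary):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);
  Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);
  // Full sequence of Householder transformations...
  Eigen::MatrixXd Qfull = qr.householderQ();
  // ...or only the meaningful part, as the documentation above recommends.
  Eigen::MatrixXd Qtrimmed = qr.householderQ().setLength(qr.nonzeroPivots());
  std::cout << Qfull.rows() << "x" << Qfull.cols() << "\n";
  std::cout << Qtrimmed.rows() << "x" << Qtrimmed.cols() << "\n";
  return 0;
}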
@@ -368,6 +368,12 @@ template<typename _MatrixType> class FullPivHouseholderQR
 RealScalar maxPivot() const { return m_maxpivot; }

 protected:

+static void check_template_parameters()
+{
+EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+}
+
 MatrixType m_qr;
 HCoeffsType m_hCoeffs;
 IntDiagSizeVectorType m_rows_transpositions;

@@ -407,6 +413,8 @@ typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::logAbsDetermin
 template<typename MatrixType>
 FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
 {
+check_template_parameters();
+
 using std::abs;
 Index rows = matrix.rows();
 Index cols = matrix.cols();
@@ -189,6 +189,12 @@ template<typename _MatrixType> class HouseholderQR
 const HCoeffsType& hCoeffs() const { return m_hCoeffs; }

 protected:

+static void check_template_parameters()
+{
+EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+}
+
 MatrixType m_qr;
 HCoeffsType m_hCoeffs;
 RowVectorType m_temp;

@@ -349,6 +355,8 @@ struct solve_retval<HouseholderQR<_MatrixType>, Rhs>
 template<typename MatrixType>
 HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType& matrix)
 {
+check_template_parameters();
+
 Index rows = matrix.rows();
 Index cols = matrix.cols();
 Index size = (std::min)(rows,cols);
@@ -47,7 +47,7 @@ namespace Eigen {
 * You can then apply it to a vector.
 *
 * R is the sparse triangular factor. Use matrixQR() to get it as SparseMatrix.
-* NOTE : The Index type of R is always UF_long. You can get it with SPQR::Index
+* NOTE : The Index type of R is always SuiteSparse_long. You can get it with SPQR::Index
 *
 * \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<>
 * NOTE

@@ -59,24 +59,18 @@ class SPQR
 public:
 typedef typename _MatrixType::Scalar Scalar;
 typedef typename _MatrixType::RealScalar RealScalar;
-typedef UF_long Index ;
+typedef SuiteSparse_long Index ;
 typedef SparseMatrix<Scalar, ColMajor, Index> MatrixType;
 typedef PermutationMatrix<Dynamic, Dynamic> PermutationType;
 public:
 SPQR()
-: m_isInitialized(false),
-m_ordering(SPQR_ORDERING_DEFAULT),
-m_allow_tol(SPQR_DEFAULT_TOL),
-m_tolerance (NumTraits<Scalar>::epsilon())
+: m_isInitialized(false), m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
 {
 cholmod_l_start(&m_cc);
 }

 SPQR(const _MatrixType& matrix)
-: m_isInitialized(false),
-m_ordering(SPQR_ORDERING_DEFAULT),
-m_allow_tol(SPQR_DEFAULT_TOL),
-m_tolerance (NumTraits<Scalar>::epsilon())
+: m_isInitialized(false), m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
 {
 cholmod_l_start(&m_cc);
 compute(matrix);

@@ -101,10 +95,26 @@ class SPQR
 if(m_isInitialized) SPQR_free();

 MatrixType mat(matrix);

+/* Compute the default threshold as in MatLab, see:
+ * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
+ * Sparse QR Factorization, ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3
+ */
+RealScalar pivotThreshold = m_tolerance;
+if(m_useDefaultThreshold)
+{
+using std::max;
+RealScalar max2Norm = 0.0;
+for (int j = 0; j < mat.cols(); j++) max2Norm = (max)(max2Norm, mat.col(j).norm());
+if(max2Norm==RealScalar(0))
+max2Norm = RealScalar(1);
+pivotThreshold = 20 * (mat.rows() + mat.cols()) * max2Norm * NumTraits<RealScalar>::epsilon();
+}
+
 cholmod_sparse A;
 A = viewAsCholmod(mat);
 Index col = matrix.cols();
-m_rank = SuiteSparseQR<Scalar>(m_ordering, m_tolerance, col, &A,
+m_rank = SuiteSparseQR<Scalar>(m_ordering, pivotThreshold, col, &A,
 &m_cR, &m_E, &m_H, &m_HPinv, &m_HTau, &m_cc);

 if (!m_cR)

@@ -120,7 +130,7 @@ class SPQR
 /**
 * Get the number of rows of the input matrix and the Q matrix
 */
-inline Index rows() const {return m_H->nrow; }
+inline Index rows() const {return m_cR->nrow; }

 /**
 * Get the number of columns of the input matrix.

@@ -145,16 +155,25 @@ class SPQR
 {
 eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()");
 eigen_assert(b.cols()==1 && "This method is for vectors only");

 //Compute Q^T * b
-typename Dest::PlainObject y;
+typename Dest::PlainObject y, y2;
 y = matrixQ().transpose() * b;
-// Solves with the triangular matrix R
+
+// Solves with the triangular matrix R
 Index rk = this->rank();
-y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView<Upper>().solve(y.topRows(rk));
-y.bottomRows(cols()-rk).setZero();
+y2 = y;
+y.resize((std::max)(cols(),Index(y.rows())),y.cols());
+y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView<Upper>().solve(y2.topRows(rk));

 // Apply the column permutation
-dest.topRows(cols()) = colsPermutation() * y.topRows(cols());
+// colsPermutation() performs a copy of the permutation,
+// so let's apply it manually:
+for(Index i = 0; i < rk; ++i) dest.row(m_E[i]) = y.row(i);
+for(Index i = rk; i < cols(); ++i) dest.row(m_E[i]).setZero();
+
+// y.bottomRows(y.rows()-rk).setZero();
+// dest = colsPermutation() * y.topRows(cols());

 m_info = Success;
 }

@@ -197,7 +216,11 @@ class SPQR
 /// Set the fill-reducing ordering method to be used
 void setSPQROrdering(int ord) { m_ordering = ord;}
 /// Set the tolerance tol to treat columns with 2-norm < =tol as zero
-void setPivotThreshold(const RealScalar& tol) { m_tolerance = tol; }
+void setPivotThreshold(const RealScalar& tol)
+{
+m_useDefaultThreshold = false;
+m_tolerance = tol;
+}

 /** \returns a pointer to the SPQR workspace */
 cholmod_common *cholmodCommon() const { return &m_cc; }

@@ -230,6 +253,7 @@ class SPQR
 mutable cholmod_dense *m_HTau; // The Householder coefficients
 mutable Index m_rank; // The rank of the matrix
 mutable cholmod_common m_cc; // Workspace and parameters
+bool m_useDefaultThreshold; // Use default threshold
 template<typename ,typename > friend struct SPQR_QProduct;
 };
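The default pivot threshold introduced above follows the MATLAB/SuiteSparseQR convention cited in the comment: 20*(rows+cols)*eps*max_j ||A(:,j)||_2. A hedged standalone sketch of that formula for a column-major Eigen sparse matrix; the function name is illustrative, not part of Eigen's SPQR wrapper.

#include <Eigen/Sparse>
#include <algorithm>
#include <limits>

// SPQR-style default pivot threshold: 20*(rows+cols)*eps*max column 2-norm,
// with the same all-zero-matrix guard as the code above.
double defaultPivotThreshold(const Eigen::SparseMatrix<double>& mat) {
  double max2Norm = 0.0;
  for (int j = 0; j < mat.cols(); ++j)
    max2Norm = std::max(max2Norm, mat.col(j).norm());
  if (max2Norm == 0.0) max2Norm = 1.0;
  return 20.0 * (mat.rows() + mat.cols()) * max2Norm *
         std::numeric_limits<double>::epsilon();
}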
@@ -742,6 +742,11 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD

 private:
 void allocate(Index rows, Index cols, unsigned int computationOptions);

+static void check_template_parameters()
+{
+EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+}
+
 protected:
 MatrixUType m_matrixU;

@@ -818,6 +823,8 @@ template<typename MatrixType, int QRPreconditioner>
 JacobiSVD<MatrixType, QRPreconditioner>&
 JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
 {
+check_template_parameters();
+
 using std::abs;
 allocate(matrix.rows(), matrix.cols(), computationOptions);
@@ -57,6 +57,16 @@ public:
 inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols)
 : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
 {}

+inline const Scalar coeff(int row, int col) const
+{
+return m_matrix.coeff(row + IsRowMajor ? m_outerStart : 0, col +IsRowMajor ? 0 : m_outerStart);
+}
+
+inline const Scalar coeff(int index) const
+{
+return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
+}
+
 EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
 EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

@@ -226,6 +236,21 @@ public:
 else
 return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
 }

+inline Scalar& coeffRef(int row, int col)
+{
+return m_matrix.const_cast_derived().coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
+}
+
+inline const Scalar coeff(int row, int col) const
+{
+return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
+}
+
+inline const Scalar coeff(int index) const
+{
+return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
+}
+
 const Scalar& lastCoeff() const
 {

@@ -313,6 +338,16 @@ public:
 else
 return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
 }

+inline const Scalar coeff(int row, int col) const
+{
+return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
+}
+
+inline const Scalar coeff(int index) const
+{
+return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
+}
+
 const Scalar& lastCoeff() const
 {

@@ -355,7 +390,8 @@ const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatri
 * is col-major (resp. row-major).
 */
 template<typename Derived>
-Block<Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
+typename SparseMatrixBase<Derived>::InnerVectorsReturnType
+SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
 {
 return Block<Derived,Dynamic,Dynamic,true>(derived(),
 IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,

@@ -367,7 +403,8 @@ Block<Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Inde
 * is col-major (resp. row-major). Read-only.
 */
 template<typename Derived>
-const Block<const Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
+const typename SparseMatrixBase<Derived>::ConstInnerVectorsReturnType
+SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
 {
 return Block<const Derived,Dynamic,Dynamic,true>(derived(),
 IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,

@@ -394,8 +431,8 @@ public:
 : m_matrix(xpr),
 m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
 m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
-m_blockRows(xpr.rows()),
-m_blockCols(xpr.cols())
+m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
+m_blockCols(BlockCols==1 ? 1 : xpr.cols())
 {}

 /** Dynamic-size constructor

@@ -497,3 +534,4 @@ public:
 } // end namespace Eigen

 #endif // EIGEN_SPARSE_BLOCK_H
+
@@ -314,10 +314,10 @@ SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& othe

 template<typename Derived>
 template<typename OtherDerived>
-EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+EIGEN_STRONG_INLINE const typename SparseMatrixBase<Derived>::template CwiseProductDenseReturnType<OtherDerived>::Type
 SparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) const
 {
-return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(derived(), other.derived());
+return typename CwiseProductDenseReturnType<OtherDerived>::Type(derived(), other.derived());
 }

 } // end namespace Eigen
@@ -180,7 +180,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, R
 typename Res::Scalar tmp(0);
 for(LhsInnerIterator it(lhs,j); it ;++it)
 tmp += it.value() * rhs.coeff(it.index(),c);
-res.coeffRef(j,c) = alpha * tmp;
+res.coeffRef(j,c) += alpha * tmp;
 }
 }
 }
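The one-character change above (`=` to `+=`) matters because this kernel is expected to accumulate the scaled sparse-times-dense product into a result that may already hold data. A tiny usage sketch of the intended accumulate semantics with Eigen's public API; the values are arbitrary.

#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <iostream>

int main() {
  Eigen::SparseMatrix<double, Eigen::RowMajor> S(2, 2);
  S.insert(0, 0) = 1.0;
  S.insert(1, 1) = 2.0;
  S.makeCompressed();
  Eigen::MatrixXd d = Eigen::MatrixXd::Ones(2, 2);
  Eigen::MatrixXd res = Eigen::MatrixXd::Ones(2, 2);
  res += 3.0 * (S * d);   // the kernel must add into res, not overwrite it
  std::cout << res << "\n";
  return 0;
}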
@@ -691,7 +691,8 @@ class SparseMatrix
 m_data.swap(other.m_data);
 }

-/** Sets *this to the identity matrix */
+/** Sets *this to the identity matrix.
+* This function also turns the matrix into compressed mode, and drop any reserved memory. */
 inline void setIdentity()
 {
 eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");

@@ -699,6 +700,8 @@ class SparseMatrix
 Eigen::Map<Matrix<Index, Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1);
 Eigen::Map<Matrix<Scalar, Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes();
 Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
+std::free(m_innerNonZeros);
+m_innerNonZeros = 0;
 }
 inline SparseMatrix& operator=(const SparseMatrix& other)
 {
@@ -23,7 +23,14 @@ namespace Eigen {
 * This class can be extended with the help of the plugin mechanism described on the page
 * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN.
 */
-template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
+template<typename Derived> class SparseMatrixBase
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+: public internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
+typename NumTraits<typename internal::traits<Derived>::Scalar>::Real,
+EigenBase<Derived> >
+#else
+: public EigenBase<Derived>
+#endif // not EIGEN_PARSED_BY_DOXYGEN
 {
 public:

@@ -36,7 +43,6 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
 >::type PacketReturnType;

 typedef SparseMatrixBase StorageBaseType;
-typedef EigenBase<Derived> Base;

 template<typename OtherDerived>
 Derived& operator=(const EigenBase<OtherDerived> &other)

@@ -132,6 +138,9 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
 inline Derived& derived() { return *static_cast<Derived*>(this); }
 inline Derived& const_cast_derived() const
 { return *static_cast<Derived*>(const_cast<SparseMatrixBase*>(this)); }

+typedef internal::special_scalar_op_base<Derived, Scalar, RealScalar, EigenBase<Derived> > Base;
+using Base::operator*;
 #endif // not EIGEN_PARSED_BY_DOXYGEN

 #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase

@@ -317,20 +326,18 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
 Derived& operator*=(const Scalar& other);
 Derived& operator/=(const Scalar& other);

-#define EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE \
-CwiseBinaryOp< \
-internal::scalar_product_op< \
-typename internal::scalar_product_traits< \
-typename internal::traits<Derived>::Scalar, \
-typename internal::traits<OtherDerived>::Scalar \
->::ReturnType \
->, \
-const Derived, \
-const OtherDerived \
->
+template<typename OtherDerived> struct CwiseProductDenseReturnType {
+typedef CwiseBinaryOp<internal::scalar_product_op<typename internal::scalar_product_traits<
+typename internal::traits<Derived>::Scalar,
+typename internal::traits<OtherDerived>::Scalar
+>::ReturnType>,
+const Derived,
+const OtherDerived
+> Type;
+};

 template<typename OtherDerived>
-EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+EIGEN_STRONG_INLINE const typename CwiseProductDenseReturnType<OtherDerived>::Type
 cwiseProduct(const MatrixBase<OtherDerived> &other) const;

 // sparse * sparse

@@ -404,8 +411,10 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
 const ConstInnerVectorReturnType innerVector(Index outer) const;

 // set of inner-vectors
-Block<Derived,Dynamic,Dynamic,true> innerVectors(Index outerStart, Index outerSize);
-const Block<const Derived,Dynamic,Dynamic,true> innerVectors(Index outerStart, Index outerSize) const;
+typedef Block<Derived,Dynamic,Dynamic,true> InnerVectorsReturnType;
+typedef Block<const Derived,Dynamic,Dynamic,true> ConstInnerVectorsReturnType;
+InnerVectorsReturnType innerVectors(Index outerStart, Index outerSize);
+const ConstInnerVectorsReturnType innerVectors(Index outerStart, Index outerSize) const;

 /** \internal use operator= */
 template<typename DenseDerived>
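Replacing the EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE macro with the nested CwiseProductDenseReturnType<OtherDerived>::Type trait above is a common C++ pattern: a member template struct names a dependent return type once, and both the in-class declaration and the out-of-class definition refer to it. A reduced, hypothetical illustration of the pattern (none of these classes are Eigen's):

#include <utility>

template<typename Derived>
struct BaseSketch {
  // Nested trait naming the return type once, instead of repeating a macro everywhere.
  template<typename Other>
  struct ProductReturnType { typedef std::pair<Derived, Other> Type; };

  template<typename Other>
  typename ProductReturnType<Other>::Type product(const Other& o) const {
    return typename ProductReturnType<Other>::Type(static_cast<const Derived&>(*this), o);
  }
};

struct MyVec : BaseSketch<MyVec> { int v = 0; };

int main() {
  MyVec a;
  std::pair<MyVec, double> p = a.product(3.5);
  return p.second > 3.0 ? 0 : 1;
}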
@@ -67,7 +67,6 @@ const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern;
 const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern;
 const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;

-template<typename Derived> class SparseMatrixBase;
 template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseMatrix;
 template<typename _Scalar, int _Flags = 0, typename _Index = int> class DynamicSparseMatrix;
 template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseVector;
@@ -158,6 +158,7 @@ class SparseVector

 Index inner = IsColVector ? row : col;
 Index outer = IsColVector ? col : row;
+EIGEN_ONLY_USED_FOR_DEBUG(outer);
 eigen_assert(outer==0);
 return insert(inner);
 }
@@ -69,7 +69,7 @@ struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>
 for(int i=lhs.rows()-1 ; i>=0 ; --i)
 {
 Scalar tmp = other.coeff(i,col);
-Scalar l_ii = 0;
+Scalar l_ii(0);
 typename Lhs::InnerIterator it(lhs, i);
 while(it && it.index()<i)
 ++it;
@@ -268,7 +268,8 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
 {
 if(it.index() == j)
 {
-det *= (std::abs)(it.value());
+using std::abs;
+det *= abs(it.value());
 break;
 }
 }

@@ -295,7 +296,8 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
 if(it.row() < j) continue;
 if(it.row() == j)
 {
-det += (std::log)((std::abs)(it.value()));
+using std::log; using std::abs;
+det += log(abs(it.value()));
 break;
 }
 }

@@ -303,21 +305,64 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
 return det;
 }

 /** \returns A number representing the sign of the determinant
 *
 * \sa absDeterminant(), logAbsDeterminant()
 */
 Scalar signDeterminant()
 {
 eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
-return Scalar(m_detPermR);
-}
+// Initialize with the determinant of the row matrix
+Index det = 1;
+// Note that the diagonal blocks of U are stored in supernodes,
+// which are available in the L part :)
+for (Index j = 0; j < this->cols(); ++j)
+{
+for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
+{
+if(it.index() == j)
+{
+if(it.value()<0)
+det = -det;
+else if(it.value()==0)
+return 0;
+break;
+}
+}
+}
+return det * m_detPermR * m_detPermC;
+}
+
+/** \returns The determinant of the matrix.
+*
+* \sa absDeterminant(), logAbsDeterminant()
+*/
+Scalar determinant()
+{
+eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
+// Initialize with the determinant of the row matrix
+Scalar det = Scalar(1.);
+// Note that the diagonal blocks of U are stored in supernodes,
+// which are available in the L part :)
+for (Index j = 0; j < this->cols(); ++j)
+{
+for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
+{
+if(it.index() == j)
+{
+det *= it.value();
+break;
+}
+}
+}
+return det * Scalar(m_detPermR * m_detPermC);
+}

 protected:
 // Functions
 void initperfvalues()
 {
-m_perfv.panel_size = 1;
+m_perfv.panel_size = 16;
 m_perfv.relax = 1;
 m_perfv.maxsuper = 128;
 m_perfv.rowblk = 16;

@@ -345,8 +390,8 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
 // values for performance
 internal::perfvalues<Index> m_perfv;
 RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot
 Index m_nnzL, m_nnzU; // Nonzeros in L and U factors
-Index m_detPermR; // Determinant of the coefficient matrix
+Index m_detPermR, m_detPermC; // Determinants of the permutation matrices
 private:
 // Disable copy constructor
 SparseLU (const SparseLU& );

@@ -622,7 +667,8 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
 }

 // Update the determinant of the row permutation matrix
-if (pivrow != jj) m_detPermR *= -1;
+// FIXME: the following test is not correct, we should probably take iperm_c into account and pivrow is not directly the row pivot.
+if (pivrow != jj) m_detPermR = -m_detPermR;

 // Prune columns (0:jj-1) using column jj
 Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu);

@@ -637,10 +683,13 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
 jcol += panel_size; // Move to the next panel
 } // end for -- end elimination

+m_detPermR = m_perm_r.determinant();
+m_detPermC = m_perm_c.determinant();
+
 // Count the number of nonzeros in factors
 Base::countnz(n, m_nnzL, m_nnzU, m_glu);
 // Apply permutation to the L subscripts
 Base::fixupL(n, m_perm_r.indices(), m_glu);

 // Create supernode matrix L
 m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup);

@@ -700,8 +749,8 @@ struct SparseLUMatrixUReturnType : internal::no_assignment_operator
 }
 else
 {
-Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
-Map< Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
+Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
+Map< Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
 U = A.template triangularView<Upper>().solve(U);
 }
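The new signDeterminant()/determinant() members above recover det(A) from the factorization as the product of U's diagonal entries (stored inside the L supernodes) times the signs of the row and column permutations. A standalone sketch of that composition with plain containers; detPermR/detPermC stand for the permutation signs (+1 or -1) and the names are illustrative only.

#include <vector>

// det(A) = sign(Pr) * sign(Pc) * prod(diag(U)), given the stored diagonal of U
// and the determinants (signs) of the row and column permutations.
double determinantFromLU(const std::vector<double>& diagU, int detPermR, int detPermC) {
  double det = 1.0;
  for (std::size_t i = 0; i < diagU.size(); ++i) det *= diagU[i];
  return det * detPermR * detPermC;
}

int main() {
  std::vector<double> diagU;
  diagU.push_back(2.0); diagU.push_back(-3.0); diagU.push_back(0.5);
  // (2 * -3 * 0.5) * (-1) * (+1) = 3
  return determinantFromLU(diagU, -1, 1) == 3.0 ? 0 : 1;
}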
@@ -21,6 +21,8 @@ class SparseLUImpl
 {
 public:
 typedef Matrix<Scalar,Dynamic,1> ScalarVector;
+typedef Matrix<Scalar,Dynamic,Dynamic,ColMajor> ScalarMatrix;
+typedef Map<ScalarMatrix, 0, OuterStride<> > MappedMatrixBlock;
 typedef Matrix<Index,Dynamic,1> IndexVector;
 typedef typename ScalarVector::RealScalar RealScalar;
 typedef Ref<Matrix<Scalar,Dynamic,1> > BlockScalarVector;
@@ -153,8 +153,8 @@ Index SparseLUImpl<Scalar,Index>::memInit(Index m, Index n, Index annz, Index lw
 {
 Index& num_expansions = glu.num_expansions; //No memory expansions so far
 num_expansions = 0;
-glu.nzumax = glu.nzlumax = (std::min)(fillratio * annz / n, m) * n; // estimated number of nonzeros in U
-glu.nzlmax = (std::max)(Index(4), fillratio) * annz / 4; // estimated nnz in L factor
+glu.nzumax = glu.nzlumax = (std::min)(fillratio * (annz+1) / n, m) * n; // estimated number of nonzeros in U
+glu.nzlmax = (std::max)(Index(4), fillratio) * (annz+1) / 4; // estimated nnz in L factor
 // Return the estimated size to the user if necessary
 Index tempSpace;
 tempSpace = (2*panel_size + 4 + LUNoMarker) * m * sizeof(Index) + (panel_size + 1) * m * sizeof(Scalar);
@@ -236,7 +236,7 @@ void MappedSuperNodalMatrix<Scalar,Index>::solveInPlace( MatrixBase<Dest>&X) con
 Index n = X.rows();
 Index nrhs = X.cols();
 const Scalar * Lval = valuePtr(); // Nonzero values
-Matrix<Scalar,Dynamic,Dynamic> work(n, nrhs); // working vector
+Matrix<Scalar,Dynamic,Dynamic, ColMajor> work(n, nrhs); // working vector
 work.setZero();
 for (Index k = 0; k <= nsuper(); k ++)
 {

@@ -267,12 +267,12 @@ void MappedSuperNodalMatrix<Scalar,Index>::solveInPlace( MatrixBase<Dest>&X) con
 Index lda = colIndexPtr()[fsupc+1] - luptr;

 // Triangular solve
-Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(lda) );
-Map< Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
+Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(lda) );
+Map< Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
 U = A.template triangularView<UnitLower>().solve(U);

 // Matrix-vector product
-new (&A) Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > ( &(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
+new (&A) Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > ( &(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
 work.block(0, 0, nrow, nrhs) = A * U;

 //Begin Scatter
@@ -162,11 +162,11 @@ Index SparseLUImpl<Scalar,Index>::column_bmod(const Index jcol, const Index nseg
 // points to the beginning of jcol in snode L\U(jsupno)
 ufirst = glu.xlusup(jcol) + d_fsupc;
 Index lda = glu.xlusup(jcol+1) - glu.xlusup(jcol);
-Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(glu.lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
+MappedMatrixBlock A( &(glu.lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
 VectorBlock<ScalarVector> u(glu.lusup, ufirst, nsupc);
 u = A.template triangularView<UnitLower>().solve(u);

-new (&A) Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > ( &(glu.lusup.data()[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
+new (&A) MappedMatrixBlock ( &(glu.lusup.data()[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
 VectorBlock<ScalarVector> l(glu.lusup, ufirst+nsupc, nrow);
 l.noalias() -= A * u;
@@ -56,7 +56,7 @@ EIGEN_DONT_INLINE void LU_kernel_bmod<SegSizeAtCompileTime>::run(const int segsi
 // Dense triangular solve -- start effective triangle
 luptr += lda * no_zeros + no_zeros;
 // Form Eigen matrix and vector
-Map<Matrix<Scalar,SegSizeAtCompileTime,SegSizeAtCompileTime>, 0, OuterStride<> > A( &(lusup.data()[luptr]), segsize, segsize, OuterStride<>(lda) );
+Map<Matrix<Scalar,SegSizeAtCompileTime,SegSizeAtCompileTime, ColMajor>, 0, OuterStride<> > A( &(lusup.data()[luptr]), segsize, segsize, OuterStride<>(lda) );
 Map<Matrix<Scalar,SegSizeAtCompileTime,1> > u(tempv.data(), segsize);

 u = A.template triangularView<UnitLower>().solve(u);

@@ -65,7 +65,7 @@ EIGEN_DONT_INLINE void LU_kernel_bmod<SegSizeAtCompileTime>::run(const int segsi
 luptr += segsize;
 const Index PacketSize = internal::packet_traits<Scalar>::size;
 Index ldl = internal::first_multiple(nrow, PacketSize);
-Map<Matrix<Scalar,Dynamic,SegSizeAtCompileTime>, 0, OuterStride<> > B( &(lusup.data()[luptr]), nrow, segsize, OuterStride<>(lda) );
+Map<Matrix<Scalar,Dynamic,SegSizeAtCompileTime, ColMajor>, 0, OuterStride<> > B( &(lusup.data()[luptr]), nrow, segsize, OuterStride<>(lda) );
 Index aligned_offset = internal::first_aligned(tempv.data()+segsize, PacketSize);
 Index aligned_with_B_offset = (PacketSize-internal::first_aligned(B.data(), PacketSize))%PacketSize;
 Map<Matrix<Scalar,Dynamic,1>, 0, OuterStride<> > l(tempv.data()+segsize+aligned_offset+aligned_with_B_offset, nrow, OuterStride<>(ldl) );
@@ -102,7 +102,7 @@ void SparseLUImpl<Scalar,Index>::panel_bmod(const Index m, const Index w, const
 if(nsupc >= 2)
 {
 Index ldu = internal::first_multiple<Index>(u_rows, PacketSize);
-Map<Matrix<Scalar,Dynamic,Dynamic>, Aligned, OuterStride<> > U(tempv.data(), u_rows, u_cols, OuterStride<>(ldu));
+Map<ScalarMatrix, Aligned, OuterStride<> > U(tempv.data(), u_rows, u_cols, OuterStride<>(ldu));

 // gather U
 Index u_col = 0;

@@ -136,17 +136,17 @@ void SparseLUImpl<Scalar,Index>::panel_bmod(const Index m, const Index w, const
 Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc);
 no_zeros = (krep - u_rows + 1) - fsupc;
 luptr += lda * no_zeros + no_zeros;
-Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A(glu.lusup.data()+luptr, u_rows, u_rows, OuterStride<>(lda) );
+MappedMatrixBlock A(glu.lusup.data()+luptr, u_rows, u_rows, OuterStride<>(lda) );
 U = A.template triangularView<UnitLower>().solve(U);

 // update
 luptr += u_rows;
-Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > B(glu.lusup.data()+luptr, nrow, u_rows, OuterStride<>(lda) );
+MappedMatrixBlock B(glu.lusup.data()+luptr, nrow, u_rows, OuterStride<>(lda) );
 eigen_assert(tempv.size()>w*ldu + nrow*w + 1);

 Index ldl = internal::first_multiple<Index>(nrow, PacketSize);
 Index offset = (PacketSize-internal::first_aligned(B.data(), PacketSize)) % PacketSize;
-Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > L(tempv.data()+w*ldu+offset, nrow, u_cols, OuterStride<>(ldl));
+MappedMatrixBlock L(tempv.data()+w*ldu+offset, nrow, u_cols, OuterStride<>(ldl));

 L.setZero();
 internal::sparselu_gemm<Scalar>(L.rows(), L.cols(), B.cols(), B.data(), B.outerStride(), U.data(), U.outerStride(), L.data(), L.outerStride());
@@ -71,13 +71,14 @@ Index SparseLUImpl<Scalar,Index>::pivotL(const Index jcol, const RealScalar& dia

 // Determine the largest abs numerical value for partial pivoting
 Index diagind = iperm_c(jcol); // diagonal index
-RealScalar pivmax = 0.0;
+RealScalar pivmax(-1.0);
 Index pivptr = nsupc;
 Index diag = emptyIdxLU;
 RealScalar rtemp;
 Index isub, icol, itemp, k;
 for (isub = nsupc; isub < nsupr; ++isub) {
-rtemp = std::abs(lu_col_ptr[isub]);
+using std::abs;
+rtemp = abs(lu_col_ptr[isub]);
 if (rtemp > pivmax) {
 pivmax = rtemp;
 pivptr = isub;

@@ -86,8 +87,9 @@ Index SparseLUImpl<Scalar,Index>::pivotL(const Index jcol, const RealScalar& dia
 }

 // Test for singularity
-if ( pivmax == 0.0 ) {
-pivrow = lsub_ptr[pivptr];
+if ( pivmax <= RealScalar(0.0) ) {
+// if pivmax == -1, the column is structurally empty, otherwise it is only numerically zero
+pivrow = pivmax < RealScalar(0.0) ? diagind : lsub_ptr[pivptr];
 perm_r(pivrow) = jcol;
 return (jcol+1);
 }

@@ -101,7 +103,8 @@ Index SparseLUImpl<Scalar,Index>::pivotL(const Index jcol, const RealScalar& dia
 if (diag >= 0 )
 {
 // Diagonal element exists
-rtemp = std::abs(lu_col_ptr[diag]);
+using std::abs;
+rtemp = abs(lu_col_ptr[diag]);
 if (rtemp != 0.0 && rtemp >= thresh) pivptr = diag;
 }
 pivrow = lsub_ptr[pivptr];
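pivotL() above scans the candidate rows of the current column for the entry of largest magnitude, keeps the diagonal when it is within the pivot threshold of that maximum, and now seeds pivmax with -1 so that a structurally empty column can be told apart from a numerically zero one. A simplified sketch of that selection rule over a dense column; all names are hypothetical and the supernodal bookkeeping is omitted.

#include <cmath>
#include <cstddef>
#include <vector>

// Returns the pivot index for one column: the entry of largest |value|, unless the
// diagonal entry is acceptable, i.e. |col[diag]| >= thresh * max|col[i]|.
// Returns -1 for a structurally or numerically empty column, the case the
// pivmax = -1 initialization above is meant to distinguish.
int choosePivot(const std::vector<double>& col, int diag, double thresh) {
  double pivmax = -1.0;
  int pivptr = -1;
  for (std::size_t i = 0; i < col.size(); ++i) {
    double r = std::abs(col[i]);
    if (r > pivmax) { pivmax = r; pivptr = static_cast<int>(i); }
  }
  if (pivmax <= 0.0) return -1;                        // empty column
  if (diag >= 0 && std::abs(col[diag]) != 0.0 &&
      std::abs(col[diag]) >= thresh * pivmax)
    return diag;                                       // prefer the diagonal when acceptable
  return pivptr;
}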
@@ -70,6 +70,43 @@ max
   return (max)(Derived::PlainObject::Constant(rows(), cols(), other));
 }

+#define EIGEN_MAKE_CWISE_COMP_OP(OP, COMPARATOR) \
+template<typename OtherDerived> \
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const OtherDerived> \
+OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
+{ \
+  return CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const OtherDerived>(derived(), other.derived()); \
+}\
+typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> > Cmp ## COMPARATOR ## ReturnType; \
+typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject>, const Derived > RCmp ## COMPARATOR ## ReturnType; \
+EIGEN_STRONG_INLINE const Cmp ## COMPARATOR ## ReturnType \
+OP(const Scalar& s) const { \
+  return this->OP(Derived::PlainObject::Constant(rows(), cols(), s)); \
+} \
+friend EIGEN_STRONG_INLINE const RCmp ## COMPARATOR ## ReturnType \
+OP(const Scalar& s, const Derived& d) { \
+  return Derived::PlainObject::Constant(d.rows(), d.cols(), s).OP(d); \
+}
+
+#define EIGEN_MAKE_CWISE_COMP_R_OP(OP, R_OP, RCOMPARATOR) \
+template<typename OtherDerived> \
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_##RCOMPARATOR>, const OtherDerived, const Derived> \
+OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
+{ \
+  return CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_##RCOMPARATOR>, const OtherDerived, const Derived>(other.derived(), derived()); \
+} \
+\
+inline const RCmp ## RCOMPARATOR ## ReturnType \
+OP(const Scalar& s) const { \
+  return Derived::PlainObject::Constant(rows(), cols(), s).R_OP(*this); \
+} \
+friend inline const Cmp ## RCOMPARATOR ## ReturnType \
+OP(const Scalar& s, const Derived& d) { \
+  return d.R_OP(Derived::PlainObject::Constant(d.rows(), d.cols(), s)); \
+}
+
 /** \returns an expression of the coefficient-wise \< operator of *this and \a other
   *
   * Example: \include Cwise_less.cpp
@@ -77,7 +114,7 @@ max
   *
   * \sa all(), any(), operator>(), operator<=()
   */
-EIGEN_MAKE_CWISE_BINARY_OP(operator<,std::less)
+EIGEN_MAKE_CWISE_COMP_OP(operator<, LT)

 /** \returns an expression of the coefficient-wise \<= operator of *this and \a other
   *
@@ -86,7 +123,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator<,std::less)
   *
   * \sa all(), any(), operator>=(), operator<()
   */
-EIGEN_MAKE_CWISE_BINARY_OP(operator<=,std::less_equal)
+EIGEN_MAKE_CWISE_COMP_OP(operator<=, LE)

 /** \returns an expression of the coefficient-wise \> operator of *this and \a other
   *
@@ -95,7 +132,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator<=,std::less_equal)
   *
   * \sa all(), any(), operator>=(), operator<()
   */
-EIGEN_MAKE_CWISE_BINARY_OP(operator>,std::greater)
+EIGEN_MAKE_CWISE_COMP_R_OP(operator>, operator<, LT)

 /** \returns an expression of the coefficient-wise \>= operator of *this and \a other
   *
@@ -104,7 +141,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator>,std::greater)
   *
   * \sa all(), any(), operator>(), operator<=()
   */
-EIGEN_MAKE_CWISE_BINARY_OP(operator>=,std::greater_equal)
+EIGEN_MAKE_CWISE_COMP_R_OP(operator>=, operator<=, LE)

 /** \returns an expression of the coefficient-wise == operator of *this and \a other
   *
@@ -118,7 +155,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator>=,std::greater_equal)
   *
   * \sa all(), any(), isApprox(), isMuchSmallerThan()
   */
-EIGEN_MAKE_CWISE_BINARY_OP(operator==,std::equal_to)
+EIGEN_MAKE_CWISE_COMP_OP(operator==, EQ)

 /** \returns an expression of the coefficient-wise != operator of *this and \a other
   *
@@ -132,7 +169,10 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator==,std::equal_to)
   *
   * \sa all(), any(), isApprox(), isMuchSmallerThan()
   */
-EIGEN_MAKE_CWISE_BINARY_OP(operator!=,std::not_equal_to)
+EIGEN_MAKE_CWISE_COMP_OP(operator!=, NEQ)
+
+#undef EIGEN_MAKE_CWISE_COMP_OP
+#undef EIGEN_MAKE_CWISE_COMP_R_OP

 // scalar addition

@@ -209,3 +249,5 @@ operator||(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
     THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);
   return CwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>(derived(),other.derived());
 }
+
+
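For orientation only (not part of the diff): the EIGEN_MAKE_CWISE_COMP_OP / EIGEN_MAKE_CWISE_COMP_R_OP macros above generate coefficient-wise comparison operators that return boolean expression arrays. A minimal usage sketch, assuming a standard Eigen installation:

\code
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::ArrayXd a(4);
  a << -1.0, 0.5, 2.0, -3.0;

  // Comparisons against a scalar (or another array) yield boolean
  // expressions that compose with any(), all() and select().
  std::cout << "any negative: " << (a < 0.0).any() << "\n";
  std::cout << "all below 10: " << (a.abs() < 10.0).all() << "\n";

  // Use the comparison as a selector to clamp negatives to zero.
  Eigen::ArrayXd clamped = (a < 0.0).select(0.0, a);
  std::cout << clamped.transpose() << "\n";
  return 0;
}
\endcode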
@@ -185,19 +185,3 @@ cube() const
 {
   return derived();
 }
-
-#define EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(METHOD_NAME,FUNCTOR) \
-  inline const CwiseUnaryOp<std::binder2nd<FUNCTOR<Scalar> >, const Derived> \
-  METHOD_NAME(const Scalar& s) const { \
-    return CwiseUnaryOp<std::binder2nd<FUNCTOR<Scalar> >, const Derived> \
-            (derived(), std::bind2nd(FUNCTOR<Scalar>(), s)); \
-  }
-
-EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator==, std::equal_to)
-EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator!=, std::not_equal_to)
-EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator<, std::less)
-EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator<=, std::less_equal)
-EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator>, std::greater)
-EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator>=, std::greater_equal)
-
-
@@ -124,3 +124,20 @@ cwiseQuotient(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
 {
   return CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
 }
+
+typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar,internal::cmp_EQ>, const Derived, const ConstantReturnType> CwiseScalarEqualReturnType;
+
+/** \returns an expression of the coefficient-wise == operator of \c *this and a scalar \a s
+  *
+  * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
+  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
+  * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
+  * isMuchSmallerThan().
+  *
+  * \sa cwiseEqual(const MatrixBase<OtherDerived> &) const
+  */
+inline const CwiseScalarEqualReturnType
+cwiseEqual(const Scalar& s) const
+{
+  return CwiseScalarEqualReturnType(derived(), Derived::Constant(rows(), cols(), s), internal::scalar_cmp_op<Scalar,internal::cmp_EQ>());
+}
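Again as an aside (not part of the diff): the scalar overload of cwiseEqual added above is typically combined with count(). A small sketch, assuming a standard Eigen setup and using an integer matrix to sidestep the exact-comparison warning in the docs:

\code
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXi m(2, 2);
  m << 1, 0,
       0, 1;

  // cwiseEqual(s) compares every coefficient to the scalar s and returns
  // a boolean expression; count() tallies the coefficients that match.
  std::cout << "number of ones: " << m.cwiseEqual(1).count() << "\n";
  return 0;
}
\endcode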
@@ -50,18 +50,3 @@ cwiseSqrt() const { return derived(); }
 inline const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived>
 cwiseInverse() const { return derived(); }

-/** \returns an expression of the coefficient-wise == operator of \c *this and a scalar \a s
-  *
-  * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
-  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
-  * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
-  * isMuchSmallerThan().
-  *
-  * \sa cwiseEqual(const MatrixBase<OtherDerived> &) const
-  */
-inline const CwiseUnaryOp<std::binder1st<std::equal_to<Scalar> >, const Derived>
-cwiseEqual(const Scalar& s) const
-{
-  return CwiseUnaryOp<std::binder1st<std::equal_to<Scalar> >,const Derived>
-         (derived(), std::bind1st(std::equal_to<Scalar>(), s));
-}
@@ -1,7 +1,7 @@

 #include <iostream>

-#if (defined __GNUC__)
+#if (defined __GNUC__) && (!defined __MINGW32__) && (!defined __CYGWIN__)
 #define EIGEN_WEAK_LINKING __attribute__ ((weak))
 #else
 #define EIGEN_WEAK_LINKING
@@ -26,29 +26,18 @@ include(CTest)

 set(EIGEN_TEST_BUILD_FLAGS " " CACHE STRING "Options passed to the build command of unit tests")

-# overwrite default DartConfiguration.tcl
-# The worarounds are different for each version of the MSVC IDE
-if(MSVC_IDE)
-  if(CMAKE_MAKE_PROGRAM_SAVE MATCHES "devenv") # devenv
-    set(EIGEN_MAKECOMMAND_PLACEHOLDER "${CMAKE_MAKE_PROGRAM_SAVE} Eigen.sln /build \"Release\" /project buildtests ${EIGEN_TEST_BUILD_FLAGS} \n# ")
-  else() # msbuild
-    set(EIGEN_MAKECOMMAND_PLACEHOLDER "${CMAKE_MAKE_PROGRAM_SAVE} buildtests.vcxproj /p:Configuration=\${CTEST_CONFIGURATION_TYPE} ${EIGEN_TEST_BUILD_FLAGS}\n# ")
-  endif()
-else()
-  # for make and nmake
-  set(EIGEN_MAKECOMMAND_PLACEHOLDER "${CMAKE_MAKE_PROGRAM_SAVE} buildtests ${EIGEN_TEST_BUILD_FLAGS}")
-endif()
-
-# copy ctest properties, which currently
-#  o raise the warning levels
-configure_file(${CMAKE_CURRENT_BINARY_DIR}/DartConfiguration.tcl ${CMAKE_BINARY_DIR}/DartConfiguration.tcl)
-
-# restore default CMAKE_MAKE_PROGRAM
-set(CMAKE_MAKE_PROGRAM ${CMAKE_MAKE_PROGRAM_SAVE})
-# un-set temporary variables so that it is like they never existed.
-# CMake 2.6.3 introduces the more logical unset() syntax for this.
-set(CMAKE_MAKE_PROGRAM_SAVE)
-set(EIGEN_MAKECOMMAND_PLACEHOLDER)
+# Overwrite default DartConfiguration.tcl such that ctest can build our unit tests.
+# Recall that our unit tests are not in the "all" target, so we have to explicitely ask ctest to build our custom 'buildtests' target.
+# At this stage, we can also add custom flags to the build tool through the user defined EIGEN_TEST_BUILD_FLAGS variable.
+file(READ "${CMAKE_CURRENT_BINARY_DIR}/DartConfiguration.tcl" EIGEN_DART_CONFIG_FILE)
+# try to grab the default flags
+string(REGEX MATCH "MakeCommand:.*-- (.*)\nDefaultCTestConfigurationType" EIGEN_DUMMY ${EIGEN_DART_CONFIG_FILE})
+if(NOT CMAKE_MATCH_1)
+  string(REGEX MATCH "MakeCommand:.*[^c]make (.*)\nDefaultCTestConfigurationType" EIGEN_DUMMY ${EIGEN_DART_CONFIG_FILE})
+endif()
+string(REGEX REPLACE "MakeCommand:.*DefaultCTestConfigurationType" "MakeCommand: ${CMAKE_COMMAND} --build . --target buildtests --config \"\${CTEST_CONFIGURATION_TYPE}\" -- ${CMAKE_MATCH_1} ${EIGEN_TEST_BUILD_FLAGS}\nDefaultCTestConfigurationType"
+       EIGEN_DART_CONFIG_FILE2 ${EIGEN_DART_CONFIG_FILE})
+file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/DartConfiguration.tcl" ${EIGEN_DART_CONFIG_FILE2})

 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CTestCustom.cmake.in ${CMAKE_BINARY_DIR}/CTestCustom.cmake)

@@ -322,22 +322,21 @@ macro(ei_get_compilerver VAR)
     endif()
   else()
     # on all other system we rely on ${CMAKE_CXX_COMPILER}
-    # supporting a "--version" flag
+    # supporting a "--version" or "/version" flag

-    # check whether the head command exists
-    find_program(HEAD_EXE head NO_CMAKE_ENVIRONMENT_PATH NO_CMAKE_PATH NO_CMAKE_SYSTEM_PATH)
-    if(HEAD_EXE)
-      execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version
-                      COMMAND head -n 1
-                      OUTPUT_VARIABLE eigen_cxx_compiler_version_string OUTPUT_STRIP_TRAILING_WHITESPACE)
+    if(WIN32 AND ${CMAKE_CXX_COMPILER_ID} EQUAL "Intel")
+      set(EIGEN_CXX_FLAG_VERSION "/version")
     else()
-      execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version
-                      OUTPUT_VARIABLE eigen_cxx_compiler_version_string OUTPUT_STRIP_TRAILING_WHITESPACE)
-      string(REGEX REPLACE "[\n\r].*" "" eigen_cxx_compiler_version_string ${eigen_cxx_compiler_version_string})
+      set(EIGEN_CXX_FLAG_VERSION "--version")
     endif()

+    execute_process(COMMAND ${CMAKE_CXX_COMPILER} ${EIGEN_CXX_FLAG_VERSION}
+                    OUTPUT_VARIABLE eigen_cxx_compiler_version_string OUTPUT_STRIP_TRAILING_WHITESPACE)
+    string(REGEX REPLACE "[\n\r].*" "" eigen_cxx_compiler_version_string ${eigen_cxx_compiler_version_string})
+
     ei_get_compilerver_from_cxx_version_string("${eigen_cxx_compiler_version_string}" CNAME CVER)
     set(${VAR} "${CNAME}-${CVER}")

   endif()
 endmacro(ei_get_compilerver)

@@ -26,7 +26,7 @@ macro(_metis_check_version)
   string(REGEX MATCH "define[ \t]+METIS_VER_SUBMINOR[ \t]+([0-9]+)" _metis_subminor_version_match "${_metis_version_header}")
   set(METIS_SUBMINOR_VERSION "${CMAKE_MATCH_1}")
   if(NOT METIS_MAJOR_VERSION)
-    message(WARNING "Could not determine Metis version. Assuming version 4.0.0")
+    message(STATUS "Could not determine Metis version. Assuming version 4.0.0")
     set(METIS_VERSION 4.0.0)
   else()
     set(METIS_VERSION ${METIS_MAJOR_VERSION}.${METIS_MINOR_VERSION}.${METIS_SUBMINOR_VERSION})
@@ -26,7 +26,12 @@ if(SPQR_LIBRARIES)
   find_library(SUITESPARSE_LIBRARY SuiteSparse PATHS $ENV{SPQRDIR} ${LIB_INSTALL_DIR})
   if (SUITESPARSE_LIBRARY)
     set(SPQR_LIBRARIES ${SPQR_LIBRARIES} ${SUITESPARSE_LIBRARY})
-  endif (SUITESPARSE_LIBRARY)
+  endif()
+
+  find_library(CHOLMOD_LIBRARY cholmod PATHS $ENV{UMFPACK_LIBDIR} $ENV{UMFPACKDIR} ${LIB_INSTALL_DIR})
+  if(CHOLMOD_LIBRARY)
+    set(SPQR_LIBRARIES ${SPQR_LIBRARIES} ${CHOLMOD_LIBRARY})
+  endif()

 endif(SPQR_LIBRARIES)

@@ -20,24 +20,29 @@ find_library(UMFPACK_LIBRARIES umfpack PATHS $ENV{UMFPACKDIR} ${LIB_INSTALL_DIR}

 if(UMFPACK_LIBRARIES)

-  if (NOT UMFPACK_LIBDIR)
+  if(NOT UMFPACK_LIBDIR)
     get_filename_component(UMFPACK_LIBDIR ${UMFPACK_LIBRARIES} PATH)
   endif(NOT UMFPACK_LIBDIR)

   find_library(COLAMD_LIBRARY colamd PATHS ${UMFPACK_LIBDIR} $ENV{UMFPACKDIR} ${LIB_INSTALL_DIR})
-  if (COLAMD_LIBRARY)
+  if(COLAMD_LIBRARY)
     set(UMFPACK_LIBRARIES ${UMFPACK_LIBRARIES} ${COLAMD_LIBRARY})
-  endif (COLAMD_LIBRARY)
+  endif ()

   find_library(AMD_LIBRARY amd PATHS ${UMFPACK_LIBDIR} $ENV{UMFPACKDIR} ${LIB_INSTALL_DIR})
-  if (AMD_LIBRARY)
+  if(AMD_LIBRARY)
     set(UMFPACK_LIBRARIES ${UMFPACK_LIBRARIES} ${AMD_LIBRARY})
-  endif (AMD_LIBRARY)
+  endif ()

   find_library(SUITESPARSE_LIBRARY SuiteSparse PATHS ${UMFPACK_LIBDIR} $ENV{UMFPACKDIR} ${LIB_INSTALL_DIR})
-  if (SUITESPARSE_LIBRARY)
+  if(SUITESPARSE_LIBRARY)
     set(UMFPACK_LIBRARIES ${UMFPACK_LIBRARIES} ${SUITESPARSE_LIBRARY})
-  endif (SUITESPARSE_LIBRARY)
+  endif ()
+
+  find_library(CHOLMOD_LIBRARY cholmod PATHS $ENV{UMFPACK_LIBDIR} $ENV{UMFPACKDIR} ${LIB_INSTALL_DIR})
+  if(CHOLMOD_LIBRARY)
+    set(UMFPACK_LIBRARIES ${UMFPACK_LIBRARIES} ${CHOLMOD_LIBRARY})
+  endif()

 endif(UMFPACK_LIBRARIES)

@@ -45,4 +50,4 @@ include(FindPackageHandleStandardArgs)
 find_package_handle_standard_args(UMFPACK DEFAULT_MSG
                                   UMFPACK_INCLUDES UMFPACK_LIBRARIES)

-mark_as_advanced(UMFPACK_INCLUDES UMFPACK_LIBRARIES AMD_LIBRARY COLAMD_LIBRARY SUITESPARSE_LIBRARY)
+mark_as_advanced(UMFPACK_INCLUDES UMFPACK_LIBRARIES AMD_LIBRARY COLAMD_LIBRARY CHOLMOD_LIBRARY SUITESPARSE_LIBRARY)
@@ -91,7 +91,8 @@ add_custom_target(doc ALL
   COMMAND doxygen Doxyfile-unsupported
   COMMAND ${CMAKE_COMMAND} -E rename html eigen-doc
   COMMAND ${CMAKE_COMMAND} -E remove eigen-doc/eigen-doc.tgz
-  COMMAND ${CMAKE_COMMAND} -E tar cfz eigen-doc/eigen-doc.tgz eigen-doc
+  COMMAND ${CMAKE_COMMAND} -E tar cfz eigen-doc.tgz eigen-doc
+  COMMAND ${CMAKE_COMMAND} -E rename eigen-doc.tgz eigen-doc/eigen-doc.tgz
   COMMAND ${CMAKE_COMMAND} -E rename eigen-doc html
   WORKING_DIRECTORY ${Eigen_BINARY_DIR}/doc)

@@ -222,7 +222,7 @@ ALIASES = "only_for_vectors=This is only for vectors (either row-
                          "note_about_using_kernel_to_study_multiple_solutions=If you need a complete analysis of the space of solutions, take the one solution obtained by this method and add to it elements of the kernel, as determined by kernel()." \
                          "note_about_checking_solutions=This method just tries to find as good a solution as possible. If you want to check whether a solution exists or if it is accurate, just call this function to get a result and then compute the error of this result, or use MatrixBase::isApprox() directly, for instance like this: \code bool a_solution_exists = (A*result).isApprox(b, precision); \endcode This method avoids dividing by zero, so that the non-existence of a solution doesn't by itself mean that you'll get \c inf or \c nan values." \
                          "note_try_to_help_rvo=This function returns the result by value. In order to make that efficient, it is implemented as just a return statement using a special constructor, hopefully allowing the compiler to perform a RVO (return value optimization)." \
-                         "nonstableyet=\warning This is not considered to be part of the stable public API yet. Changes may happen in future releases. See \ref Experimental \"Experimental parts of Eigen\"
+                         "nonstableyet=\warning This is not considered to be part of the stable public API yet. Changes may happen in future releases. See \ref Experimental \"Experimental parts of Eigen\""

 ALIASES += "eigenAutoToc= "
 ALIASES += "eigenManualPage=\defgroup"
@@ -315,7 +315,7 @@ IDL_PROPERTY_SUPPORT = YES
 # member in the group (if any) for the other members of the group. By default
 # all members of a group must be documented explicitly.

-DISTRIBUTE_GROUP_DOC = NO
+DISTRIBUTE_GROUP_DOC = YES

 # Set the SUBGROUPING tag to YES (the default) to allow class member groups of
 # the same type (for instance a group of public functions) to be put as a
@@ -365,7 +365,7 @@ TYPEDEF_HIDES_STRUCT = NO
 # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
 # corresponding to a cache size of 2^16 = 65536 symbols.

-SYMBOL_CACHE_SIZE = 0
+# SYMBOL_CACHE_SIZE = 0

 # Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
 # set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
@@ -562,7 +562,7 @@ GENERATE_BUGLIST = NO
 # disable (NO) the deprecated list. This list is created by putting
 # \deprecated commands in the documentation.

-GENERATE_DEPRECATEDLIST= NO
+GENERATE_DEPRECATEDLIST= YES

 # The ENABLED_SECTIONS tag can be used to enable conditional
 # documentation sections, marked by \if sectionname ... \endif.
@@ -1465,13 +1465,13 @@ XML_OUTPUT = xml
 # which can be used by a validating XML parser to check the
 # syntax of the XML files.

-XML_SCHEMA =
+# XML_SCHEMA =

 # The XML_DTD tag can be used to specify an XML DTD,
 # which can be used by a validating XML parser to check the
 # syntax of the XML files.

-XML_DTD =
+# XML_DTD =

 # If the XML_PROGRAMLISTING tag is set to YES Doxygen will
 # dump the program listings (including syntax highlighting
@@ -1699,7 +1699,7 @@ DOT_NUM_THREADS = 0
 # the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
 # directory containing the font.

-DOT_FONTNAME = FreeSans
+DOT_FONTNAME =

 # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
 # The default size is 10pt.
@@ -11,6 +11,7 @@ namespace Eigen {
   - \subpage TopicCustomizingEigen
   - \subpage TopicMultiThreading
   - \subpage TopicUsingIntelMKL
+  - \subpage TopicPitfalls
   - \subpage TopicTemplateKeyword
   - \subpage UserManual_UnderstandingEigen
 */
@@ -0,0 +1,38 @@
+namespace Eigen {
+
+/** \page TopicPitfalls Common pitfalls
+
+\section TopicPitfalls_template_keyword Compilation error with template methods
+
+See this \link TopicTemplateKeyword page \endlink.
+
+\section TopicPitfalls_auto_keyword C++11 and the auto keyword
+
+In short: do not use the auto keyword with Eigen's expressions, unless you are 100% sure about what you are doing. In particular, do not use the auto keyword as a replacement for a Matrix<> type. Here is an example:
+
+\code
+MatrixXd A, B;
+auto C = A*B;
+for(...) { ... w = C * v; ...}
+\endcode
+
+In this example, the type of C is not a MatrixXd but an abstract expression representing a matrix product and storing references to A and B. Therefore, the product A*B will be carried out multiple times, once per iteration of the for loop. Moreover, if the coefficients of A or B change during the iteration, then C will evaluate to different values.
+
+Here is another example leading to a segfault:
+\code
+auto C = ((A+B).eval()).transpose();
+// do something with C
+\endcode
+The problem is that eval() returns a temporary object (in this case a MatrixXd) which is then referenced by the Transpose<> expression. However, this temporary is deleted right after the first line, so the C expression references a dead object. The same issue might occur when sub-expressions are automatically evaluated by Eigen, as in the following example:
+\code
+VectorXd u, v;
+auto C = u + (A*v).normalized();
+// do something with C
+\endcode
+where the normalized() method has to evaluate the expensive product A*v to avoid evaluating it twice. On the other hand, the following example is perfectly fine:
+\code
+auto C = (u + (A*v).normalized()).eval();
+\endcode
+In this case, C will be a regular VectorXd object.
+*/
+}
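A complementary note (my own addition, not part of the new page above): the simplest way to stay clear of these auto pitfalls is to name the destination type, which forces the expression to be evaluated once into a plain object. A minimal sketch:

\code
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(3, 3);

  // Naming the type evaluates the product once into a plain MatrixXd,
  // so later uses of C neither re-run the product nor dangle.
  Eigen::MatrixXd C = A * B;

  Eigen::VectorXd v = Eigen::VectorXd::Ones(3);
  Eigen::VectorXd w = C * v;   // reuses the already evaluated C
  (void)w;
  return 0;
}
\endcode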
@@ -17,7 +17,7 @@ You can control the number of thread that will be used using either the OpenMP A
 Unless setNbThreads has been called, Eigen uses the number of threads specified by OpenMP. You can restore this behavior by calling \code setNbThreads(0); \endcode
 You can query the number of threads that will be used with:
 \code
-n = Eigen::nbThreads(n);
+n = Eigen::nbThreads( );
 \endcode
 You can disable Eigen's multi threading at compile time by defining the EIGEN_DONT_PARALLELIZE preprocessor token.

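One more illustrative sketch (not part of the diff), showing the setNbThreads / nbThreads pair touched by the hunk above; it assumes Eigen was built with OpenMP enabled, otherwise setNbThreads is a harmless no-op and nbThreads() reports 1:

\code
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::setNbThreads(4);   // cap Eigen's parallel kernels at 4 threads
  std::cout << "Eigen will use " << Eigen::nbThreads() << " thread(s)\n";

  Eigen::MatrixXd A = Eigen::MatrixXd::Random(512, 512);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(512, 512);
  Eigen::MatrixXd C = A * B;   // large products may run multi-threaded
  std::cout << C.norm() << "\n";

  Eigen::setNbThreads(0);      // restore OpenMP's default number of threads
  return 0;
}
\endcode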