commit 31a3c8222a
@@ -1,4 +1,4 @@
 repo: 8a21fd850624c931e448cbcfb38168cb2717c790
-node: b30b87236a1b1552af32ac34075ee5696a9b5a33
+node: 07105f7124f9aef00a68c85e0fc606e65d3d6c15
 branch: 3.2
-tag: 3.2.7
+tag: 3.2.8
@@ -30,3 +30,4 @@ ffa86ffb557094721ca71dcea6aed2651b9fd610 3.2.0
 10219c95fe653d4962aa9db4946f6fbea96dd740 3.2.4
 bdd17ee3b1b3a166cd5ec36dcad4fc1f3faf774a 3.2.5
 c58038c56923e0fd86de3ded18e03df442e66dfb 3.2.6
+b30b87236a1b1552af32ac34075ee5696a9b5a33 3.2.7
@@ -1,6 +1,5 @@
 project(Eigen)
-
-cmake_minimum_required(VERSION 2.8.2)
+cmake_minimum_required(VERSION 2.8.5)
 
 # guard against in-source builds
 
@@ -55,6 +54,7 @@ endif(EIGEN_HG_CHANGESET)
 
 
 include(CheckCXXCompilerFlag)
+include(GNUInstallDirs)
 
 set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
 
@@ -288,25 +288,26 @@ option(EIGEN_TEST_C++0x "Enables all C++0x features." OFF)
 
 include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
 
-# the user modifiable install path for header files
-set(EIGEN_INCLUDE_INSTALL_DIR ${EIGEN_INCLUDE_INSTALL_DIR} CACHE PATH "The directory where we install the header files (optional)")
-# set the internal install path for header files which depends on wether the user modifiable
-# EIGEN_INCLUDE_INSTALL_DIR has been set by the user or not.
-if(EIGEN_INCLUDE_INSTALL_DIR)
-set(INCLUDE_INSTALL_DIR
-${EIGEN_INCLUDE_INSTALL_DIR}
-CACHE INTERNAL
-"The directory where we install the header files (internal)"
-)
+# Backward compatibility support for EIGEN_INCLUDE_INSTALL_DIR
+if(EIGEN_INCLUDE_INSTALL_DIR AND NOT INCLUDE_INSTALL_DIR)
+set(INCLUDE_INSTALL_DIR ${EIGEN_INCLUDE_INSTALL_DIR}
+CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where Eigen header files are installed")
 else()
 set(INCLUDE_INSTALL_DIR
-"include/eigen3"
-CACHE INTERNAL
-"The directory where we install the header files (internal)"
+"${CMAKE_INSTALL_INCLUDEDIR}/eigen3"
+CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where Eigen header files are installed"
 )
 endif()
 
+set(CMAKEPACKAGE_INSTALL_DIR
+"${CMAKE_INSTALL_LIBDIR}/cmake/eigen3"
+CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where Eigen3Config.cmake is installed"
+)
+set(PKGCONFIG_INSTALL_DIR
+"${CMAKE_INSTALL_DATADIR}/pkgconfig"
+CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where eigen3.pc is installed"
+)
+
 # similar to set_target_properties but append the property instead of overwriting it
 macro(ei_add_target_property target prop value)
 
@@ -324,21 +325,9 @@ install(FILES
 )
 
 if(EIGEN_BUILD_PKGCONFIG)
-SET(path_separator ":")
-STRING(REPLACE ${path_separator} ";" pkg_config_libdir_search "$ENV{PKG_CONFIG_LIBDIR}")
-message(STATUS "searching for 'pkgconfig' directory in PKG_CONFIG_LIBDIR ( $ENV{PKG_CONFIG_LIBDIR} ), ${CMAKE_INSTALL_PREFIX}/share, and ${CMAKE_INSTALL_PREFIX}/lib")
-FIND_PATH(pkg_config_libdir pkgconfig ${pkg_config_libdir_search} ${CMAKE_INSTALL_PREFIX}/share ${CMAKE_INSTALL_PREFIX}/lib ${pkg_config_libdir_search})
-if(pkg_config_libdir)
-SET(pkg_config_install_dir ${pkg_config_libdir})
-message(STATUS "found ${pkg_config_libdir}/pkgconfig" )
-else(pkg_config_libdir)
-SET(pkg_config_install_dir ${CMAKE_INSTALL_PREFIX}/share)
-message(STATUS "pkgconfig not found; installing in ${pkg_config_install_dir}" )
-endif(pkg_config_libdir)
-
-configure_file(eigen3.pc.in eigen3.pc)
+configure_file(eigen3.pc.in eigen3.pc @ONLY)
 install(FILES ${CMAKE_CURRENT_BINARY_DIR}/eigen3.pc
-DESTINATION ${pkg_config_install_dir}/pkgconfig
+DESTINATION ${PKGCONFIG_INSTALL_DIR}
 )
 endif(EIGEN_BUILD_PKGCONFIG)
 
@@ -401,12 +390,15 @@ if(cmake_generator_tolower MATCHES "makefile")
 message(STATUS "--------------+--------------------------------------------------------------")
 message(STATUS "Command | Description")
 message(STATUS "--------------+--------------------------------------------------------------")
-message(STATUS "make install | Install to ${CMAKE_INSTALL_PREFIX}. To change that:")
-message(STATUS " | cmake . -DCMAKE_INSTALL_PREFIX=yourpath")
-message(STATUS " | Eigen headers will then be installed to:")
-message(STATUS " | ${CMAKE_INSTALL_PREFIX}/${INCLUDE_INSTALL_DIR}")
-message(STATUS " | To install Eigen headers to a separate location, do:")
-message(STATUS " | cmake . -DEIGEN_INCLUDE_INSTALL_DIR=yourpath")
+message(STATUS "make install | Install Eigen. Headers will be installed to:")
+message(STATUS " | <CMAKE_INSTALL_PREFIX>/<INCLUDE_INSTALL_DIR>")
+message(STATUS " | Using the following values:")
+message(STATUS " | CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX}")
+message(STATUS " | INCLUDE_INSTALL_DIR: ${INCLUDE_INSTALL_DIR}")
+message(STATUS " | Change the install location of Eigen headers using:")
+message(STATUS " | cmake . -DCMAKE_INSTALL_PREFIX=yourprefix")
+message(STATUS " | Or:")
+message(STATUS " | cmake . -DINCLUDE_INSTALL_DIR=yourdir")
 message(STATUS "make doc | Generate the API documentation, requires Doxygen & LaTeX")
 message(STATUS "make check | Build and run the unit-tests. Read this page:")
 message(STATUS " | http://eigen.tuxfamily.org/index.php?title=Tests")
@@ -12,7 +12,7 @@ extern "C" {
 /** \ingroup Support_modules
 * \defgroup CholmodSupport_Module CholmodSupport module
 *
-* This module provides an interface to the Cholmod library which is part of the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">suitesparse</a> package.
+* This module provides an interface to the Cholmod library which is part of the <a href="http://www.suitesparse.com">suitesparse</a> package.
 * It provides the two following main factorization classes:
 * - class CholmodSupernodalLLT: a supernodal LLT Cholesky factorization.
 * - class CholmodDecomposiiton: a general L(D)LT Cholesky factorization with automatic or explicit runtime selection of the underlying factorization method (supernodal or simplicial).
@@ -10,7 +10,7 @@
 /** \ingroup Support_modules
 * \defgroup SPQRSupport_Module SuiteSparseQR module
 *
-* This module provides an interface to the SPQR library, which is part of the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">suitesparse</a> package.
+* This module provides an interface to the SPQR library, which is part of the <a href="http://www.suitesparse.com">suitesparse</a> package.
 *
 * \code
 * #include <Eigen/SPQRSupport>
@@ -12,7 +12,7 @@ extern "C" {
 /** \ingroup Support_modules
 * \defgroup UmfPackSupport_Module UmfPackSupport module
 *
-* This module provides an interface to the UmfPack library which is part of the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">suitesparse</a> package.
+* This module provides an interface to the UmfPack library which is part of the <a href="http://www.suitesparse.com">suitesparse</a> package.
 * It provides the following factorization class:
 * - class UmfPackLU: a multifrontal sequential LU factorization.
 *
@@ -38,7 +38,7 @@ struct traits<CwiseUnaryView<ViewOp, MatrixType> >
 typedef typename remove_all<MatrixTypeNested>::type _MatrixTypeNested;
 enum {
 Flags = (traits<_MatrixTypeNested>::Flags & (HereditaryBits | LvalueBit | LinearAccessBit | DirectAccessBit)),
-CoeffReadCost = traits<_MatrixTypeNested>::CoeffReadCost + functor_traits<ViewOp>::Cost,
+CoeffReadCost = EIGEN_ADD_COST(traits<_MatrixTypeNested>::CoeffReadCost, functor_traits<ViewOp>::Cost),
 MatrixTypeInnerStride = inner_stride_at_compile_time<MatrixType>::ret,
 // need to cast the sizeof's from size_t to int explicitly, otherwise:
 // "error: no integral type can represent all of the enumerator values
@@ -40,7 +40,7 @@ static inline void check_DenseIndex_is_signed() {
 */
 template<typename Derived> class DenseBase
 #ifndef EIGEN_PARSED_BY_DOXYGEN
-: public internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
+: public internal::special_scalar_op_base<Derived, typename internal::traits<Derived>::Scalar,
 typename NumTraits<typename internal::traits<Derived>::Scalar>::Real,
 DenseCoeffsBase<Derived> >
 #else
@@ -425,15 +425,18 @@ template<> struct gemv_selector<OnTheRight,ColMajor,true>
 ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs())
 * RhsBlasTraits::extractScalarFactor(prod.rhs());
 
+// make sure Dest is a compile-time vector type (bug 1166)
+typedef typename conditional<Dest::IsVectorAtCompileTime, Dest, typename Dest::ColXpr>::type ActualDest;
+
 enum {
 // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
 // on, the other hand it is good for the cache to pack the vector anyways...
-EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1,
+EvalToDestAtCompileTime = (ActualDest::InnerStrideAtCompileTime==1),
 ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),
-MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal
+MightCannotUseDest = (ActualDest::InnerStrideAtCompileTime!=1) || ComplexByReal
 };
 
-gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
+gemv_static_vector_if<ResScalar,ActualDest::SizeAtCompileTime,ActualDest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
 
 bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0));
 bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
@@ -522,7 +525,7 @@ template<> struct gemv_selector<OnTheRight,RowMajor,true>
 actualLhs.rows(), actualLhs.cols(),
 actualLhs.data(), actualLhs.outerStride(),
 actualRhsPtr, 1,
-dest.data(), dest.innerStride(),
+dest.data(), dest.col(0).innerStride(), //NOTE if dest is not a vector at compile-time, then dest.innerStride() might be wrong. (bug 1166)
 actualAlpha);
 }
 };
@@ -149,6 +149,10 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
 checkSanity();
 }
 
+#ifdef EIGEN_MAPBASE_PLUGIN
+#include EIGEN_MAPBASE_PLUGIN
+#endif
+
 protected:
 
 void checkSanity() const
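The new EIGEN_MAPBASE_PLUGIN hook mirrors Eigen's existing *_PLUGIN extension mechanism: if the macro names a header before any Eigen include, that file is textually injected into MapBase, so users can add members to Map expressions. A minimal two-file sketch, assuming a hypothetical plugin file name and member:

    // --- contents of MyMapBasePlugin.h (hypothetical file, injected into MapBase) ---
    inline Index mappedSize() const { return this->size(); }

    // --- user translation unit ---
    #define EIGEN_MAPBASE_PLUGIN "MyMapBasePlugin.h"
    #include <Eigen/Core>
    #include <cstdio>

    int main()
    {
      float data[4] = {1.f, 2.f, 3.f, 4.f};
      Eigen::Map<Eigen::Vector4f> m(data);
      std::printf("%ld\n", (long)m.mappedSize()); // member provided by the plugin
      return 0;
    }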
@@ -707,21 +707,21 @@ struct scalar_fuzzy_impl : scalar_fuzzy_default_impl<Scalar, NumTraits<Scalar>::
 
 template<typename Scalar, typename OtherScalar>
 inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y,
-typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
+const typename NumTraits<Scalar>::Real &precision = NumTraits<Scalar>::dummy_precision())
 {
 return scalar_fuzzy_impl<Scalar>::template isMuchSmallerThan<OtherScalar>(x, y, precision);
 }
 
 template<typename Scalar>
 inline bool isApprox(const Scalar& x, const Scalar& y,
-typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
+const typename NumTraits<Scalar>::Real &precision = NumTraits<Scalar>::dummy_precision())
 {
 return scalar_fuzzy_impl<Scalar>::isApprox(x, y, precision);
 }
 
 template<typename Scalar>
 inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y,
-typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
+const typename NumTraits<Scalar>::Real &precision = NumTraits<Scalar>::dummy_precision())
 {
 return scalar_fuzzy_impl<Scalar>::isApproxOrLessThan(x, y, precision);
 }
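The precision argument is now taken by const reference; behaviour is unchanged. For reference, a small usage sketch of these scalar-level fuzzy comparisons (they live in the Eigen::internal namespace, and the precision argument is optional):

    #include <Eigen/Core>
    #include <cassert>

    int main()
    {
      double x = 0.1 + 0.2, y = 0.3;
      assert(Eigen::internal::isApprox(x, y));          // within dummy_precision()
      assert(Eigen::internal::isApprox(x, y, 1e-12));   // with an explicit tolerance
      assert(Eigen::internal::isMuchSmallerThan(1e-20, 1.0)); // 1e-20 is negligible w.r.t. 1.0
      return 0;
    }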
@@ -116,17 +116,17 @@ template<typename Lhs, typename Rhs, int Mode, int Index, int Size>
 struct triangular_solver_unroller<Lhs,Rhs,Mode,Index,Size,false> {
 enum {
 IsLower = ((Mode&Lower)==Lower),
-I = IsLower ? Index : Size - Index - 1,
-S = IsLower ? 0 : I+1
+RowIndex = IsLower ? Index : Size - Index - 1,
+S = IsLower ? 0 : RowIndex+1
 };
 static void run(const Lhs& lhs, Rhs& rhs)
 {
 if (Index>0)
-rhs.coeffRef(I) -= lhs.row(I).template segment<Index>(S).transpose()
+rhs.coeffRef(RowIndex) -= lhs.row(RowIndex).template segment<Index>(S).transpose()
 .cwiseProduct(rhs.template segment<Index>(S)).sum();
 
 if(!(Mode & UnitDiag))
-rhs.coeffRef(I) /= lhs.coeff(I,I);
+rhs.coeffRef(RowIndex) /= lhs.coeff(RowIndex,RowIndex);
 
 triangular_solver_unroller<Lhs,Rhs,Mode,Index+1,Size>::run(lhs,rhs);
 }
@@ -76,14 +76,17 @@ template<typename Derived>
 template<typename Visitor>
 void DenseBase<Derived>::visit(Visitor& visitor) const
 {
+typedef typename internal::remove_all<typename Derived::Nested>::type ThisNested;
+typename Derived::Nested thisNested(derived());
+
 enum { unroll = SizeAtCompileTime != Dynamic
 && CoeffReadCost != Dynamic
 && (SizeAtCompileTime == 1 || internal::functor_traits<Visitor>::Cost != Dynamic)
 && SizeAtCompileTime * CoeffReadCost + (SizeAtCompileTime-1) * internal::functor_traits<Visitor>::Cost
 <= EIGEN_UNROLLING_LIMIT };
-return internal::visitor_impl<Visitor, Derived,
+return internal::visitor_impl<Visitor, ThisNested,
 unroll ? int(SizeAtCompileTime) : Dynamic
->::run(derived(), visitor);
+>::run(thisNested, visitor);
 }
 
 namespace internal {
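The visitor is now applied to the nested (properly evaluated) expression rather than to the raw derived object. For reference, a minimal sketch of the visitor interface DenseBase::visit() expects: init() is called for coefficient (0,0), then operator() for every remaining coefficient:

    #include <Eigen/Core>
    #include <cmath>
    #include <iostream>

    // visitor tracking the coefficient with the largest absolute value
    struct MaxAbsVisitor
    {
      typedef Eigen::MatrixXd::Index Index;
      double value; Index row, col;
      void init(const double& v, Index i, Index j) { value = std::abs(v); row = i; col = j; }
      void operator()(const double& v, Index i, Index j)
      { if (std::abs(v) > value) { value = std::abs(v); row = i; col = j; } }
    };

    int main()
    {
      Eigen::MatrixXd m = Eigen::MatrixXd::Random(3,3);
      MaxAbsVisitor v;
      m.visit(v);
      std::cout << "max |coeff| " << v.value << " at (" << v.row << "," << v.col << ")\n";
      return 0;
    }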
@@ -235,63 +235,27 @@ template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from) { E
 return _mm_loadu_ps(from);
 #endif
 }
-template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_pd(from); }
-template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from)); }
 #else
-// Fast unaligned loads. Note that here we cannot directly use intrinsics: this would
-// require pointer casting to incompatible pointer types and leads to invalid code
-// because of the strict aliasing rule. The "dummy" stuff are required to enforce
-// a correct instruction dependency.
-// TODO: do the same for MSVC (ICC is compatible)
 // NOTE: with the code below, MSVC's compiler crashes!
 
-#if defined(__GNUC__) && defined(__i386__)
-// bug 195: gcc/i386 emits weird x87 fldl/fstpl instructions for _mm_load_sd
-#define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
-#elif defined(__clang__)
-// bug 201: Segfaults in __mm_loadh_pd with clang 2.8
-#define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
-#else
-#define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 0
-#endif
-
 template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
 {
 EIGEN_DEBUG_UNALIGNED_LOAD
-#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
 return _mm_loadu_ps(from);
-#else
-__m128d res;
-res = _mm_load_sd((const double*)(from)) ;
-res = _mm_loadh_pd(res, (const double*)(from+2)) ;
-return _mm_castpd_ps(res);
-#endif
 }
+#endif
+
 template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
 {
 EIGEN_DEBUG_UNALIGNED_LOAD
-#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
 return _mm_loadu_pd(from);
-#else
-__m128d res;
-res = _mm_load_sd(from) ;
-res = _mm_loadh_pd(res,from+1);
-return res;
-#endif
 }
 template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
 {
 EIGEN_DEBUG_UNALIGNED_LOAD
-#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
-return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from));
-#else
-__m128d res;
-res = _mm_load_sd((const double*)(from)) ;
-res = _mm_loadh_pd(res, (const double*)(from+2)) ;
-return _mm_castpd_si128(res);
-#endif
+return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
 }
-#endif
 
 template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
 {
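After this change the SSE ploadu specializations map directly to the _mm_loadu_* intrinsics instead of the old _mm_load_sd/_mm_loadh_pd workaround. A hedged sketch exercising the internal packet layer on an unaligned address (internal API, shown only under the assumption that an SSE build is active):

    #include <Eigen/Core>
    #include <cstdio>

    int main()
    {
    #ifdef EIGEN_VECTORIZE_SSE
      float buf[5] = {0.f, 1.f, 2.f, 3.f, 4.f};
      // unaligned load/store through the packet layer (maps to _mm_loadu_ps / _mm_storeu_ps)
      Eigen::internal::Packet4f p = Eigen::internal::ploadu<Eigen::internal::Packet4f>(buf + 1);
      float out[4];
      Eigen::internal::pstoreu(out, p);
      std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
    #endif
      return 0;
    }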
@@ -140,8 +140,10 @@ static void run(Index rows, Index cols, Index depth,
 // Release all the sub blocks B'_j of B' for the current thread,
 // i.e., we simply decrement the number of users by 1
 for(Index j=0; j<threads; ++j)
+{
 #pragma omp atomic
---(info[j].users);
+info[j].users -= 1;
+}
 }
 }
 else
@@ -390,13 +392,17 @@ class GeneralProduct<Lhs, Rhs, GemmProduct>
 
 GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
 {
+#if !(defined(EIGEN_NO_STATIC_ASSERT) && defined(EIGEN_NO_DEBUG))
 typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp;
 EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar);
+#endif
 }
 
 template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
 {
 eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
+if(m_lhs.cols()==0 || m_lhs.rows()==0 || m_rhs.cols()==0)
+return;
 
 typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
 typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);
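Two user-visible points follow from this hunk: the compile-time scalar-compatibility check is now skipped only when both EIGEN_NO_STATIC_ASSERT and EIGEN_NO_DEBUG are defined, and products with an empty operand return immediately. A sketch of what the compatibility check rejects (mixing scalar types in a product requires an explicit cast):

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(2,2);
      Eigen::MatrixXf B = Eigen::MatrixXf::Random(2,2);
      // Eigen::MatrixXd C = A * B;             // rejected at compile time: mixed double/float scalars
      Eigen::MatrixXd C = A * B.cast<double>(); // explicit cast states the intent
      return (int)C.rows();
    }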
@@ -115,8 +115,9 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conju
 {
 // TODO write a small kernel handling this (can be shared with trsv)
 Index i = IsLower ? k2+k1+k : k2-k1-k-1;
-Index s = IsLower ? k2+k1 : i+1;
 Index rs = actualPanelWidth - k - 1; // remaining size
+Index s = TriStorageOrder==RowMajor ? (IsLower ? k2+k1 : i+1)
+: IsLower ? i+1 : i-rs;
 
 Scalar a = (Mode & UnitDiag) ? Scalar(1) : Scalar(1)/conj(tri(i,i));
 for (Index j=j2; j<j2+actual_cols; ++j)
@@ -133,7 +134,6 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conju
 }
 else
 {
-Index s = IsLower ? i+1 : i-rs;
 Scalar b = (other(i,j) *= a);
 Scalar* r = &other(s,j);
 const Scalar* l = &tri(s,i);
@@ -13,23 +13,292 @@
 
 #define EIGEN_WORLD_VERSION 3
 #define EIGEN_MAJOR_VERSION 2
-#define EIGEN_MINOR_VERSION 7
+#define EIGEN_MINOR_VERSION 8
 
 #define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
 (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
 EIGEN_MINOR_VERSION>=z))))
 
+
+// Compiler identification, EIGEN_COMP_*
+
+/// \internal EIGEN_COMP_GNUC set to 1 for all compilers compatible with GCC
 #ifdef __GNUC__
-#define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__==x && __GNUC_MINOR__>=y) || __GNUC__>x)
+#define EIGEN_COMP_GNUC 1
 #else
-#define EIGEN_GNUC_AT_LEAST(x,y) 0
+#define EIGEN_COMP_GNUC 0
 #endif
 
-#ifdef __GNUC__
-#define EIGEN_GNUC_AT_MOST(x,y) ((__GNUC__==x && __GNUC_MINOR__<=y) || __GNUC__<x)
+/// \internal EIGEN_COMP_CLANG set to 1 if the compiler is clang (alias for __clang__)
+#if defined(__clang__)
+#define EIGEN_COMP_CLANG 1
 #else
-#define EIGEN_GNUC_AT_MOST(x,y) 0
+#define EIGEN_COMP_CLANG 0
 #endif
+
+
+/// \internal EIGEN_COMP_LLVM set to 1 if the compiler backend is llvm
+#if defined(__llvm__)
+#define EIGEN_COMP_LLVM 1
+#else
+#define EIGEN_COMP_LLVM 0
+#endif
+
+/// \internal EIGEN_COMP_ICC set to __INTEL_COMPILER if the compiler is Intel compiler, 0 otherwise
+#if defined(__INTEL_COMPILER)
+#define EIGEN_COMP_ICC __INTEL_COMPILER
+#else
+#define EIGEN_COMP_ICC 0
+#endif
+
+/// \internal EIGEN_COMP_MINGW set to 1 if the compiler is mingw
+#if defined(__MINGW32__)
+#define EIGEN_COMP_MINGW 1
+#else
+#define EIGEN_COMP_MINGW 0
+#endif
+
+/// \internal EIGEN_COMP_SUNCC set to 1 if the compiler is Solaris Studio
+#if defined(__SUNPRO_CC)
+#define EIGEN_COMP_SUNCC 1
+#else
+#define EIGEN_COMP_SUNCC 0
+#endif
+
+/// \internal EIGEN_COMP_MSVC set to _MSC_VER if the compiler is Microsoft Visual C++, 0 otherwise.
+#if defined(_MSC_VER)
+#define EIGEN_COMP_MSVC _MSC_VER
+#else
+#define EIGEN_COMP_MSVC 0
+#endif
+
+/// \internal EIGEN_COMP_MSVC_STRICT set to 1 if the compiler is really Microsoft Visual C++ and not ,e.g., ICC
+#if EIGEN_COMP_MSVC && !(EIGEN_COMP_ICC)
+#define EIGEN_COMP_MSVC_STRICT _MSC_VER
+#else
+#define EIGEN_COMP_MSVC_STRICT 0
+#endif
+
+/// \internal EIGEN_COMP_IBM set to 1 if the compiler is IBM XL C++
+#if defined(__IBMCPP__) || defined(__xlc__)
+#define EIGEN_COMP_IBM 1
+#else
+#define EIGEN_COMP_IBM 0
+#endif
+
+/// \internal EIGEN_COMP_PGI set to 1 if the compiler is Portland Group Compiler
+#if defined(__PGI)
+#define EIGEN_COMP_PGI 1
+#else
+#define EIGEN_COMP_PGI 0
+#endif
+
+/// \internal EIGEN_COMP_ARM set to 1 if the compiler is ARM Compiler
+#if defined(__CC_ARM) || defined(__ARMCC_VERSION)
+#define EIGEN_COMP_ARM 1
+#else
+#define EIGEN_COMP_ARM 0
+#endif
+
+
+/// \internal EIGEN_GNUC_STRICT set to 1 if the compiler is really GCC and not a compatible compiler (e.g., ICC, clang, mingw, etc.)
+#if EIGEN_COMP_GNUC && !(EIGEN_COMP_CLANG || EIGEN_COMP_ICC || EIGEN_COMP_MINGW || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM )
+#define EIGEN_COMP_GNUC_STRICT 1
+#else
+#define EIGEN_COMP_GNUC_STRICT 0
+#endif
+
+
+#if EIGEN_COMP_GNUC
+#define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__==x && __GNUC_MINOR__>=y) || __GNUC__>x)
+#define EIGEN_GNUC_AT_MOST(x,y) ((__GNUC__==x && __GNUC_MINOR__<=y) || __GNUC__<x)
+#define EIGEN_GNUC_AT(x,y) ( __GNUC__==x && __GNUC_MINOR__==y )
+#else
+#define EIGEN_GNUC_AT_LEAST(x,y) 0
+#define EIGEN_GNUC_AT_MOST(x,y) 0
+#define EIGEN_GNUC_AT(x,y) 0
+#endif
+
+// FIXME: could probably be removed as we do not support gcc 3.x anymore
+#if EIGEN_COMP_GNUC && (__GNUC__ <= 3)
+#define EIGEN_GCC3_OR_OLDER 1
+#else
+#define EIGEN_GCC3_OR_OLDER 0
+#endif
+
+
+// Architecture identification, EIGEN_ARCH_*
+
+#if defined(__x86_64__) || defined(_M_X64) || defined(__amd64)
+#define EIGEN_ARCH_x86_64 1
+#else
+#define EIGEN_ARCH_x86_64 0
+#endif
+
+#if defined(__i386__) || defined(_M_IX86) || defined(_X86_) || defined(__i386)
+#define EIGEN_ARCH_i386 1
+#else
+#define EIGEN_ARCH_i386 0
+#endif
+
+#if EIGEN_ARCH_x86_64 || EIGEN_ARCH_i386
+#define EIGEN_ARCH_i386_OR_x86_64 1
+#else
+#define EIGEN_ARCH_i386_OR_x86_64 0
+#endif
+
+/// \internal EIGEN_ARCH_ARM set to 1 if the architecture is ARM
+#if defined(__arm__)
+#define EIGEN_ARCH_ARM 1
+#else
+#define EIGEN_ARCH_ARM 0
+#endif
+
+/// \internal EIGEN_ARCH_ARM64 set to 1 if the architecture is ARM64
+#if defined(__aarch64__)
+#define EIGEN_ARCH_ARM64 1
+#else
+#define EIGEN_ARCH_ARM64 0
+#endif
+
+#if EIGEN_ARCH_ARM || EIGEN_ARCH_ARM64
+#define EIGEN_ARCH_ARM_OR_ARM64 1
+#else
+#define EIGEN_ARCH_ARM_OR_ARM64 0
+#endif
+
+/// \internal EIGEN_ARCH_MIPS set to 1 if the architecture is MIPS
+#if defined(__mips__) || defined(__mips)
+#define EIGEN_ARCH_MIPS 1
+#else
+#define EIGEN_ARCH_MIPS 0
+#endif
+
+/// \internal EIGEN_ARCH_SPARC set to 1 if the architecture is SPARC
+#if defined(__sparc__) || defined(__sparc)
+#define EIGEN_ARCH_SPARC 1
+#else
+#define EIGEN_ARCH_SPARC 0
+#endif
+
+/// \internal EIGEN_ARCH_IA64 set to 1 if the architecture is Intel Itanium
+#if defined(__ia64__)
+#define EIGEN_ARCH_IA64 1
+#else
+#define EIGEN_ARCH_IA64 0
+#endif
+
+/// \internal EIGEN_ARCH_PPC set to 1 if the architecture is PowerPC
+#if defined(__powerpc__) || defined(__ppc__) || defined(_M_PPC)
+#define EIGEN_ARCH_PPC 1
+#else
+#define EIGEN_ARCH_PPC 0
+#endif
+
+
+
+// Operating system identification, EIGEN_OS_*
+
+/// \internal EIGEN_OS_UNIX set to 1 if the OS is a unix variant
+#if defined(__unix__) || defined(__unix)
+#define EIGEN_OS_UNIX 1
+#else
+#define EIGEN_OS_UNIX 0
+#endif
+
+/// \internal EIGEN_OS_LINUX set to 1 if the OS is based on Linux kernel
+#if defined(__linux__)
+#define EIGEN_OS_LINUX 1
+#else
+#define EIGEN_OS_LINUX 0
+#endif
+
+/// \internal EIGEN_OS_ANDROID set to 1 if the OS is Android
+// note: ANDROID is defined when using ndk_build, __ANDROID__ is defined when using a standalone toolchain.
+#if defined(__ANDROID__) || defined(ANDROID)
+#define EIGEN_OS_ANDROID 1
+#else
+#define EIGEN_OS_ANDROID 0
+#endif
+
+/// \internal EIGEN_OS_GNULINUX set to 1 if the OS is GNU Linux and not Linux-based OS (e.g., not android)
+#if defined(__gnu_linux__) && !(EIGEN_OS_ANDROID)
+#define EIGEN_OS_GNULINUX 1
+#else
+#define EIGEN_OS_GNULINUX 0
+#endif
+
+/// \internal EIGEN_OS_BSD set to 1 if the OS is a BSD variant
+#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__bsdi__) || defined(__DragonFly__)
+#define EIGEN_OS_BSD 1
+#else
+#define EIGEN_OS_BSD 0
+#endif
+
+/// \internal EIGEN_OS_MAC set to 1 if the OS is MacOS
+#if defined(__APPLE__)
+#define EIGEN_OS_MAC 1
+#else
+#define EIGEN_OS_MAC 0
+#endif
+
+/// \internal EIGEN_OS_QNX set to 1 if the OS is QNX
+#if defined(__QNX__)
+#define EIGEN_OS_QNX 1
+#else
+#define EIGEN_OS_QNX 0
+#endif
+
+/// \internal EIGEN_OS_WIN set to 1 if the OS is Windows based
+#if defined(_WIN32)
+#define EIGEN_OS_WIN 1
+#else
+#define EIGEN_OS_WIN 0
+#endif
+
+/// \internal EIGEN_OS_WIN64 set to 1 if the OS is Windows 64bits
+#if defined(_WIN64)
+#define EIGEN_OS_WIN64 1
+#else
+#define EIGEN_OS_WIN64 0
+#endif
+
+/// \internal EIGEN_OS_WINCE set to 1 if the OS is Windows CE
+#if defined(_WIN32_WCE)
+#define EIGEN_OS_WINCE 1
+#else
+#define EIGEN_OS_WINCE 0
+#endif
+
+/// \internal EIGEN_OS_CYGWIN set to 1 if the OS is Windows/Cygwin
+#if defined(__CYGWIN__)
+#define EIGEN_OS_CYGWIN 1
+#else
+#define EIGEN_OS_CYGWIN 0
+#endif
+
+/// \internal EIGEN_OS_WIN_STRICT set to 1 if the OS is really Windows and not some variants
+#if EIGEN_OS_WIN && !( EIGEN_OS_WINCE || EIGEN_OS_CYGWIN )
+#define EIGEN_OS_WIN_STRICT 1
+#else
+#define EIGEN_OS_WIN_STRICT 0
+#endif
+
+/// \internal EIGEN_OS_SUN set to 1 if the OS is SUN
+#if (defined(sun) || defined(__sun)) && !(defined(__SVR4) || defined(__svr4__))
+#define EIGEN_OS_SUN 1
+#else
+#define EIGEN_OS_SUN 0
+#endif
+
+/// \internal EIGEN_OS_SOLARIS set to 1 if the OS is Solaris
+#if (defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))
+#define EIGEN_OS_SOLARIS 1
+#else
+#define EIGEN_OS_SOLARIS 0
+#endif
+
 #if EIGEN_GNUC_AT_MOST(4,3) && !defined(__clang__)
 // see bug 89
 #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 0
@@ -37,12 +306,6 @@
 #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 1
 #endif
 
-#if defined(__GNUC__) && (__GNUC__ <= 3)
-#define EIGEN_GCC3_OR_OLDER 1
-#else
-#define EIGEN_GCC3_OR_OLDER 0
-#endif
-
 // 16 byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable
 // 16 byte alignment on all platforms where vectorization might be enabled. In theory we could always
 // enable alignment, but it can be a cause of problems on some platforms, so we just disable it in
@@ -104,7 +367,7 @@
 
 // Do we support r-value references?
 #if (__has_feature(cxx_rvalue_references) || \
-defined(__GXX_EXPERIMENTAL_CXX0X__) || \
+(defined(__cplusplus) && __cplusplus >= 201103L) || \
 (defined(_MSC_VER) && _MSC_VER >= 1600))
 #define EIGEN_HAVE_RVALUE_REFERENCES
 #endif
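The version macros and the new EIGEN_COMP_* / EIGEN_ARCH_* / EIGEN_OS_* identifiers are plain 0/1 (or version-number) macros, so they combine freely in preprocessor conditions. A small sketch, assuming Eigen 3.2.8 or later so that the new identifiers exist:

    #include <Eigen/Core>
    #include <cstdio>

    int main()
    {
    #if EIGEN_VERSION_AT_LEAST(3,2,8)
      std::printf("Eigen %d.%d.%d\n", EIGEN_WORLD_VERSION, EIGEN_MAJOR_VERSION, EIGEN_MINOR_VERSION);
    #endif
    #if EIGEN_COMP_GNUC && !EIGEN_COMP_CLANG
      std::printf("built with a GCC-compatible compiler (not clang)\n");
    #endif
    #if EIGEN_OS_LINUX || EIGEN_OS_MAC || EIGEN_OS_WIN
      std::printf("recognised operating system\n");
    #endif
      return 0;
    }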
@@ -26,7 +26,7 @@
 
 #ifndef EIGEN_NO_STATIC_ASSERT
 
-#if defined(__GXX_EXPERIMENTAL_CXX0X__) || (defined(_MSC_VER) && (_MSC_VER >= 1600))
+#if __has_feature(cxx_static_assert) || (defined(__cplusplus) && __cplusplus >= 201103L) || (EIGEN_COMP_MSVC >= 1600)
 
 // if native static_assert is enabled, let's use it
 #define EIGEN_STATIC_ASSERT(X,MSG) static_assert(X,#MSG);
@@ -45,7 +45,6 @@ ComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
 ComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW>& matrix, bool computeU) \
 { \
 typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> MatrixType; \
-typedef MatrixType::Scalar Scalar; \
 typedef MatrixType::RealScalar RealScalar; \
 typedef std::complex<RealScalar> ComplexScalar; \
 \
@@ -44,10 +44,6 @@ template<> inline \
 RealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
 RealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW>& matrix, bool computeU) \
 { \
-typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> MatrixType; \
-typedef MatrixType::Scalar Scalar; \
-typedef MatrixType::RealScalar RealScalar; \
-\
 eigen_assert(matrix.cols() == matrix.rows()); \
 \
 lapack_int n = matrix.cols(), sdim, info; \
@@ -83,10 +83,17 @@ public:
 template<typename Derived>
 inline explicit AngleAxis(const MatrixBase<Derived>& m) { *this = m; }
 
+/** \returns the value of the rotation angle in radian */
 Scalar angle() const { return m_angle; }
+/** \returns a read-write reference to the stored angle in radian */
 Scalar& angle() { return m_angle; }
 
+/** \returns the rotation axis */
 const Vector3& axis() const { return m_axis; }
+/** \returns a read-write reference to the stored rotation axis.
+*
+* \warning The rotation axis must remain a \b unit vector.
+*/
 Vector3& axis() { return m_axis; }
 
 /** Concatenates two rotations */
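The new documentation makes the read-write accessors explicit; in particular axis() exposes the stored vector directly, and the caller must keep it unit length. A short usage sketch:

    #include <Eigen/Geometry>
    #include <iostream>

    int main()
    {
      const double pi = 3.141592653589793;
      Eigen::AngleAxisd aa(0.25 * pi, Eigen::Vector3d::UnitZ());
      std::cout << "angle (rad): " << aa.angle() << "\n";
      std::cout << "axis:\n" << aa.axis() << "\n";
      aa.angle() = 0.5 * pi;                                  // read-write access to the angle
      aa.axis() = Eigen::Vector3d(1, 1, 0).normalized();      // the axis must stay a unit vector
      Eigen::Vector3d v = aa * Eigen::Vector3d::UnitX();
      std::cout << v.transpose() << "\n";
      return 0;
    }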
@@ -129,7 +129,7 @@ public:
 * determined by \a prec.
 *
 * \sa MatrixBase::isApprox() */
-bool isApprox(const ParametrizedLine& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+bool isApprox(const ParametrizedLine& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
 { return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); }
 
 protected:
@@ -102,15 +102,15 @@ template<int Mode> struct transform_make_affine;
 *
 * However, unlike a plain matrix, the Transform class provides many features
 * simplifying both its assembly and usage. In particular, it can be composed
-* with any other transformations (Transform,Translation,RotationBase,Matrix)
+* with any other transformations (Transform,Translation,RotationBase,DiagonalMatrix)
 * and can be directly used to transform implicit homogeneous vectors. All these
 * operations are handled via the operator*. For the composition of transformations,
 * its principle consists to first convert the right/left hand sides of the product
 * to a compatible (Dim+1)^2 matrix and then perform a pure matrix product.
 * Of course, internally, operator* tries to perform the minimal number of operations
 * according to the nature of each terms. Likewise, when applying the transform
-* to non homogeneous vectors, the latters are automatically promoted to homogeneous
-* one before doing the matrix product. The convertions to homogeneous representations
+* to points, the latters are automatically promoted to homogeneous vectors
+* before doing the matrix product. The conventions to homogeneous representations
 * are performed as follow:
 *
 * \b Translation t (Dim)x(1):
@@ -124,7 +124,7 @@ template<int Mode> struct transform_make_affine;
 * R & 0\\
 * 0\,...\,0 & 1
 * \end{array} \right) \f$
-*
+*<!--
 * \b Linear \b Matrix L (Dim)x(Dim):
 * \f$ \left( \begin{array}{cc}
 * L & 0\\
@@ -136,14 +136,20 @@ template<int Mode> struct transform_make_affine;
 * A\\
 * 0\,...\,0\,1
 * \end{array} \right) \f$
+*-->
+* \b Scaling \b DiagonalMatrix S (Dim)x(Dim):
+* \f$ \left( \begin{array}{cc}
+* S & 0\\
+* 0\,...\,0 & 1
+* \end{array} \right) \f$
 *
-* \b Column \b vector v (Dim)x(1):
+* \b Column \b point v (Dim)x(1):
 * \f$ \left( \begin{array}{c}
 * v\\
 * 1
 * \end{array} \right) \f$
 *
-* \b Set \b of \b column \b vectors V1...Vn (Dim)x(n):
+* \b Set \b of \b column \b points V1...Vn (Dim)x(n):
 * \f$ \left( \begin{array}{ccc}
 * v_1 & ... & v_n\\
 * 1 & ... & 1
@@ -384,26 +390,39 @@ public:
 /** \returns a writable expression of the translation vector of the transformation */
 inline TranslationPart translation() { return TranslationPart(m_matrix,0,Dim); }
 
-/** \returns an expression of the product between the transform \c *this and a matrix expression \a other
+/** \returns an expression of the product between the transform \c *this and a matrix expression \a other.
 *
-* The right hand side \a other might be either:
-* \li a vector of size Dim,
+* The right-hand-side \a other can be either:
 * \li an homogeneous vector of size Dim+1,
-* \li a set of vectors of size Dim x Dynamic,
-* \li a set of homogeneous vectors of size Dim+1 x Dynamic,
-* \li a linear transformation matrix of size Dim x Dim,
-* \li an affine transformation matrix of size Dim x Dim+1,
+* \li a set of homogeneous vectors of size Dim+1 x N,
 * \li a transformation matrix of size Dim+1 x Dim+1.
+*
+* Moreover, if \c *this represents an affine transformation (i.e., Mode!=Projective), then \a other can also be:
+* \li a point of size Dim (computes: \code this->linear() * other + this->translation()\endcode),
+* \li a set of N points as a Dim x N matrix (computes: \code (this->linear() * other).colwise() + this->translation()\endcode),
+*
+* In all cases, the return type is a matrix or vector of same sizes as the right-hand-side \a other.
+*
+* If you want to interpret \a other as a linear or affine transformation, then first convert it to a Transform<> type,
+* or do your own cooking.
+*
+* Finally, if you want to apply Affine transformations to vectors, then explicitly apply the linear part only:
+* \code
+* Affine3f A;
+* Vector3f v1, v2;
+* v2 = A.linear() * v1;
+* \endcode
+*
 */
 // note: this function is defined here because some compilers cannot find the respective declaration
 template<typename OtherDerived>
-EIGEN_STRONG_INLINE const typename internal::transform_right_product_impl<Transform, OtherDerived>::ResultType
+EIGEN_STRONG_INLINE const typename OtherDerived::PlainObject
 operator * (const EigenBase<OtherDerived> &other) const
 { return internal::transform_right_product_impl<Transform, OtherDerived>::run(*this,other.derived()); }
 
 /** \returns the product expression of a transformation matrix \a a times a transform \a b
 *
-* The left hand side \a other might be either:
+* The left hand side \a other can be either:
 * \li a linear transformation matrix of size Dim x Dim,
 * \li an affine transformation matrix of size Dim x Dim+1,
 * \li a general transformation matrix of size Dim+1 x Dim+1.
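The rewritten operator* documentation distinguishes points from vectors. A short sketch of the two cases it describes (a point receives the translation, a direction vector should go through linear() only):

    #include <Eigen/Geometry>
    #include <iostream>

    int main()
    {
      Eigen::Affine3f A = Eigen::Translation3f(1.f, 2.f, 3.f)
                        * Eigen::AngleAxisf(0.5f, Eigen::Vector3f::UnitZ());
      Eigen::Vector3f p(1.f, 0.f, 0.f);

      Eigen::Vector3f q = A * p;           // treated as a point: linear()*p + translation()
      Eigen::Vector3f v = A.linear() * p;  // treated as a direction: rotation part only
      std::cout << q.transpose() << "\n" << v.transpose() << "\n";
      return 0;
    }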
@@ -162,7 +162,7 @@ public:
 * determined by \a prec.
 *
 * \sa MatrixBase::isApprox() */
-bool isApprox(const Translation& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+bool isApprox(const Translation& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
 { return m_coeffs.isApprox(other.m_coeffs, prec); }
 
 };
@@ -139,6 +139,8 @@ struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
 * By default the iterations start with x=0 as an initial guess of the solution.
 * One can control the start using the solveWithGuess() method.
 *
+* ConjugateGradient can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
+*
 * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
 */
 template< typename _MatrixType, int _UpLo, typename _Preconditioner>
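For context, the basic matrix-backed use of ConjugateGradient that the documentation contrasts with the matrix-free mode; a minimal sketch with a small sparse SPD matrix:

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      const int n = 4;
      SpMat A(n, n);
      for (int i = 0; i < n; ++i)
      {
        A.insert(i, i) = 2.0;  // simple SPD tridiagonal matrix
        if (i + 1 < n) { A.insert(i, i + 1) = -1.0; A.insert(i + 1, i) = -1.0; }
      }
      A.makeCompressed();
      Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

      Eigen::ConjugateGradient<SpMat> cg; // defaults: lower triangular part, diagonal preconditioner
      cg.compute(A);
      Eigen::VectorXd x = cg.solve(b);
      std::cout << "#iterations: " << cg.iterations() << ", error: " << cg.error() << "\n";
      return 0;
    }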
@@ -688,7 +688,7 @@ struct solve_retval<FullPivLU<_MatrixType>, Rhs>
 */
 
 const Index rows = dec().rows(), cols = dec().cols(),
-nonzero_pivots = dec().nonzeroPivots();
+nonzero_pivots = dec().rank();
 eigen_assert(rhs().rows() == rows);
 const Index smalldim = (std::min)(rows, cols);
 
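nonzeroPivots() counts pivots that are exactly non-zero, whereas rank() applies the decomposition's threshold, which is what the solver now uses. A short sketch of the user-level API involved, with an explicit threshold and a rank-deficient but consistent system:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A(3, 3);
      A << 1, 2, 3,
           2, 4, 6,   // second row is twice the first: A is rank deficient
           1, 0, 1;
      Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
      lu.setThreshold(1e-10);                // pivots below this threshold count as zero
      std::cout << "rank: " << lu.rank() << "\n";
      Eigen::VectorXd b(3); b << 1, 2, 1;    // consistent right-hand side
      Eigen::VectorXd x = lu.solve(b);       // one particular solution of the system
      std::cout << x.transpose() << "\n";
      return 0;
    }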
@@ -8,7 +8,7 @@
 NOTE: this routine has been adapted from the CSparse library:
 
 Copyright (c) 2006, Timothy A. Davis.
-http://www.cise.ufl.edu/research/sparse/CSparse
+http://www.suitesparse.com
 
 CSparse is free software; you can redistribute it and/or
 modify it under the terms of the GNU Lesser General Public
@ -41,12 +41,8 @@
|
||||||
//
|
//
|
||||||
// The colamd/symamd library is available at
|
// The colamd/symamd library is available at
|
||||||
//
|
//
|
||||||
// http://www.cise.ufl.edu/research/sparse/colamd/
|
// http://www.suitesparse.com
|
||||||
|
|
||||||
// This is the http://www.cise.ufl.edu/research/sparse/colamd/colamd.h
|
|
||||||
// file. It is required by the colamd.c, colamdmex.c, and symamdmex.c
|
|
||||||
// files, and by any C code that calls the routines whose prototypes are
|
|
||||||
// listed below, or that uses the colamd/symamd definitions listed below.
|
|
||||||
|
|
||||||
#ifndef EIGEN_COLAMD_H
|
#ifndef EIGEN_COLAMD_H
|
||||||
#define EIGEN_COLAMD_H
|
#define EIGEN_COLAMD_H
|
||||||
|
@ -102,9 +98,6 @@ namespace internal {
|
||||||
/* === Definitions ========================================================== */
|
/* === Definitions ========================================================== */
|
||||||
/* ========================================================================== */
|
/* ========================================================================== */
|
||||||
|
|
||||||
#define COLAMD_MAX(a,b) (((a) > (b)) ? (a) : (b))
|
|
||||||
#define COLAMD_MIN(a,b) (((a) < (b)) ? (a) : (b))
|
|
||||||
|
|
||||||
#define ONES_COMPLEMENT(r) (-(r)-1)
|
#define ONES_COMPLEMENT(r) (-(r)-1)
|
||||||
|
|
||||||
/* -------------------------------------------------------------------------- */
|
/* -------------------------------------------------------------------------- */
|
||||||
|
@ -516,7 +509,7 @@ static Index init_rows_cols /* returns true if OK, or false otherwise */
|
||||||
Col [col].start = p [col] ;
|
Col [col].start = p [col] ;
|
||||||
Col [col].length = p [col+1] - p [col] ;
|
Col [col].length = p [col+1] - p [col] ;
|
||||||
|
|
||||||
if (Col [col].length < 0)
|
if ((Col [col].length) < 0) // extra parentheses to work-around gcc bug 10200
|
||||||
{
|
{
|
||||||
/* column pointers must be non-decreasing */
|
/* column pointers must be non-decreasing */
|
||||||
stats [COLAMD_STATUS] = COLAMD_ERROR_col_length_negative ;
|
stats [COLAMD_STATUS] = COLAMD_ERROR_col_length_negative ;
|
||||||
|
@ -739,8 +732,8 @@ static void init_scoring
|
||||||
|
|
||||||
/* === Extract knobs ==================================================== */
|
/* === Extract knobs ==================================================== */
|
||||||
|
|
||||||
dense_row_count = COLAMD_MAX (0, COLAMD_MIN (knobs [COLAMD_DENSE_ROW] * n_col, n_col)) ;
|
dense_row_count = std::max<Index>(0, (std::min)(Index(knobs [COLAMD_DENSE_ROW] * n_col), n_col)) ;
|
||||||
dense_col_count = COLAMD_MAX (0, COLAMD_MIN (knobs [COLAMD_DENSE_COL] * n_row, n_row)) ;
|
dense_col_count = std::max<Index>(0, (std::min)(Index(knobs [COLAMD_DENSE_COL] * n_row), n_row)) ;
|
||||||
COLAMD_DEBUG1 (("colamd: densecount: %d %d\n", dense_row_count, dense_col_count)) ;
|
COLAMD_DEBUG1 (("colamd: densecount: %d %d\n", dense_row_count, dense_col_count)) ;
|
||||||
max_deg = 0 ;
|
max_deg = 0 ;
|
||||||
n_col2 = n_col ;
|
n_col2 = n_col ;
|
||||||
|
@@ -804,7 +797,7 @@ static void init_scoring
 else
 {
 /* keep track of max degree of remaining rows */
-max_deg = COLAMD_MAX (max_deg, deg) ;
+max_deg = (std::max)(max_deg, deg) ;
 }
 }
 COLAMD_DEBUG1 (("colamd: Dense and null rows killed: %d\n", n_row - n_row2)) ;
@@ -842,7 +835,7 @@ static void init_scoring
 /* add row's external degree */
 score += Row [row].shared1.degree - 1 ;
 /* guard against integer overflow */
-score = COLAMD_MIN (score, n_col) ;
+score = (std::min)(score, n_col) ;
 }
 /* determine pruned column length */
 col_length = (Index) (new_cp - &A [Col [c].start]) ;
@@ -914,7 +907,7 @@ static void init_scoring
 head [score] = c ;

 /* see if this score is less than current min */
-min_score = COLAMD_MIN (min_score, score) ;
+min_score = (std::min)(min_score, score) ;


 }
@@ -1040,7 +1033,7 @@ static Index find_ordering /* return the number of garbage collections */

 /* === Garbage_collection, if necessary ============================= */

-needed_memory = COLAMD_MIN (pivot_col_score, n_col - k) ;
+needed_memory = (std::min)(pivot_col_score, n_col - k) ;
 if (pfree + needed_memory >= Alen)
 {
 pfree = Eigen::internal::garbage_collection (n_row, n_col, Row, Col, A, &A [pfree]) ;
@@ -1099,7 +1092,7 @@ static Index find_ordering /* return the number of garbage collections */

 /* clear tag on pivot column */
 Col [pivot_col].shared1.thickness = pivot_col_thickness ;
-max_deg = COLAMD_MAX (max_deg, pivot_row_degree) ;
+max_deg = (std::max)(max_deg, pivot_row_degree) ;


 /* === Kill all rows used to construct pivot row ==================== */
@@ -1273,7 +1266,7 @@ static Index find_ordering /* return the number of garbage collections */
 /* add set difference */
 cur_score += row_mark - tag_mark ;
 /* integer overflow... */
-cur_score = COLAMD_MIN (cur_score, n_col) ;
+cur_score = (std::min)(cur_score, n_col) ;
 }

 /* recompute the column's length */
@@ -1386,7 +1379,7 @@ static Index find_ordering /* return the number of garbage collections */
 cur_score -= Col [col].shared1.thickness ;

 /* make sure score is less or equal than the max score */
-cur_score = COLAMD_MIN (cur_score, max_score) ;
+cur_score = (std::min)(cur_score, max_score) ;
 COLAMD_ASSERT (cur_score >= 0) ;

 /* store updated score */
@@ -1409,7 +1402,7 @@ static Index find_ordering /* return the number of garbage collections */
 head [cur_score] = col ;

 /* see if this score is less than current min */
-min_score = COLAMD_MIN (min_score, cur_score) ;
+min_score = (std::min)(min_score, cur_score) ;

 }

@@ -49,7 +49,6 @@ ColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynami
 { \
 using std::abs; \
 typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> MatrixType; \
-typedef MatrixType::Scalar Scalar; \
 typedef MatrixType::RealScalar RealScalar; \
 Index rows = matrix.rows();\
 Index cols = matrix.cols();\
@@ -816,7 +816,7 @@ void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, u

 if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this);
 if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this);
-if(m_cols!=m_cols) m_scaledMatrix.resize(rows,cols);
+if(m_rows!=m_cols) m_scaledMatrix.resize(rows,cols);
 }

 template<typename MatrixType, int QRPreconditioner>
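The one-character fix above matters because m_cols!=m_cols is always false, so the pre-scaling matrix was never resized for rectangular inputs. For context, here is a short sketch of the public API this allocation code sits behind; this is standard JacobiSVD usage, not code from the patch:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      // A rectangular matrix exercises the rows != cols allocation path.
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);
      Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
      std::cout << "singular values: " << svd.singularValues().transpose() << "\n";

      // Least-squares solve of A x = b via the SVD.
      Eigen::VectorXd b = Eigen::VectorXd::Random(5);
      Eigen::VectorXd x = svd.solve(b);
      std::cout << "residual: " << (A * x - b).norm() << "\n";
      return 0;
    }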
@@ -45,8 +45,8 @@ JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPiv
 JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>& matrix, unsigned int computationOptions) \
 { \
 typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> MatrixType; \
-typedef MatrixType::Scalar Scalar; \
-typedef MatrixType::RealScalar RealScalar; \
+/*typedef MatrixType::Scalar Scalar;*/ \
+/*typedef MatrixType::RealScalar RealScalar;*/ \
 allocate(matrix.rows(), matrix.cols(), computationOptions); \
 \
 /*const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();*/ \
@@ -364,10 +364,11 @@ public:

 protected:

+EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)

 typename SparseMatrixType::Nested m_matrix;
 Index m_outerStart;
 const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;

 };

 //----------
@@ -528,7 +529,8 @@ public:
 const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
 const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
 const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
+private:
+Index nonZeros() const;
 };

 } // end namespace Eigen
@@ -55,10 +55,9 @@ class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Sparse>
 EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
 CwiseBinaryOpImpl()
 {
-typedef typename internal::traits<Lhs>::StorageKind LhsStorageKind;
-typedef typename internal::traits<Rhs>::StorageKind RhsStorageKind;
 EIGEN_STATIC_ASSERT((
-(!internal::is_same<LhsStorageKind,RhsStorageKind>::value)
+(!internal::is_same<typename internal::traits<Lhs>::StorageKind,
+typename internal::traits<Rhs>::StorageKind>::value)
 || ((Lhs::Flags&RowMajorBit) == (Rhs::Flags&RowMajorBit))),
 THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH);
 }
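The reworked static assertion inlines the two StorageKind typedefs but enforces the same rule as before: the operands of a sparse binary expression must agree on storage order. A hedged sketch of what passes and what fails that compile-time check:

    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double, Eigen::ColMajor> a(10, 10), b(10, 10);
      Eigen::SparseMatrix<double, Eigen::RowMajor> c(10, 10);

      Eigen::SparseMatrix<double> ok = a + b;      // same storage order: compiles and runs
      // Eigen::SparseMatrix<double> bad = a + c;  // mixing column- and row-major operands
      //   would trigger THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH at compile time
      (void)ok;
      return 0;
    }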
@@ -35,9 +35,9 @@ class SparseView : public SparseMatrixBase<SparseView<MatrixType> >
 public:
 EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView)

-SparseView(const MatrixType& mat, const Scalar& m_reference = Scalar(0),
-typename NumTraits<Scalar>::Real m_epsilon = NumTraits<Scalar>::dummy_precision()) :
-m_matrix(mat), m_reference(m_reference), m_epsilon(m_epsilon) {}
+explicit SparseView(const MatrixType& mat, const Scalar& reference = Scalar(0),
+const RealScalar &epsilon = NumTraits<Scalar>::dummy_precision())
+: m_matrix(mat), m_reference(reference), m_epsilon(epsilon) {}

 class InnerIterator;

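SparseView is normally reached through MatrixBase::sparseView(reference, epsilon), which drops coefficients that are much smaller than the given reference value (relative to epsilon); the constructor change above only renames the parameters and passes epsilon by const reference. A small usage sketch; the expected count in the comment is an assumption of this example, not something stated in the patch:

    #include <Eigen/Dense>
    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd d(3, 3);
      d << 1.0, 0.0, 1e-12,
           0.0, 2.0, 0.0,
           0.0, 0.0, 3.0;
      // Treat anything whose magnitude is at most 1.0 * 1e-9 as zero.
      Eigen::SparseMatrix<double> s = d.sparseView(1.0, 1e-9);
      std::cout << "nonzeros: " << s.nonZeros() << "\n";  // expected: 3 (1.0, 2.0, 3.0)
      return 0;
    }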
@@ -13,32 +13,24 @@

 #include "details.h"

-// Define the explicit instantiation (e.g. necessary for the Intel compiler)
-#if defined(__INTEL_COMPILER) || defined(__GNUC__)
-#define EIGEN_EXPLICIT_STL_DEQUE_INSTANTIATION(...) template class std::deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> >;
-#else
-#define EIGEN_EXPLICIT_STL_DEQUE_INSTANTIATION(...)
-#endif

 /**
 * This section contains a convenience MACRO which allows an easy specialization of
 * std::deque such that for data types with alignment issues the correct allocator
 * is used automatically.
 */
 #define EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(...) \
-EIGEN_EXPLICIT_STL_DEQUE_INSTANTIATION(__VA_ARGS__) \
 namespace std \
 { \
-template<typename _Ay> \
-class deque<__VA_ARGS__, _Ay> \
+template<> \
+class deque<__VA_ARGS__, std::allocator<__VA_ARGS__> > \
 : public deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \
 { \
 typedef deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > deque_base; \
 public: \
 typedef __VA_ARGS__ value_type; \
-typedef typename deque_base::allocator_type allocator_type; \
-typedef typename deque_base::size_type size_type; \
-typedef typename deque_base::iterator iterator; \
+typedef deque_base::allocator_type allocator_type; \
+typedef deque_base::size_type size_type; \
+typedef deque_base::iterator iterator; \
 explicit deque(const allocator_type& a = allocator_type()) : deque_base(a) {} \
 template<typename InputIterator> \
 deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : deque_base(first, last, a) {} \
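After this change the macro provides a full specialization of std::deque for the default std::allocator instead of a partial specialization over every allocator type. The way user code invokes it is unchanged; a sketch of the documented pattern for fixed-size vectorizable element types:

    #include <Eigen/StdDeque>
    #include <Eigen/Dense>

    // Must appear at global scope, before the container type is used.
    EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Eigen::Vector4f)

    int main()
    {
      std::deque<Eigen::Vector4f> q;   // now backed by an aligned allocator
      q.push_back(Eigen::Vector4f::Ones());
      return q.front().sum() == 4.0f ? 0 : 1;
    }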
@@ -12,32 +12,24 @@

 #include "details.h"

-// Define the explicit instantiation (e.g. necessary for the Intel compiler)
-#if defined(__INTEL_COMPILER) || defined(__GNUC__)
-#define EIGEN_EXPLICIT_STL_LIST_INSTANTIATION(...) template class std::list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> >;
-#else
-#define EIGEN_EXPLICIT_STL_LIST_INSTANTIATION(...)
-#endif

 /**
 * This section contains a convenience MACRO which allows an easy specialization of
 * std::list such that for data types with alignment issues the correct allocator
 * is used automatically.
 */
 #define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...) \
-EIGEN_EXPLICIT_STL_LIST_INSTANTIATION(__VA_ARGS__) \
 namespace std \
 { \
-template<typename _Ay> \
-class list<__VA_ARGS__, _Ay> \
+template<> \
+class list<__VA_ARGS__, std::allocator<__VA_ARGS__> > \
 : public list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \
 { \
 typedef list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > list_base; \
 public: \
 typedef __VA_ARGS__ value_type; \
-typedef typename list_base::allocator_type allocator_type; \
-typedef typename list_base::size_type size_type; \
-typedef typename list_base::iterator iterator; \
+typedef list_base::allocator_type allocator_type; \
+typedef list_base::size_type size_type; \
+typedef list_base::iterator iterator; \
 explicit list(const allocator_type& a = allocator_type()) : list_base(a) {} \
 template<typename InputIterator> \
 list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : list_base(first, last, a) {} \
@@ -89,6 +89,7 @@ add_dependencies(doc-unsupported-prerequisites unsupported_snippets unsupported_
 add_custom_target(doc ALL
 COMMAND doxygen
 COMMAND doxygen Doxyfile-unsupported
+COMMAND ${CMAKE_COMMAND} -E copy ${Eigen_BINARY_DIR}/doc/html/group__TopicUnalignedArrayAssert.html ${Eigen_BINARY_DIR}/doc/html/TopicUnalignedArrayAssert.html
 COMMAND ${CMAKE_COMMAND} -E rename html eigen-doc
 COMMAND ${CMAKE_COMMAND} -E remove eigen-doc/eigen-doc.tgz
 COMMAND ${CMAKE_COMMAND} -E tar cfz eigen-doc.tgz eigen-doc
@@ -121,6 +121,8 @@ namespace Eigen {
 \ingroup Sparse_chapter */
 /** \addtogroup TopicSparseSystems
 \ingroup Sparse_chapter */
+/** \addtogroup MatrixfreeSolverExample
+\ingroup Sparse_chapter */

 /** \addtogroup Sparse_Reference
 \ingroup Sparse_chapter */
@@ -91,6 +91,7 @@ following macros are supported; none of them are defined by default.
 - \b EIGEN_MATRIX_PLUGIN - filename of plugin for extending the Matrix class.
 - \b EIGEN_MATRIXBASE_PLUGIN - filename of plugin for extending the MatrixBase class.
 - \b EIGEN_PLAINOBJECTBASE_PLUGIN - filename of plugin for extending the PlainObjectBase class.
+- \b EIGEN_MAPBASE_PLUGIN - filename of plugin for extending the MapBase class.
 - \b EIGEN_QUATERNIONBASE_PLUGIN - filename of plugin for extending the QuaternionBase class.
 - \b EIGEN_SPARSEMATRIX_PLUGIN - filename of plugin for extending the SparseMatrix class.
 - \b EIGEN_SPARSEMATRIXBASE_PLUGIN - filename of plugin for extending the SparseMatrixBase class.
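The EIGEN_MAPBASE_PLUGIN entry added above works like the other *_PLUGIN hooks in this list: the macro names a header that is textually included inside the corresponding class body. A hedged sketch using the MatrixBase hook; the file name MyMatrixBaseAddons.h and the at() helper are invented for this example:

    // --- MyMatrixBaseAddons.h: injected verbatim into the body of Eigen::MatrixBase ---
    // inline Scalar at(Index i, Index j) const { return this->operator()(i, j); }

    // --- main.cpp ---
    #define EIGEN_MATRIXBASE_PLUGIN "MyMatrixBaseAddons.h"
    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix2d m;
      m << 1, 2,
           3, 4;
      std::cout << m.at(1, 0) << "\n";  // prints 3, using the injected helper
      return 0;
    }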
@@ -35,17 +35,17 @@ They are summarized in the following table:
 <td>Requires the <a href="http://pastix.gforge.inria.fr">PaStiX</a> package, \b CeCILL-C </td>
 <td>optimized for tough problems and symmetric patterns</td></tr>
 <tr><td>CholmodSupernodalLLT</td><td>\link CholmodSupport_Module CholmodSupport \endlink</td><td>Direct LLt factorization</td><td>SPD</td><td>Fill-in reducing, Leverage fast dense algebra</td>
-<td>Requires the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">SuiteSparse</a> package, \b GPL </td>
+<td>Requires the <a href="http://www.suitesparse.com">SuiteSparse</a> package, \b GPL </td>
 <td></td></tr>
 <tr><td>UmfPackLU</td><td>\link UmfPackSupport_Module UmfPackSupport \endlink</td><td>Direct LU factorization</td><td>Square</td><td>Fill-in reducing, Leverage fast dense algebra</td>
-<td>Requires the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">SuiteSparse</a> package, \b GPL </td>
+<td>Requires the <a href="http://www.suitesparse.com">SuiteSparse</a> package, \b GPL </td>
 <td></td></tr>
 <tr><td>SuperLU</td><td>\link SuperLUSupport_Module SuperLUSupport \endlink</td><td>Direct LU factorization</td><td>Square</td><td>Fill-in reducing, Leverage fast dense algebra</td>
 <td>Requires the <a href="http://crd-legacy.lbl.gov/~xiaoye/SuperLU/">SuperLU</a> library, (BSD-like)</td>
 <td></td></tr>
 <tr><td>SPQR</td><td>\link SPQRSupport_Module SPQRSupport \endlink </td> <td> QR factorization </td>
 <td> Any, rectangular</td><td>fill-in reducing, multithreaded, fast dense algebra</td>
-<td> requires the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">SuiteSparse</a> package, \b GPL </td><td>recommended for linear least-squares problems, has a rank-revealing feature</tr>
+<td> requires the <a href="http://www.suitesparse.com">SuiteSparse</a> package, \b GPL </td><td>recommended for linear least-squares problems, has a rank-revealing feature</tr>
 </table>

 Here \c SPD means symmetric positive definite.
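The table rows above only change the SuiteSparse URLs; the solver interfaces themselves are untouched. For reference, all of the listed solvers follow the same compute()/solve() pattern, shown here with the built-in SimplicialLLT as a sketch of standard usage:

    #include <Eigen/Sparse>
    #include <iostream>
    #include <vector>

    int main()
    {
      typedef Eigen::Triplet<double> T;
      const int n = 4;
      std::vector<T> entries;
      for (int i = 0; i < n; ++i)
      {
        entries.push_back(T(i, i, 2.0));                     // SPD tridiagonal matrix
        if (i + 1 < n)
        {
          entries.push_back(T(i, i + 1, -1.0));
          entries.push_back(T(i + 1, i, -1.0));
        }
      }
      Eigen::SparseMatrix<double> A(n, n);
      A.setFromTriplets(entries.begin(), entries.end());

      Eigen::VectorXd b = Eigen::VectorXd::Ones(n);
      Eigen::SimplicialLLT<Eigen::SparseMatrix<double> > solver;
      solver.compute(A);
      if (solver.info() != Eigen::Success) return 1;  // factorization failed
      Eigen::VectorXd x = solver.solve(b);
      std::cout << "residual: " << (A * x - b).norm() << "\n";
      return 0;
    }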
@@ -21,7 +21,7 @@ i.e either row major or column major. The default is column major. Most arithmet
 <td> Resize/Reserve</td>
 <td>
 \code
-sm1.resize(m,n); //Change sm1 to a m x n matrix.
+sm1.resize(m,n); // Change sm1 to a m x n matrix.
 sm1.reserve(nnz); // Allocate room for nnz nonzeros elements.
 \endcode
 </td>
@@ -153,7 +153,7 @@ It is easy to perform arithmetic operations on sparse matrices provided that the
 \code
 perm.indices(); // Reference to the vector of indices
 sm1.twistedBy(perm); // Permute rows and columns
-sm2 = sm1 * perm; //Permute the columns
+sm2 = sm1 * perm; // Permute the columns
 sm2 = perm * sm1; // Permute the columns
 \endcode
 </td>
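The snippet above only gains a space in a comment, but the operations it lists are easy to try end to end. A self-contained sketch using standard PermutationMatrix and SparseMatrix calls:

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      const int n = 3;
      Eigen::SparseMatrix<double> sm1(n, n);
      sm1.insert(0, 0) = 1.0;
      sm1.insert(1, 1) = 2.0;
      sm1.insert(2, 2) = 3.0;
      sm1.makeCompressed();

      Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic> perm(n);
      perm.indices() << 2, 0, 1;                               // the permutation vector

      Eigen::SparseMatrix<double> sym  = sm1.twistedBy(perm);  // permute rows and columns
      Eigen::SparseMatrix<double> cols = sm1 * perm;           // permute columns
      Eigen::SparseMatrix<double> rows = perm * sm1;           // permute rows
      std::cout << sym.nonZeros() << " " << cols.nonZeros() << " " << rows.nonZeros() << "\n";
      return 0;
    }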
@@ -181,9 +181,9 @@ sm2 = perm * sm1; // Permute the columns

 \section sparseotherops Other supported operations
 <table class="manual">
-<tr><th>Operations</th> <th> Code </th> <th> Notes</th> </tr>
+<tr><th style="min-width:initial"> Code </th> <th> Notes</th> </tr>
+<tr><td colspan="2">Sub-matrices</td></tr>
 <tr>
-<td>Sub-matrices</td>
 <td>
 \code
 sm1.block(startRow, startCol, rows, cols);
@@ -193,25 +193,31 @@ sm2 = perm * sm1; // Permute the columns
 sm1.bottomLeftCorner( rows, cols);
 sm1.bottomRightCorner( rows, cols);
 \endcode
-</td> <td> </td>
+</td><td>
+Contrary to dense matrices, here <strong>all these methods are read-only</strong>.\n
+See \ref TutorialSparse_SubMatrices and below for read-write sub-matrices.
+</td>
 </tr>
-<tr>
-<td> Range </td>
+<tr class="alt"><td colspan="2"> Range </td></tr>
+<tr class="alt">
 <td>
 \code
-sm1.innerVector(outer);
-sm1.innerVectors(start, size);
-sm1.leftCols(size);
-sm2.rightCols(size);
-sm1.middleRows(start, numRows);
-sm1.middleCols(start, numCols);
-sm1.col(j);
+sm1.innerVector(outer); // RW
+sm1.innerVectors(start, size); // RW
+sm1.leftCols(size); // RW
+sm2.rightCols(size); // RO because sm2 is row-major
+sm1.middleRows(start, numRows); // RO becasue sm1 is column-major
+sm1.middleCols(start, numCols); // RW
+sm1.col(j); // RW
 \endcode
 </td>
-<td>A inner vector is either a row (for row-major) or a column (for column-major). As stated earlier, the evaluation can be done in a matrix with different storage order </td>
+<td>
+A inner vector is either a row (for row-major) or a column (for column-major).\n
+As stated earlier, for a read-write sub-matrix (RW), the evaluation can be done in a matrix with different storage order.
+</td>
 </tr>
+<tr><td colspan="2"> Triangular and selfadjoint views</td></tr>
 <tr>
-<td> Triangular and selfadjoint views</td>
 <td>
 \code
 sm2 = sm1.triangularview<Lower>();
@@ -222,26 +228,30 @@ sm2 = perm * sm1; // Permute the columns
 \code
 \endcode </td>
 </tr>
-<tr>
-<td>Triangular solve </td>
+<tr class="alt"><td colspan="2">Triangular solve </td></tr>
+<tr class="alt">
 <td>
 \code
 dv2 = sm1.triangularView<Upper>().solve(dv1);
-dv2 = sm1.topLeftCorner(size, size).triangularView<Lower>().solve(dv1);
+dv2 = sm1.topLeftCorner(size, size)
+.triangularView<Lower>().solve(dv1);
 \endcode
 </td>
 <td> For general sparse solve, Use any suitable module described at \ref TopicSparseSystems </td>
 </tr>
+<tr><td colspan="2"> Low-level API</td></tr>
 <tr>
-<td> Low-level API</td>
 <td>
 \code
 sm1.valuePtr(); // Pointer to the values
 sm1.innerIndextr(); // Pointer to the indices.
-sm1.outerIndexPtr(); //Pointer to the beginning of each inner vector
+sm1.outerIndexPtr(); // Pointer to the beginning of each inner vector
 \endcode
 </td>
-<td> If the matrix is not in compressed form, makeCompressed() should be called before. Note that these functions are mostly provided for interoperability purposes with external libraries. A better access to the values of the matrix is done by using the InnerIterator class as described in \link TutorialSparse the Tutorial Sparse \endlink section</td>
+<td>
+If the matrix is not in compressed form, makeCompressed() should be called before.\n
+Note that these functions are mostly provided for interoperability purposes with external libraries.\n
+A better access to the values of the matrix is done by using the InnerIterator class as described in \link TutorialSparse the Tutorial Sparse \endlink section</td>
 </tr>
 </table>
 */
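The low-level accessors in this table map directly onto the compressed-storage arrays; note that innerIndextr() in the quoted snippet looks like a typo for innerIndexPtr(). A short sketch of reading the raw arrays after makeCompressed(), using standard API:

    #include <Eigen/Sparse>
    #include <cstdio>

    int main()
    {
      Eigen::SparseMatrix<double> sm1(3, 3);   // column-major by default
      sm1.insert(0, 0) = 1.0;
      sm1.insert(2, 1) = 2.0;
      sm1.insert(1, 2) = 3.0;
      sm1.makeCompressed();                    // required before touching the raw arrays

      const double* values = sm1.valuePtr();       // nonzero values
      const int*    inner  = sm1.innerIndexPtr();  // row index of each value (column-major)
      const int*    outer  = sm1.outerIndexPtr();  // start of each column in the arrays above

      for (int col = 0; col < sm1.outerSize(); ++col)
        for (int k = outer[col]; k < outer[col + 1]; ++k)
          std::printf("(%d,%d) = %g\n", inner[k], col, values[k]);
      return 0;
    }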
@@ -241,11 +241,11 @@ In the following \em sm denotes a sparse matrix, \em sv a sparse vector, \em dm
 sm1.real() sm1.imag() -sm1 0.5*sm1
 sm1+sm2 sm1-sm2 sm1.cwiseProduct(sm2)
 \endcode
-However, a strong restriction is that the storage orders must match. For instance, in the following example:
+However, <strong>a strong restriction is that the storage orders must match</strong>. For instance, in the following example:
 \code
 sm4 = sm1 + sm2 + sm3;
 \endcode
-sm1, sm2, and sm3 must all be row-major or all column major.
+sm1, sm2, and sm3 must all be row-major or all column-major.
 On the other hand, there is no restriction on the target matrix sm4.
 For instance, this means that for computing \f$ A^T + A \f$, the matrix \f$ A^T \f$ must be evaluated into a temporary matrix of compatible storage order:
 \code
@@ -307,6 +307,26 @@ sm2 = sm1.transpose() * P;
 \endcode


+\subsection TutorialSparse_SubMatrices Block operations
+
+Regarding read-access, sparse matrices expose the same API than for dense matrices to access to sub-matrices such as blocks, columns, and rows. See \ref TutorialBlockOperations for a detailed introduction.
+However, for performance reasons, writing to a sub-sparse-matrix is much more limited, and currently only contiguous sets of columns (resp. rows) of a column-major (resp. row-major) SparseMatrix are writable. Moreover, this information has to be known at compile-time, leaving out methods such as <tt>block(...)</tt> and <tt>corner*(...)</tt>. The available API for write-access to a SparseMatrix are summarized below:
+\code
+SparseMatrix<double,ColMajor> sm1;
+sm1.col(j) = ...;
+sm1.leftCols(ncols) = ...;
+sm1.middleCols(j,ncols) = ...;
+sm1.rightCols(ncols) = ...;
+
+SparseMatrix<double,RowMajor> sm2;
+sm2.row(i) = ...;
+sm2.topRows(nrows) = ...;
+sm2.middleRows(i,nrows) = ...;
+sm2.bottomRows(nrows) = ...;
+\endcode
+
+In addition, sparse matrices expose the SparseMatrixBase::innerVector() and SparseMatrixBase::innerVectors() methods, which are aliases to the col/middleCols methods for a column-major storage, and to the row/middleRows methods for a row-major storage.
+
 \subsection TutorialSparse_TriangularSelfadjoint Triangular and selfadjoint views

 Just as with dense matrices, the triangularView() function can be used to address a triangular part of the matrix, and perform triangular solves with a dense right hand side:
@@ -7,8 +7,8 @@ Hello! You are seeing this webpage because your program terminated on an asserti
 my_program: path/to/eigen/Eigen/src/Core/DenseStorage.h:44:
 Eigen::internal::matrix_array<T, Size, MatrixOptions, Align>::internal::matrix_array()
 [with T = double, int Size = 2, int MatrixOptions = 2, bool Align = true]:
-Assertion `(reinterpret_cast<size_t>(array) & 0xf) == 0 && "this assertion
-is explained here: http://eigen.tuxfamily.org/dox/UnalignedArrayAssert.html
+Assertion `(reinterpret_cast<size_t>(array) & (sizemask)) == 0 && "this assertion
+is explained here: http://eigen.tuxfamily.org/dox/group__TopicUnalignedArrayAssert.html
 **** READ THIS WEB PAGE !!! ****"' failed.
 </pre>

@@ -46,9 +46,9 @@ then you need to read this separate page: \ref TopicStructHavingEigenMembers "St

 Note that here, Eigen::Vector2d is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types".

-\section c2 Cause 2: STL Containers
+\section c2 Cause 2: STL Containers or manual memory allocation

-If you use STL Containers such as std::vector, std::map, ..., with Eigen objects, or with classes containing Eigen objects, like this,
+If you use STL Containers such as std::vector, std::map, ..., with %Eigen objects, or with classes containing %Eigen objects, like this,

 \code
 std::vector<Eigen::Matrix2f> my_vector;
@@ -60,6 +60,8 @@ then you need to read this separate page: \ref TopicStlContainers "Using STL Con

 Note that here, Eigen::Matrix2f is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member".

+The same issue will be exhibited by any classes/functions by-passing operator new to allocate memory, that is, by performing custom memory allocation followed by calls to the placement new operator. This is for instance typically the case of \c std::make_shared or \c std::allocate_shared for which is the solution is to use an \ref aligned_allocator "aligned allocator" as detailed in the \ref TopicStlContainers "solution for STL containers".
+
 \section c3 Cause 3: Passing Eigen objects by value

 If some function in your code is getting an Eigen object passed by value, like this,
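The new paragraph points to an aligned allocator as the fix for std::make_shared and std::allocate_shared; a minimal sketch of that pattern, assuming a C++11 compiler for std::allocate_shared:

    #include <Eigen/Dense>
    #include <memory>

    struct Foo
    {
      Eigen::Vector4d v;                 // fixed-size vectorizable member
      EIGEN_MAKE_ALIGNED_OPERATOR_NEW    // fixes plain `new Foo`, but not placement-new users
    };

    int main()
    {
      // std::make_shared<Foo>() bypasses Foo::operator new, so alignment is not guaranteed.
      // Passing Eigen's aligned allocator restores the guarantee:
      std::shared_ptr<Foo> p = std::allocate_shared<Foo>(Eigen::aligned_allocator<Foo>());
      p->v.setZero();
      return 0;
    }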
@@ -107,7 +109,10 @@ Two possibilities:
 128-bit alignment code and thus preserves ABI compatibility, but completely disables vectorization.</li>
 </ul>

-For more information, see <a href="http://eigen.tuxfamily.org/index.php?title=FAQ#I_disabled_vectorization.2C_but_I.27m_still_getting_annoyed_about_alignment_issues.21">this FAQ</a>.
+If you want to know why defining EIGEN_DONT_VECTORIZE does not by itself disable 128-bit alignment and the assertion, here's the explanation:
+
+It doesn't disable the assertion, because otherwise code that runs fine without vectorization would suddenly crash when enabling vectorization.
+It doesn't disable 128bit alignment, because that would mean that vectorized and non-vectorized code are not mutually ABI-compatible. This ABI compatibility is very important, even for people who develop only an in-house application, as for instance one may want to have in the same application a vectorized path and a non-vectorized path.

 */

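The macros discussed here are ordinary preprocessor switches that must be defined before the first Eigen include. A sketch of the combination the page alludes to, assuming the goal is simply to silence the assertion while accepting the loss of explicit vectorization:

    // Alternatively pass these on the compiler command line:
    //   -DEIGEN_DONT_VECTORIZE -DEIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
    #define EIGEN_DONT_VECTORIZE
    #define EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
    #include <Eigen/Dense>

    int main()
    {
      // With these defines, Eigen compiles the unaligned-array assertion out and
      // does not use explicit SIMD, so code that would otherwise trip the
      // assertion keeps running (just more slowly).
      Eigen::Vector4f v = Eigen::Vector4f::Zero();
      return static_cast<int>(v.sum());
    }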
@@ -1,6 +1,9 @@
+prefix=@CMAKE_INSTALL_PREFIX@
+exec_prefix=${prefix}
+
 Name: Eigen3
 Description: A C++ template library for linear algebra: vectors, matrices, and related algorithms
 Requires:
-Version: ${EIGEN_VERSION_NUMBER}
+Version: @EIGEN_VERSION_NUMBER@
 Libs:
-Cflags: -I${INCLUDE_INSTALL_DIR}
+Cflags: -I${prefix}/@INCLUDE_INSTALL_DIR@
@@ -202,7 +202,9 @@ ei_add_test(geo_alignedbox)
 ei_add_test(stdvector)
 ei_add_test(stdvector_overload)
 ei_add_test(stdlist)
+ei_add_test(stdlist_overload)
 ei_add_test(stddeque)
+ei_add_test(stddeque_overload)
 ei_add_test(resize)
 ei_add_test(sparse_vector)
 ei_add_test(sparse_basic)
@@ -87,6 +87,32 @@ template<typename T> void check_dynaligned()
 delete obj;
 }

+template<typename T> void check_custom_new_delete()
+{
+{
+T* t = new T;
+delete t;
+}
+
+{
+std::size_t N = internal::random<std::size_t>(1,10);
+T* t = new T[N];
+delete[] t;
+}
+
+#ifdef EIGEN_ALIGN
+{
+T* t = static_cast<T *>((T::operator new)(sizeof(T)));
+(T::operator delete)(t, sizeof(T));
+}
+
+{
+T* t = static_cast<T *>((T::operator new)(sizeof(T)));
+(T::operator delete)(t);
+}
+#endif
+}
+
 void test_dynalloc()
 {
 // low level dynamic memory allocation
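The new check_custom_new_delete() test exercises the class-level operator new and operator delete overloads that fixed-size vectorizable Eigen types provide, and that user types opt into with EIGEN_MAKE_ALIGNED_OPERATOR_NEW. A hedged sketch of that user-side pattern:

    #include <Eigen/Dense>

    struct Body
    {
      Eigen::Matrix4f pose;              // 16-byte-aligned fixed-size member
      double mass;
      EIGEN_MAKE_ALIGNED_OPERATOR_NEW    // makes `new Body` return suitably aligned storage
    };

    int main()
    {
      Body* b = new Body;                // goes through Body::operator new
      b->pose.setIdentity();
      b->mass = 1.0;
      delete b;

      Body* arr = new Body[4];           // the array forms are overloaded as well
      delete[] arr;
      return 0;
    }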
@@ -95,6 +121,8 @@ void test_dynalloc()
 CALL_SUBTEST(check_aligned_new());
 CALL_SUBTEST(check_aligned_stack_alloc());

+// check static allocation, who knows ?
+#if EIGEN_ALIGN_STATICALLY
 for (int i=0; i<g_repeat*100; ++i)
 {
 CALL_SUBTEST(check_dynaligned<Vector4f>() );
@@ -102,10 +130,13 @@ void test_dynalloc()
 CALL_SUBTEST(check_dynaligned<Matrix4f>() );
 CALL_SUBTEST(check_dynaligned<Vector4d>() );
 CALL_SUBTEST(check_dynaligned<Vector4i>() );
+
+CALL_SUBTEST( check_custom_new_delete<Vector4f>() );
+CALL_SUBTEST( check_custom_new_delete<Vector2f>() );
+CALL_SUBTEST( check_custom_new_delete<Matrix4f>() );
+CALL_SUBTEST( check_custom_new_delete<MatrixXi>() );
 }

-// check static allocation, who knows ?
-#if EIGEN_ALIGN_STATICALLY
 {
 MyStruct foo0; VERIFY(size_t(foo0.avec.data())%ALIGNMENT==0);
 MyClassA fooA; VERIFY(size_t(fooA.avec.data())%ALIGNMENT==0);
@@ -136,9 +136,27 @@ template<typename MatrixType> void product(const MatrixType& m)
 VERIFY_IS_APPROX(res.col(r).noalias() = square.adjoint() * square.col(r), (square.adjoint() * square.col(r)).eval());
 VERIFY_IS_APPROX(res.col(r).noalias() = square * square.col(r), (square * square.col(r)).eval());

+// vector at runtime (see bug 1166)
+{
+RowSquareMatrixType ref(square);
+ColSquareMatrixType ref2(square2);
+ref = res = square;
+VERIFY_IS_APPROX(res.block(0,0,1,rows).noalias() = m1.col(0).transpose() * square.transpose(), (ref.row(0) = m1.col(0).transpose() * square.transpose()));
+VERIFY_IS_APPROX(res.block(0,0,1,rows).noalias() = m1.block(0,0,rows,1).transpose() * square.transpose(), (ref.row(0) = m1.col(0).transpose() * square.transpose()));
+VERIFY_IS_APPROX(res.block(0,0,1,rows).noalias() = m1.col(0).transpose() * square, (ref.row(0) = m1.col(0).transpose() * square));
+VERIFY_IS_APPROX(res.block(0,0,1,rows).noalias() = m1.block(0,0,rows,1).transpose() * square, (ref.row(0) = m1.col(0).transpose() * square));
+ref2 = res2 = square2;
+VERIFY_IS_APPROX(res2.block(0,0,1,cols).noalias() = m1.row(0) * square2.transpose(), (ref2.row(0) = m1.row(0) * square2.transpose()));
+VERIFY_IS_APPROX(res2.block(0,0,1,cols).noalias() = m1.block(0,0,1,cols) * square2.transpose(), (ref2.row(0) = m1.row(0) * square2.transpose()));
+VERIFY_IS_APPROX(res2.block(0,0,1,cols).noalias() = m1.row(0) * square2, (ref2.row(0) = m1.row(0) * square2));
+VERIFY_IS_APPROX(res2.block(0,0,1,cols).noalias() = m1.block(0,0,1,cols) * square2, (ref2.row(0) = m1.row(0) * square2));
+}
+
 // inner product
+{
 Scalar x = square2.row(c) * square2.col(c2);
 VERIFY_IS_APPROX(x, square2.row(c).transpose().cwiseProduct(square2.col(c2)).sum());
+}

 // outer product
 VERIFY_IS_APPROX(m1.col(c) * m1.row(r), m1.block(0,c,rows,1) * m1.block(r,0,1,cols));
@@ -147,4 +165,17 @@ template<typename MatrixType> void product(const MatrixType& m)
 VERIFY_IS_APPROX(m1.col(c) * m1.block(r,0,1,cols), m1.block(0,c,rows,1) * m1.block(r,0,1,cols));
 VERIFY_IS_APPROX(m1.leftCols(1) * m1.row(r), m1.block(0,0,rows,1) * m1.block(r,0,1,cols));
 VERIFY_IS_APPROX(m1.col(c) * m1.topRows(1), m1.block(0,c,rows,1) * m1.block(0,0,1,cols));
+
+// Aliasing
+{
+ColVectorType x(cols); x.setRandom();
+ColVectorType z(x);
+ColVectorType y(cols); y.setZero();
+ColSquareMatrixType A(cols,cols); A.setRandom();
+// CwiseBinaryOp
+VERIFY_IS_APPROX(x = y + A*x, A*z);
+x = z;
+// CwiseUnaryOp
+VERIFY_IS_APPROX(x = Scalar(1.)*(A*x), A*z);
+}
 }
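The new aliasing checks rely on the fact that a matrix product on the right-hand side is evaluated into a temporary by default, so x = y + A*x still uses the original x; noalias() is the explicit opt-out when the destination provably does not appear on the right. A small sketch of the distinction, using standard Eigen semantics:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
      Eigen::VectorXd x = Eigen::VectorXd::Random(4);
      Eigen::VectorXd z = x;                    // keep a copy for checking

      x = A * x;                                // safe: the product goes through a temporary
      std::cout << (x - A * z).norm() << "\n";  // ~0

      Eigen::VectorXd y(4);
      y.noalias() = A * z;                      // no temporary: valid because y is not on the right
      std::cout << (y - A * z).norm() << "\n";  // ~0
      return 0;
    }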
@@ -9,6 +9,27 @@

 #include "product.h"

+template<typename T>
+void test_aliasing()
+{
+int rows = internal::random<int>(1,12);
+int cols = internal::random<int>(1,12);
+typedef Matrix<T,Dynamic,Dynamic> MatrixType;
+typedef Matrix<T,Dynamic,1> VectorType;
+VectorType x(cols); x.setRandom();
+VectorType z(x);
+VectorType y(rows); y.setZero();
+MatrixType A(rows,cols); A.setRandom();
+// CwiseBinaryOp
+VERIFY_IS_APPROX(x = y + A*x, A*z);
+x = z;
+// CwiseUnaryOp
+VERIFY_IS_APPROX(x = T(1.)*(A*x), A*z);
+x = z;
+VERIFY_IS_APPROX(x = y+(-(A*x)), -A*z);
+x = z;
+}
+
 void test_product_large()
 {
 for(int i = 0; i < g_repeat; i++) {
@@ -17,6 +38,8 @@ void test_product_large()
 CALL_SUBTEST_3( product(MatrixXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
 CALL_SUBTEST_4( product(MatrixXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2), internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2))) );
 CALL_SUBTEST_5( product(Matrix<float,Dynamic,Dynamic,RowMajor>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+
+CALL_SUBTEST_1( test_aliasing<float>() );
 }

 #if defined EIGEN_TEST_PART_6
@@ -58,10 +58,19 @@ template<typename MatrixType> void product_notemporary(const MatrixType& m)
 r1 = internal::random<Index>(8,rows-r0);

 VERIFY_EVALUATION_COUNT( m3 = (m1 * m2.adjoint()), 1);
+VERIFY_EVALUATION_COUNT( m3 = (m1 * m2.adjoint()).transpose(), 1);
 VERIFY_EVALUATION_COUNT( m3.noalias() = m1 * m2.adjoint(), 0);

+VERIFY_EVALUATION_COUNT( m3 = s1 * (m1 * m2.transpose()), 1);
+VERIFY_EVALUATION_COUNT( m3 = m3 + s1 * (m1 * m2.transpose()), 1);
 VERIFY_EVALUATION_COUNT( m3.noalias() = s1 * (m1 * m2.transpose()), 0);

+VERIFY_EVALUATION_COUNT( m3 = m3 + (m1 * m2.adjoint()), 1);
+VERIFY_EVALUATION_COUNT( m3 = m3 + (m1 * m2.adjoint()).transpose(), 1);
+VERIFY_EVALUATION_COUNT( m3.noalias() = m3 + m1 * m2.transpose(), 1); // 0 in 3.3
+VERIFY_EVALUATION_COUNT( m3.noalias() += m3 + m1 * m2.transpose(), 1); // 0 in 3.3
+VERIFY_EVALUATION_COUNT( m3.noalias() -= m3 + m1 * m2.transpose(), 1); // 0 in 3.3
+
 VERIFY_EVALUATION_COUNT( m3.noalias() = s1 * m1 * s2 * m2.adjoint(), 0);
 VERIFY_EVALUATION_COUNT( m3.noalias() = s1 * m1 * s2 * (m1*s3+m2*s2).adjoint(), 1);
 VERIFY_EVALUATION_COUNT( m3.noalias() = (s1 * m1).adjoint() * s2 * m2, 0);
@@ -34,6 +34,18 @@ inline void on_temporary_creation(int) {

 // test Ref.h

+// Deal with i387 extended precision
+#if EIGEN_ARCH_i386 && !(EIGEN_ARCH_x86_64)
+
+#if EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_LEAST(4,4)
+#pragma GCC optimize ("-ffloat-store")
+#else
+#undef VERIFY_IS_EQUAL
+#define VERIFY_IS_EQUAL(X,Y) VERIFY_IS_APPROX(X,Y)
+#endif
+
+#endif
+
 template<typename MatrixType> void ref_matrix(const MatrixType& m)
 {
 typedef typename MatrixType::Index Index;
@@ -71,7 +83,6 @@ template<typename MatrixType> void ref_matrix(const MatrixType& m)
 rm2 = m2.block(i,j,brows,bcols);
 VERIFY_IS_EQUAL(m1, m2);

-
 ConstRefDynMat rm3 = m1.block(i,j,brows,bcols);
 m1.block(i,j,brows,bcols) *= 2;
 m2.block(i,j,brows,bcols) *= 2;
@@ -55,6 +55,11 @@ template<typename MatrixType> void matrixVisitor(const MatrixType& p)
 VERIFY_IS_APPROX(maxc, eigen_maxc);
 VERIFY_IS_APPROX(minc, m.minCoeff());
 VERIFY_IS_APPROX(maxc, m.maxCoeff());
+
+eigen_maxc = (m.adjoint()*m).maxCoeff(&eigen_maxrow,&eigen_maxcol);
+eigen_maxc = (m.adjoint()*m).eval().maxCoeff(&maxrow,&maxcol);
+VERIFY(maxrow == eigen_maxrow);
+VERIFY(maxcol == eigen_maxcol);
 }

 template<typename VectorType> void vectorVisitor(const VectorType& w)
@@ -177,7 +177,7 @@ template<typename _Scalar> class AlignedVector3
 }

 template<typename Derived>
-inline bool isApprox(const MatrixBase<Derived>& other, RealScalar eps=NumTraits<Scalar>::dummy_precision()) const
+inline bool isApprox(const MatrixBase<Derived>& other, const RealScalar& eps=NumTraits<Scalar>::dummy_precision()) const
 {
 return m_coeffs.template head<3>().isApprox(other,eps);
 }