Merge remote-tracking branch 'origin/develop' into feature/SmartFactors3

release/4.3a0
dellaert 2015-03-09 17:51:16 -07:00
commit a4d1874af4
100 changed files with 1517 additions and 878 deletions

View File

@@ -270,7 +270,7 @@ function(install_wrapped_library_internal interfaceHeader)
 if(GTSAM_BUILD_TYPE_POSTFIXES)
 foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
 string(TOUPPER "${build_type}" build_type_upper)
-if("${build_type_upper}" STREQUAL "RELEASE")
+if(${build_type_upper} STREQUAL "RELEASE")
 set(build_type_tag "") # Don't create release mode tag on installed directory
 else()
 set(build_type_tag "${build_type}")
@@ -373,7 +373,7 @@ function(install_matlab_scripts source_directory patterns)
 if(GTSAM_BUILD_TYPE_POSTFIXES)
 foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
 string(TOUPPER "${build_type}" build_type_upper)
-if("${build_type_upper}" STREQUAL "RELEASE")
+if(${build_type_upper} STREQUAL "RELEASE")
 set(build_type_tag "") # Don't create release mode tag on installed directory
 else()
 set(build_type_tag "${build_type}")

View File

@@ -1,4 +1,4 @@
 repo: 8a21fd850624c931e448cbcfb38168cb2717c790
-node: ffa86ffb557094721ca71dcea6aed2651b9fd610
+node: 10219c95fe653d4962aa9db4946f6fbea96dd740
 branch: 3.2
-tag: 3.2.0
+tag: 3.2.4

View File

@@ -23,3 +23,7 @@ bf4cb8c934fa3a79f45f1e629610f0225e93e493 3.1.0-rc2
 da195914abcc1d739027cbee7c52077aab30b336 3.2-beta1
 4b687cad1d23066f66863f4f87298447298443df 3.2-rc1
 1eeda7b1258bcd306018c0738e2b6a8543661141 3.2-rc2
+ffa86ffb557094721ca71dcea6aed2651b9fd610 3.2.0
+6b38706d90a9fe182e66ab88477b3dbde34b9f66 3.2.1
+1306d75b4a21891e59ff9bd96678882cf831e39f 3.2.2
+36fd1ba04c120cfdd90f3e4cede47f43b21d19ad 3.2.3

View File

@@ -442,6 +442,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
 m_transpositions.resize(size);
 m_isInitialized = false;
 m_temporary.resize(size);
+m_sign = internal::ZeroSign;
 internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, m_sign);
@@ -502,7 +503,6 @@ struct solve_retval<LDLT<_MatrixType,_UpLo>, Rhs>
 using std::abs;
 using std::max;
 typedef typename LDLTType::MatrixType MatrixType;
-typedef typename LDLTType::Scalar Scalar;
 typedef typename LDLTType::RealScalar RealScalar;
 const typename Diagonal<const MatrixType>::RealReturnType vectorD(dec().vectorD());
 // In some previous versions, tolerance was set to the max of 1/highest and the maximal diagonal entry * epsilon

View File

@@ -29,6 +29,11 @@ struct traits<ArrayWrapper<ExpressionType> >
 : public traits<typename remove_all<typename ExpressionType::Nested>::type >
 {
 typedef ArrayXpr XprKind;
+// Let's remove NestByRefBit
+enum {
+Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags,
+Flags = Flags0 & ~NestByRefBit
+};
 };
 }
@@ -149,6 +154,11 @@ struct traits<MatrixWrapper<ExpressionType> >
 : public traits<typename remove_all<typename ExpressionType::Nested>::type >
 {
 typedef MatrixXpr XprKind;
+// Let's remove NestByRefBit
+enum {
+Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags,
+Flags = Flags0 & ~NestByRefBit
+};
 };
 }

View File

@@ -462,8 +462,10 @@ template<typename Derived> class DenseBase
 template<int p> RealScalar lpNorm() const;
 template<int RowFactor, int ColFactor>
-const Replicate<Derived,RowFactor,ColFactor> replicate() const;
-const Replicate<Derived,Dynamic,Dynamic> replicate(Index rowFacor,Index colFactor) const;
+inline const Replicate<Derived,RowFactor,ColFactor> replicate() const;
+typedef Replicate<Derived,Dynamic,Dynamic> ReplicateReturnType;
+inline const ReplicateReturnType replicate(Index rowFacor,Index colFactor) const;
 typedef Reverse<Derived, BothDirections> ReverseReturnType;
 typedef const Reverse<const Derived, BothDirections> ConstReverseReturnType;

View File

@@ -190,18 +190,18 @@ MatrixBase<Derived>::diagonal() const
 *
 * \sa MatrixBase::diagonal(), class Diagonal */
 template<typename Derived>
-inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<DynamicIndex>::Type
+inline typename MatrixBase<Derived>::DiagonalDynamicIndexReturnType
 MatrixBase<Derived>::diagonal(Index index)
 {
-return typename DiagonalIndexReturnType<DynamicIndex>::Type(derived(), index);
+return DiagonalDynamicIndexReturnType(derived(), index);
 }
 /** This is the const version of diagonal(Index). */
 template<typename Derived>
-inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<DynamicIndex>::Type
+inline typename MatrixBase<Derived>::ConstDiagonalDynamicIndexReturnType
 MatrixBase<Derived>::diagonal(Index index) const
 {
-return typename ConstDiagonalIndexReturnType<DynamicIndex>::Type(derived(), index);
+return ConstDiagonalDynamicIndexReturnType(derived(), index);
 }
 /** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this

View File

@@ -232,7 +232,7 @@ EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest&
 // FIXME not very good if rhs is real and lhs complex while alpha is real too
 const Index cols = dest.cols();
 for (Index j=0; j<cols; ++j)
-func(dest.col(j), prod.rhs().coeff(j) * prod.lhs());
+func(dest.col(j), prod.rhs().coeff(0,j) * prod.lhs());
 }
 // Row major
@@ -243,7 +243,7 @@ EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest&
 // FIXME not very good if lhs is real and rhs complex while alpha is real too
 const Index rows = dest.rows();
 for (Index i=0; i<rows; ++i)
-func(dest.row(i), prod.lhs().coeff(i) * prod.rhs());
+func(dest.row(i), prod.lhs().coeff(i,0) * prod.rhs());
 }
 template<typename Lhs, typename Rhs>

View File

@@ -168,6 +168,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
 template<typename Derived> class MapBase<Derived, WriteAccessors>
 : public MapBase<Derived, ReadOnlyAccessors>
 {
+typedef MapBase<Derived, ReadOnlyAccessors> ReadOnlyMapBase;
 public:
 typedef MapBase<Derived, ReadOnlyAccessors> Base;
@@ -230,11 +231,13 @@ template<typename Derived> class MapBase<Derived, WriteAccessors>
 Derived& operator=(const MapBase& other)
 {
-Base::Base::operator=(other);
+ReadOnlyMapBase::Base::operator=(other);
 return derived();
 }
-using Base::Base::operator=;
+// In theory we could simply refer to Base:Base::operator=, but MSVC does not like Base::Base,
+// see bugs 821 and 920.
+using ReadOnlyMapBase::Base::operator=;
 };
 #undef EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS
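The ReadOnlyMapBase typedef above works around MSVC bugs 821/920, where the compiler mis-parses a nested Base::Base qualification. A minimal standalone sketch of the same pattern, with illustrative type names that are not Eigen's:

struct Root {
  Root& operator=(const Root&) { return *this; }
};
struct Mid : Root {
  typedef Root Base;            // Mid::Base names Root
};
struct Leaf : Mid {
  typedef Mid Base;             // Leaf::Base names Mid, so Base::Base would name Root
  typedef Mid ReadOnlyMid;      // a second alias for the same base class
  // Spelling the using-declaration through the extra alias avoids the
  // problematic Base::Base form while importing the same operator=.
  using ReadOnlyMid::Base::operator=;
};
int main() { Leaf a, b; a = b; return 0; }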

View File

@@ -215,7 +215,7 @@ template<typename Derived> class MatrixBase
 typedef Diagonal<Derived> DiagonalReturnType;
 DiagonalReturnType diagonal();
 typedef typename internal::add_const<Diagonal<const Derived> >::type ConstDiagonalReturnType;
 ConstDiagonalReturnType diagonal() const;
 template<int Index> struct DiagonalIndexReturnType { typedef Diagonal<Derived,Index> Type; };
@@ -224,15 +224,11 @@ template<typename Derived> class MatrixBase
 template<int Index> typename DiagonalIndexReturnType<Index>::Type diagonal();
 template<int Index> typename ConstDiagonalIndexReturnType<Index>::Type diagonal() const;
-// Note: The "MatrixBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations.
-// On the other hand they confuse MSVC8...
-#if (defined _MSC_VER) && (_MSC_VER >= 1500) // 2008 or later
-typename MatrixBase::template DiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index);
-typename MatrixBase::template ConstDiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index) const;
-#else
-typename DiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index);
-typename ConstDiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index) const;
-#endif
+typedef Diagonal<Derived,DynamicIndex> DiagonalDynamicIndexReturnType;
+typedef typename internal::add_const<Diagonal<const Derived,DynamicIndex> >::type ConstDiagonalDynamicIndexReturnType;
+DiagonalDynamicIndexReturnType diagonal(Index index);
+ConstDiagonalDynamicIndexReturnType diagonal(Index index) const;
 #ifdef EIGEN2_SUPPORT
 template<unsigned int Mode> typename internal::eigen2_part_return_type<Derived, Mode>::type part();

View File

@@ -555,7 +555,10 @@ struct permut_matrix_product_retval
 const Index n = Side==OnTheLeft ? rows() : cols();
 // FIXME we need an is_same for expression that is not sensitive to constness. For instance
 // is_same_xpr<Block<const Matrix>, Block<Matrix> >::value should be true.
-if(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix))
+if( is_same<MatrixTypeNestedCleaned,Dest>::value
+&& blas_traits<MatrixTypeNestedCleaned>::HasUsableDirectAccess
+&& blas_traits<Dest>::HasUsableDirectAccess
+&& extract_data(dst) == extract_data(m_matrix))
 {
 // apply the permutation inplace
 Matrix<bool,PermutationType::RowsAtCompileTime,1,0,PermutationType::MaxRowsAtCompileTime> mask(m_permutation.size());

View File

@@ -85,7 +85,14 @@ class ProductBase : public MatrixBase<Derived>
 public:
+#ifndef EIGEN_NO_MALLOC
+typedef typename Base::PlainObject BasePlainObject;
+typedef Matrix<Scalar,RowsAtCompileTime==1?1:Dynamic,ColsAtCompileTime==1?1:Dynamic,BasePlainObject::Options> DynPlainObject;
+typedef typename internal::conditional<(BasePlainObject::SizeAtCompileTime==Dynamic) || (BasePlainObject::SizeAtCompileTime*int(sizeof(Scalar)) < int(EIGEN_STACK_ALLOCATION_LIMIT)),
+BasePlainObject, DynPlainObject>::type PlainObject;
+#else
 typedef typename Base::PlainObject PlainObject;
+#endif
 ProductBase(const Lhs& a_lhs, const Rhs& a_rhs)
 : m_lhs(a_lhs), m_rhs(a_rhs)
@@ -180,7 +187,12 @@ namespace internal {
 template<typename Lhs, typename Rhs, int Mode, int N, typename PlainObject>
 struct nested<GeneralProduct<Lhs,Rhs,Mode>, N, PlainObject>
 {
-typedef PlainObject const& type;
+typedef typename GeneralProduct<Lhs,Rhs,Mode>::PlainObject const& type;
+};
+template<typename Lhs, typename Rhs, int Mode, int N, typename PlainObject>
+struct nested<const GeneralProduct<Lhs,Rhs,Mode>, N, PlainObject>
+{
+typedef typename GeneralProduct<Lhs,Rhs,Mode>::PlainObject const& type;
 };
 }

View File

@@ -188,6 +188,8 @@ template<typename PlainObjectType, int Options, typename StrideType> class Ref
 : public RefBase<Ref<PlainObjectType, Options, StrideType> >
 {
 typedef internal::traits<Ref> Traits;
+template<typename Derived>
+inline Ref(const PlainObjectBase<Derived>& expr);
 public:
 typedef RefBase<Ref> Base;
@@ -196,20 +198,21 @@ template<typename PlainObjectType, int Options, typename StrideType> class Ref
 #ifndef EIGEN_PARSED_BY_DOXYGEN
 template<typename Derived>
-inline Ref(PlainObjectBase<Derived>& expr,
-typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
+inline Ref(PlainObjectBase<Derived>& expr)
 {
-Base::construct(expr);
+EIGEN_STATIC_ASSERT(static_cast<bool>(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
+Base::construct(expr.derived());
 }
 template<typename Derived>
-inline Ref(const DenseBase<Derived>& expr,
-typename internal::enable_if<bool(internal::is_lvalue<Derived>::value&&bool(Traits::template match<Derived>::MatchAtCompileTime)),Derived>::type* = 0,
-int = Derived::ThisConstantIsPrivateInPlainObjectBase)
+inline Ref(const DenseBase<Derived>& expr)
 #else
 template<typename Derived>
 inline Ref(DenseBase<Derived>& expr)
 #endif
 {
+EIGEN_STATIC_ASSERT(static_cast<bool>(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
+EIGEN_STATIC_ASSERT(static_cast<bool>(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
+enum { THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY = Derived::ThisConstantIsPrivateInPlainObjectBase};
 Base::construct(expr.const_cast_derived());
 }

View File

@@ -135,7 +135,7 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
 */
 template<typename Derived>
 template<int RowFactor, int ColFactor>
-inline const Replicate<Derived,RowFactor,ColFactor>
+const Replicate<Derived,RowFactor,ColFactor>
 DenseBase<Derived>::replicate() const
 {
 return Replicate<Derived,RowFactor,ColFactor>(derived());
@@ -150,7 +150,7 @@ DenseBase<Derived>::replicate() const
 * \sa VectorwiseOp::replicate(), DenseBase::replicate<int,int>(), class Replicate
 */
 template<typename Derived>
-inline const Replicate<Derived,Dynamic,Dynamic>
+const typename DenseBase<Derived>::ReplicateReturnType
 DenseBase<Derived>::replicate(Index rowFactor,Index colFactor) const
 {
 return Replicate<Derived,Dynamic,Dynamic>(derived(),rowFactor,colFactor);
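A small usage sketch of the two replicate() overloads touched above, one with compile-time factors and one with the runtime ReplicateReturnType overload (test values are illustrative):

#include <Eigen/Dense>
int main() {
  Eigen::Vector3f v(1.f, 2.f, 3.f);
  Eigen::MatrixXf a = v.replicate<1, 4>();  // compile-time factors: 3x4 result
  Eigen::MatrixXf b = v.replicate(2, 5);    // runtime factors: 6x5 result
  return (a.cols() == 4 && b.rows() == 6) ? 0 : 1;
}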

View File

@@ -380,19 +380,19 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
 EIGEN_STRONG_INLINE TriangularView& operator=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
 {
 setZero();
-return assignProduct(other,1);
+return assignProduct(other.derived(),1);
 }
 template<typename ProductDerived, typename Lhs, typename Rhs>
 EIGEN_STRONG_INLINE TriangularView& operator+=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
 {
-return assignProduct(other,1);
+return assignProduct(other.derived(),1);
 }
 template<typename ProductDerived, typename Lhs, typename Rhs>
 EIGEN_STRONG_INLINE TriangularView& operator-=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
 {
-return assignProduct(other,-1);
+return assignProduct(other.derived(),-1);
 }
@@ -400,19 +400,19 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
 EIGEN_STRONG_INLINE TriangularView& operator=(const ScaledProduct<ProductDerived>& other)
 {
 setZero();
-return assignProduct(other,other.alpha());
+return assignProduct(other.derived(),other.alpha());
 }
 template<typename ProductDerived>
 EIGEN_STRONG_INLINE TriangularView& operator+=(const ScaledProduct<ProductDerived>& other)
 {
-return assignProduct(other,other.alpha());
+return assignProduct(other.derived(),other.alpha());
 }
 template<typename ProductDerived>
 EIGEN_STRONG_INLINE TriangularView& operator-=(const ScaledProduct<ProductDerived>& other)
 {
-return assignProduct(other,-other.alpha());
+return assignProduct(other.derived(),-other.alpha());
 }
 protected:
@@ -420,6 +420,15 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
 template<typename ProductDerived, typename Lhs, typename Rhs>
 EIGEN_STRONG_INLINE TriangularView& assignProduct(const ProductBase<ProductDerived, Lhs,Rhs>& prod, const Scalar& alpha);
+template<int Mode, bool LhsIsTriangular,
+typename Lhs, bool LhsIsVector,
+typename Rhs, bool RhsIsVector>
+EIGEN_STRONG_INLINE TriangularView& assignProduct(const TriangularProduct<Mode, LhsIsTriangular, Lhs, LhsIsVector, Rhs, RhsIsVector>& prod, const Scalar& alpha)
+{
+lazyAssign(alpha*prod.eval());
+return *this;
+}
 MatrixTypeNested m_matrix;
 };

View File

@@ -110,7 +110,7 @@ template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<
 template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
 template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
-template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { __pld((float *)addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { EIGEN_ARM_PREFETCH((float *)addr); }
 template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
 {

View File

@@ -49,8 +49,17 @@ typedef uint32x4_t Packet4ui;
 #define EIGEN_INIT_NEON_PACKET4(X, Y, Z, W) {X, Y, Z, W}
 #endif
-#ifndef __pld
-#define __pld(x) asm volatile ( " pld [%[addr]]\n" :: [addr] "r" (x) : "cc" );
+// arm64 does have the pld instruction. If available, let's trust the __builtin_prefetch built-in function
+// which available on LLVM and GCC (at least)
+#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
+#define EIGEN_ARM_PREFETCH(ADDR) __builtin_prefetch(ADDR);
+#elif defined __pld
+#define EIGEN_ARM_PREFETCH(ADDR) __pld(ADDR)
+#elif !defined(__aarch64__)
+#define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__ ( " pld [%[addr]]\n" :: [addr] "r" (ADDR) : "cc" );
+#else
+// by default no explicit prefetching
+#define EIGEN_ARM_PREFETCH(ADDR)
 #endif
 template<> struct packet_traits<float> : default_packet_traits
@@ -209,8 +218,8 @@ template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& f
 template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
 template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }
-template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { __pld(addr); }
-template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { __pld(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { EIGEN_ARM_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { EIGEN_ARM_PREFETCH(addr); }
 // FIXME only store the 2 first elements ?
 template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; }
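The EIGEN_ARM_PREFETCH selection above boils down to: prefer __builtin_prefetch when the compiler advertises it, fall back to __pld or inline assembly, and otherwise compile to nothing. A standalone sketch of the same detection pattern; the MY_HAS_BUILTIN/MY_PREFETCH macro names are illustrative, not Eigen's:

#ifdef __has_builtin
#  define MY_HAS_BUILTIN(x) __has_builtin(x)
#else
#  define MY_HAS_BUILTIN(x) 0
#endif

#if MY_HAS_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
#  define MY_PREFETCH(addr) __builtin_prefetch(addr)
#else
#  define MY_PREFETCH(addr) ((void)0)  // no explicit prefetch on other toolchains
#endif

int main() {
  static float data[1024] = {0.f};
  MY_PREFETCH(&data[512]);  // purely a hint; the no-op fallback keeps this portable
  return 0;
}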

View File

@@ -52,7 +52,7 @@ Packet4f plog<Packet4f>(const Packet4f& _x)
 Packet4i emm0;
-Packet4f invalid_mask = _mm_cmplt_ps(x, _mm_setzero_ps());
+Packet4f invalid_mask = _mm_cmpnge_ps(x, _mm_setzero_ps()); // not greater equal is true if x is NaN
 Packet4f iszero_mask = _mm_cmpeq_ps(x, _mm_setzero_ps());
 x = pmax(x, p4f_min_norm_pos); /* cut off denormalized stuff */
@@ -166,7 +166,7 @@ Packet4f pexp<Packet4f>(const Packet4f& _x)
 emm0 = _mm_cvttps_epi32(fx);
 emm0 = _mm_add_epi32(emm0, p4i_0x7f);
 emm0 = _mm_slli_epi32(emm0, 23);
-return pmul(y, _mm_castsi128_ps(emm0));
+return pmax(pmul(y, Packet4f(_mm_castsi128_ps(emm0))), _x);
 }
 template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
 Packet2d pexp<Packet2d>(const Packet2d& _x)
@@ -239,7 +239,7 @@ Packet2d pexp<Packet2d>(const Packet2d& _x)
 emm0 = _mm_add_epi32(emm0, p4i_1023_0);
 emm0 = _mm_slli_epi32(emm0, 20);
 emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3));
-return pmul(x, _mm_castsi128_pd(emm0));
+return pmax(pmul(x, Packet2d(_mm_castsi128_pd(emm0))), _x);
 }
 /* evaluation of 4 sines at onces, using SSE2 intrinsics.

View File

@@ -90,6 +90,7 @@ struct traits<CoeffBasedProduct<LhsNested,RhsNested,NestingFlags> >
 | (SameType && (CanVectorizeLhs || CanVectorizeRhs) ? PacketAccessBit : 0),
 CoeffReadCost = InnerSize == Dynamic ? Dynamic
+: InnerSize == 0 ? 0
 : InnerSize * (NumTraits<Scalar>::MulCost + LhsCoeffReadCost + RhsCoeffReadCost)
 + (InnerSize - 1) * NumTraits<Scalar>::AddCost,
@@ -133,7 +134,7 @@ class CoeffBasedProduct
 };
 typedef internal::product_coeff_impl<CanVectorizeInner ? InnerVectorizedTraversal : DefaultTraversal,
-Unroll ? InnerSize-1 : Dynamic,
+Unroll ? (InnerSize==0 ? 0 : InnerSize-1) : Dynamic,
 _LhsNested, _RhsNested, Scalar> ScalarCoeffImpl;
 typedef CoeffBasedProduct<LhsNested,RhsNested,NestByRefBit> LazyCoeffBasedProductType;
@@ -184,7 +185,7 @@ class CoeffBasedProduct
 {
 PacketScalar res;
 internal::product_packet_impl<Flags&RowMajorBit ? RowMajor : ColMajor,
-Unroll ? InnerSize-1 : Dynamic,
+Unroll ? (InnerSize==0 ? 0 : InnerSize-1) : Dynamic,
 _LhsNested, _RhsNested, PacketScalar, LoadMode>
 ::run(row, col, m_lhs, m_rhs, res);
 return res;
@@ -262,10 +263,7 @@ struct product_coeff_impl<DefaultTraversal, Dynamic, Lhs, Rhs, RetScalar>
 typedef typename Lhs::Index Index;
 static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar& res)
 {
-eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix");
-res = lhs.coeff(row, 0) * rhs.coeff(0, col);
-for(Index i = 1; i < lhs.cols(); ++i)
-res += lhs.coeff(row, i) * rhs.coeff(i, col);
+res = (lhs.row(row).transpose().cwiseProduct( rhs.col(col) )).sum();
 }
 };
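The rewritten coefficient path above relies on the identity (A*B)(i,j) = A.row(i) · B.col(j), expressed as cwiseProduct(...).sum(), which is also well defined for an empty inner dimension (a sum of zero terms is 0). A small check of that identity with illustrative random matrices:

#include <Eigen/Dense>
#include <cmath>
int main() {
  Eigen::Matrix3d A = Eigen::Matrix3d::Random(), B = Eigen::Matrix3d::Random();
  double direct = (A * B)(1, 2);
  double viaSum = A.row(1).transpose().cwiseProduct(B.col(2)).sum();
  return std::abs(direct - viaSum) < 1e-12 ? 0 : 1;
}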

View File

@@ -13,7 +13,7 @@
 #define EIGEN_WORLD_VERSION 3
 #define EIGEN_MAJOR_VERSION 2
-#define EIGEN_MINOR_VERSION 2
+#define EIGEN_MINOR_VERSION 4
 #define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
 (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
@@ -96,6 +96,13 @@
 #define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
 #endif
+// Cross compiler wrapper around LLVM's __has_builtin
+#ifdef __has_builtin
+# define EIGEN_HAS_BUILTIN(x) __has_builtin(x)
+#else
+# define EIGEN_HAS_BUILTIN(x) 0
+#endif
 /** Allows to disable some optimizations which might affect the accuracy of the result.
 * Such optimization are enabled by default, and set EIGEN_FAST_MATH to 0 to disable them.
 * They currently include:
@@ -247,7 +254,7 @@ namespace Eigen {
 #if !defined(EIGEN_ASM_COMMENT)
 #if (defined __GNUC__) && ( defined(__i386__) || defined(__x86_64__) )
-#define EIGEN_ASM_COMMENT(X) asm("#" X)
+#define EIGEN_ASM_COMMENT(X) __asm__("#" X)
 #else
 #define EIGEN_ASM_COMMENT(X)
 #endif
@@ -306,7 +313,7 @@ namespace Eigen {
 // just an empty macro !
 #define EIGEN_EMPTY
-#if defined(_MSC_VER) && (!defined(__INTEL_COMPILER))
+#if defined(_MSC_VER) && (_MSC_VER < 1900) && (!defined(__INTEL_COMPILER))
 #define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
 using Base::operator =;
 #elif defined(__clang__) // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653)

View File

@@ -63,7 +63,7 @@
 // Currently, let's include it only on unix systems:
 #if defined(__unix__) || defined(__unix)
 #include <unistd.h>
-#if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
+#if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || (defined __PGI) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
 #define EIGEN_HAS_POSIX_MEMALIGN 1
 #endif
 #endif
@@ -417,6 +417,8 @@ template<typename T, bool Align> inline T* conditional_aligned_realloc_new(T* pt
 template<typename T, bool Align> inline T* conditional_aligned_new_auto(size_t size)
 {
+if(size==0)
+return 0; // short-cut. Also fixes Bug 884
 check_size_for_overflow<T>(size);
 T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
 if(NumTraits<T>::RequireInitialization)
@@ -464,9 +466,8 @@ template<typename T, bool Align> inline void conditional_aligned_delete_auto(T *
 template<typename Scalar, typename Index>
 static inline Index first_aligned(const Scalar* array, Index size)
 {
-enum { PacketSize = packet_traits<Scalar>::size,
-PacketAlignedMask = PacketSize-1
-};
+static const Index PacketSize = packet_traits<Scalar>::size;
+static const Index PacketAlignedMask = PacketSize-1;
 if(PacketSize==1)
 {
@@ -612,7 +613,6 @@ template<typename T> class aligned_stack_memory_handler
 void* operator new(size_t size, const std::nothrow_t&) throw() { \
 try { return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); } \
 catch (...) { return 0; } \
-return 0; \
 }
 #else
 #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \

View File

@@ -90,7 +90,9 @@
 YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED,
 THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE,
 THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH,
-OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG
+OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG,
+IMPLICIT_CONVERSION_TO_SCALAR_IS_FOR_INNER_PRODUCT_ONLY,
+STORAGE_LAYOUT_DOES_NOT_MATCH
 };
 };

View File

@@ -341,7 +341,7 @@ template<typename T, int n=1, typename PlainObject = typename eval<T>::type> str
 };
 template<typename T>
-T* const_cast_ptr(const T* ptr)
+inline T* const_cast_ptr(const T* ptr)
 {
 return const_cast<T*>(ptr);
 }

View File

@@ -147,7 +147,6 @@ void fitHyperplane(int numPoints,
 // compute the covariance matrix
 CovMatrixType covMat = CovMatrixType::Zero(size, size);
-VectorType remean = VectorType::Zero(size);
 for(int i = 0; i < numPoints; ++i)
 {
 VectorType diff = (*(points[i]) - mean).conjugate();

View File

@@ -313,7 +313,7 @@ namespace Eigen {
 using std::abs;
 using std::sqrt;
 const Index dim=m_S.cols();
-if (abs(m_S.coeff(i+1,i)==Scalar(0)))
+if (abs(m_S.coeff(i+1,i))==Scalar(0))
 return;
 Index z = findSmallDiagEntry(i,i+1);
 if (z==i-1)

View File

@@ -563,7 +563,6 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
 if(computeEigenvectors)
 {
 Scalar safeNorm2 = Eigen::NumTraits<Scalar>::epsilon();
-safeNorm2 *= safeNorm2;
 if((eivals(2)-eivals(0))<=Eigen::NumTraits<Scalar>::epsilon())
 {
 eivecs.setIdentity();
@@ -577,7 +576,7 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
 Scalar d0 = eivals(2) - eivals(1);
 Scalar d1 = eivals(1) - eivals(0);
 int k = d0 > d1 ? 2 : 0;
-d0 = d0 > d1 ? d1 : d0;
+d0 = d0 > d1 ? d0 : d1;
 tmp.diagonal().array () -= eivals(k);
 VectorType cross;
@@ -585,19 +584,25 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
 n = (cross = tmp.row(0).cross(tmp.row(1))).squaredNorm();
 if(n>safeNorm2)
+{
 eivecs.col(k) = cross / sqrt(n);
+}
 else
 {
 n = (cross = tmp.row(0).cross(tmp.row(2))).squaredNorm();
 if(n>safeNorm2)
+{
 eivecs.col(k) = cross / sqrt(n);
+}
 else
 {
 n = (cross = tmp.row(1).cross(tmp.row(2))).squaredNorm();
 if(n>safeNorm2)
+{
 eivecs.col(k) = cross / sqrt(n);
+}
 else
 {
 // the input matrix and/or the eigenvaues probably contains some inf/NaN,
@@ -617,12 +622,16 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
 tmp.diagonal().array() -= eivals(1);
 if(d0<=Eigen::NumTraits<Scalar>::epsilon())
+{
 eivecs.col(1) = eivecs.col(k).unitOrthogonal();
+}
 else
 {
-n = (cross = eivecs.col(k).cross(tmp.row(0).normalized())).squaredNorm();
+n = (cross = eivecs.col(k).cross(tmp.row(0))).squaredNorm();
 if(n>safeNorm2)
+{
 eivecs.col(1) = cross / sqrt(n);
+}
 else
 {
 n = (cross = eivecs.col(k).cross(tmp.row(1))).squaredNorm();
@@ -636,13 +645,14 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
 else
 {
 // we should never reach this point,
-// if so the last two eigenvalues are likely to ve very closed to each other
+// if so the last two eigenvalues are likely to be very close to each other
 eivecs.col(1) = eivecs.col(k).unitOrthogonal();
 }
 }
 }
 // make sure that eivecs[1] is orthogonal to eivecs[2]
+// FIXME: this step should not be needed
 Scalar d = eivecs.col(1).dot(eivecs.col(k));
 eivecs.col(1) = (eivecs.col(1) - d * eivecs.col(k)).normalized();
 }

View File

@@ -100,7 +100,17 @@ public:
 {
 EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3)
 Hyperplane result(p0.size());
-result.normal() = (p2 - p0).cross(p1 - p0).normalized();
+VectorType v0(p2 - p0), v1(p1 - p0);
+result.normal() = v0.cross(v1);
+RealScalar norm = result.normal().norm();
+if(norm <= v0.norm() * v1.norm() * NumTraits<RealScalar>::epsilon())
+{
+Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
+JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
+result.normal() = svd.matrixV().col(2);
+}
+else
+result.normal() /= norm;
 result.offset() = -p0.dot(result.normal());
 return result;
 }
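A usage sketch for Hyperplane::Through with three points; the exactly collinear case below is the degenerate situation the SVD fallback above is meant to keep well defined (the point values are illustrative):

#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <cmath>
int main() {
  using namespace Eigen;
  Vector3d p0(0, 0, 0), p1(1, 0, 0), p2(2, 0, 0);  // collinear: the cross product vanishes
  Hyperplane<double, 3> plane = Hyperplane<double, 3>::Through(p0, p1, p2);
  // Even in this degenerate case the normal should come back unit length
  // and orthogonal to the direction p1 - p0.
  bool unit  = std::abs(plane.normal().norm() - 1.0) < 1e-9;
  bool ortho = std::abs(plane.normal().dot(p1 - p0)) < 1e-9;
  return (unit && ortho) ? 0 : 1;
}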

View File

@@ -61,6 +61,9 @@ public:
 /** Construct a 2D counter clock wise rotation from the angle \a a in radian. */
 inline Rotation2D(const Scalar& a) : m_angle(a) {}
+/** Default constructor wihtout initialization. The represented rotation is undefined. */
+Rotation2D() {}
 /** \returns the rotation angle */
 inline Scalar angle() const { return m_angle; }
@@ -84,7 +87,7 @@ public:
 template<typename Derived>
 Rotation2D& fromRotationMatrix(const MatrixBase<Derived>& m);
-Matrix2 toRotationMatrix(void) const;
+Matrix2 toRotationMatrix() const;
 /** \returns the spherical interpolation between \c *this and \a other using
 * parameter \a t. It is in fact equivalent to a linear interpolation.

View File

@@ -62,6 +62,8 @@ struct transform_construct_from_matrix;
 template<typename TransformType> struct transform_take_affine_part;
+template<int Mode> struct transform_make_affine;
 } // end namespace internal
 /** \geometry_module \ingroup Geometry_Module
@@ -230,8 +232,7 @@ public:
 inline Transform()
 {
 check_template_params();
-if (int(Mode)==Affine)
-makeAffine();
+internal::transform_make_affine<(int(Mode)==Affine) ? Affine : AffineCompact>::run(m_matrix);
 }
 inline Transform(const Transform& other)
@@ -591,11 +592,7 @@ public:
 */
 void makeAffine()
 {
-if(int(Mode)!=int(AffineCompact))
-{
-matrix().template block<1,Dim>(Dim,0).setZero();
-matrix().coeffRef(Dim,Dim) = Scalar(1);
-}
+internal::transform_make_affine<int(Mode)>::run(m_matrix);
 }
 /** \internal
@@ -1079,6 +1076,24 @@ Transform<Scalar,Dim,Mode,Options>::fromPositionOrientationScale(const MatrixBas
 namespace internal {
+template<int Mode>
+struct transform_make_affine
+{
+template<typename MatrixType>
+static void run(MatrixType &mat)
+{
+static const int Dim = MatrixType::ColsAtCompileTime-1;
+mat.template block<1,Dim>(Dim,0).setZero();
+mat.coeffRef(Dim,Dim) = typename MatrixType::Scalar(1);
+}
+};
+template<>
+struct transform_make_affine<AffineCompact>
+{
+template<typename MatrixType> static void run(MatrixType &) { }
+};
 // selector needed to avoid taking the inverse of a 3x4 matrix
 template<typename TransformType, int Mode=TransformType::Mode>
 struct projective_transform_inverse

View File

@@ -39,7 +39,6 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
 int maxIters = iters;
 int n = mat.cols();
-x = precond.solve(x);
 VectorType r = rhs - mat * x;
 VectorType r0 = r;
@@ -143,7 +142,7 @@ struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
 * SparseMatrix<double> A(n,n);
 * // fill A and b
 * BiCGSTAB<SparseMatrix<double> > solver;
-* solver(A);
+* solver.compute(A);
 * x = solver.solve(b);
 * std::cout << "#iterations: " << solver.iterations() << std::endl;
 * std::cout << "estimated error: " << solver.error() << std::endl;

View File

@@ -219,7 +219,7 @@ class PardisoImpl
 void pardisoInit(int type)
 {
 m_type = type;
-bool symmetric = abs(m_type) < 10;
+bool symmetric = std::abs(m_type) < 10;
 m_iparm[0] = 1; // No solver default
 m_iparm[1] = 3; // use Metis for the ordering
 m_iparm[2] = 1; // Numbers of processors, value of OMP_NUM_THREADS

View File

@@ -762,6 +762,7 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
 internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows> m_qr_precond_morecols;
 internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols> m_qr_precond_morerows;
+MatrixType m_scaledMatrix;
 };
 template<typename MatrixType, int QRPreconditioner>
@@ -808,8 +809,9 @@ void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, u
 : 0);
 m_workMatrix.resize(m_diagSize, m_diagSize);
 if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this);
 if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this);
+if(m_cols!=m_cols) m_scaledMatrix.resize(rows,cols);
 }
 template<typename MatrixType, int QRPreconditioner>
@@ -826,22 +828,27 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
 // limit for very small denormal numbers to be considered zero in order to avoid infinite loops (see bug 286)
 const RealScalar considerAsZero = RealScalar(2) * std::numeric_limits<RealScalar>::denorm_min();
+// Scaling factor to reduce over/under-flows
+RealScalar scale = matrix.cwiseAbs().maxCoeff();
+if(scale==RealScalar(0)) scale = RealScalar(1);
 /*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */
-if(!m_qr_precond_morecols.run(*this, matrix) && !m_qr_precond_morerows.run(*this, matrix))
+if(m_rows!=m_cols)
 {
-m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize);
+m_scaledMatrix = matrix / scale;
+m_qr_precond_morecols.run(*this, m_scaledMatrix);
+m_qr_precond_morerows.run(*this, m_scaledMatrix);
+}
+else
+{
+m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize) / scale;
 if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows);
 if(m_computeThinU) m_matrixU.setIdentity(m_rows,m_diagSize);
 if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols);
 if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize);
 }
-// Scaling factor to reduce over/under-flows
-RealScalar scale = m_workMatrix.cwiseAbs().maxCoeff();
-if(scale==RealScalar(0)) scale = RealScalar(1);
-m_workMatrix /= scale;
 /*** step 2. The main Jacobi SVD iteration. ***/
 bool finished = false;
@@ -861,7 +868,8 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
 using std::max;
 RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)),
 abs(m_workMatrix.coeff(q,q))));
-if((max)(abs(m_workMatrix.coeff(p,q)),abs(m_workMatrix.coeff(q,p))) > threshold)
+// We compare both values to threshold instead of calling max to be robust to NaN (See bug 791)
+if(abs(m_workMatrix.coeff(p,q))>threshold || abs(m_workMatrix.coeff(q,p)) > threshold)
 {
 finished = false;
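A scalar illustration (not Eigen code) of why the convergence test above avoids max(): when one off-diagonal entry is NaN, the max-based comparison silently reports convergence, while comparing each value to the threshold still fires on the finite entry:

#include <algorithm>
#include <limits>
int main() {
  double t = 1.0;
  double a = std::numeric_limits<double>::quiet_NaN();
  double b = 5.0;
  bool via_max = std::max(a, b) > t;   // std::max(NaN, 5.0) keeps the NaN, and NaN > t is false
  bool via_or  = (a > t) || (b > t);   // true: the finite entry exceeds the threshold
  return (via_or && !via_max) ? 0 : 1;
}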

View File

@@ -69,7 +69,7 @@ class AmbiVector
 delete[] m_buffer;
 if (size<1000)
 {
-Index allocSize = (size * sizeof(ListEl))/sizeof(Scalar);
+Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1)/sizeof(Scalar);
 m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl);
 m_buffer = new Scalar[allocSize];
 }
@@ -88,7 +88,7 @@ class AmbiVector
 Index copyElements = m_allocatedElements;
 m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size);
 Index allocSize = m_allocatedElements * sizeof(ListEl);
-allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0);
+allocSize = (allocSize + sizeof(Scalar) - 1)/sizeof(Scalar);
 Scalar* newBuffer = new Scalar[allocSize];
 memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
 delete[] m_buffer;
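A tiny illustration of the rounding fix above: when converting a byte count into a number of Scalar-sized slots, truncating division can under-allocate, while the (bytes + sizeof(Scalar) - 1) / sizeof(Scalar) form rounds up (the sizes below are illustrative):

#include <cstddef>
int main() {
  const std::size_t scalar_size  = 8;    // e.g. sizeof(Scalar) for double
  const std::size_t bytes_needed = 20;   // e.g. size * sizeof(ListEl)
  std::size_t truncated  = bytes_needed / scalar_size;                     // 2 slots = 16 bytes, too small
  std::size_t rounded_up = (bytes_needed + scalar_size - 1) / scalar_size; // 3 slots = 24 bytes, enough
  return (rounded_up * scalar_size >= bytes_needed &&
          truncated  * scalar_size <  bytes_needed) ? 0 : 1;
}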

View File

@@ -68,6 +68,8 @@ public:
 const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
 EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
+private:
+Index nonZeros() const;
 };
@@ -82,6 +84,7 @@ class BlockImpl<SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true
 typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType;
 typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
 typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
+typedef Block<const SparseMatrixType, BlockRows, BlockCols, true> ConstBlockType;
 public:
 enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
 EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
@@ -245,6 +248,93 @@ public:
 };
+template<typename _Scalar, int _Options, typename _Index, int BlockRows, int BlockCols>
+class BlockImpl<const SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true,Sparse>
+: public SparseMatrixBase<Block<const SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true> >
+{
+typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType;
+typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
+typedef Block<const SparseMatrixType, BlockRows, BlockCols, true> BlockType;
+public:
+enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
+EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+protected:
+enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
+public:
+class InnerIterator: public SparseMatrixType::InnerIterator
+{
+public:
+inline InnerIterator(const BlockType& xpr, Index outer)
+: SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+{}
+inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+protected:
+Index m_outer;
+};
+class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator
+{
+public:
+inline ReverseInnerIterator(const BlockType& xpr, Index outer)
+: SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+{}
+inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+protected:
+Index m_outer;
+};
+inline BlockImpl(const SparseMatrixType& xpr, int i)
+: m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
+{}
+inline BlockImpl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+: m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
+{}
+inline const Scalar* valuePtr() const
+{ return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+inline const Index* innerIndexPtr() const
+{ return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+inline const Index* outerIndexPtr() const
+{ return m_matrix.outerIndexPtr() + m_outerStart; }
+Index nonZeros() const
+{
+if(m_matrix.isCompressed())
+return std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()])
+- std::size_t(m_matrix.outerIndexPtr()[m_outerStart]);
+else if(m_outerSize.value()==0)
+return 0;
+else
+return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
+}
+const Scalar& lastCoeff() const
+{
+EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockImpl);
+eigen_assert(nonZeros()>0);
+if(m_matrix.isCompressed())
+return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
+else
+return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
+}
+EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+protected:
+typename SparseMatrixType::Nested m_matrix;
+Index m_outerStart;
+const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
+};
 //----------
 /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
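The new BlockImpl<const SparseMatrix, ...> specialization above is what lets inner-vector views of a const sparse matrix report nonZeros(); a minimal usage sketch with illustrative values:

#include <Eigen/Sparse>
int main() {
  Eigen::SparseMatrix<double> A(4, 4);   // column-major by default
  A.insert(0, 1) = 1.0;
  A.insert(2, 1) = 2.0;
  A.makeCompressed();
  const Eigen::SparseMatrix<double>& Ac = A;   // const view of the matrix
  // innerVector(1) on the const matrix yields a Block<const SparseMatrix,...>;
  // its nonZeros() is provided by the specialization added in this change.
  return Ac.innerVector(1).nonZeros() == 2 ? 0 : 1;
}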

View File

@@ -306,15 +306,6 @@ class DenseTimeSparseProduct
 DenseTimeSparseProduct& operator=(const DenseTimeSparseProduct&);
 };
-// sparse * dense
-template<typename Derived>
-template<typename OtherDerived>
-inline const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
-SparseMatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
-{
-return typename SparseDenseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
-}
 } // end namespace Eigen
 #endif // EIGEN_SPARSEDENSEPRODUCT_H

View File

@@ -358,7 +358,8 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
 /** sparse * dense (returns a dense object unless it is an outer product) */
 template<typename OtherDerived>
 const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
-operator*(const MatrixBase<OtherDerived> &other) const;
+operator*(const MatrixBase<OtherDerived> &other) const
+{ return typename SparseDenseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived()); }
 /** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */
 SparseSymmetricPermutationProduct<Derived,Upper|Lower> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const

View File

@@ -61,7 +61,7 @@ struct permut_sparsematrix_product_retval
 for(Index j=0; j<m_matrix.outerSize(); ++j)
 {
 Index jp = m_permutation.indices().coeff(j);
-sizes[((Side==OnTheLeft) ^ Transposed) ? jp : j] = m_matrix.innerVector(((Side==OnTheRight) ^ Transposed) ? jp : j).size();
+sizes[((Side==OnTheLeft) ^ Transposed) ? jp : j] = m_matrix.innerVector(((Side==OnTheRight) ^ Transposed) ? jp : j).nonZeros();
 }
 tmp.reserve(sizes);
 for(Index j=0; j<m_matrix.outerSize(); ++j)

View File

@@ -260,14 +260,13 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
 eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
 // Initialize with the determinant of the row matrix
 Scalar det = Scalar(1.);
-//Note that the diagonal blocks of U are stored in supernodes,
+// Note that the diagonal blocks of U are stored in supernodes,
 // which are available in the L part :)
 for (Index j = 0; j < this->cols(); ++j)
 {
 for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
 {
-if(it.row() < j) continue;
-if(it.row() == j)
+if(it.index() == j)
 {
 det *= (std::abs)(it.value());
 break;
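The loop above walks the supernodal L storage and picks out each column's diagonal entry, which is how SparseLU accumulates the absolute determinant. A hedged usage sketch comparing SparseLU::absDeterminant() against the dense determinant of the same small, illustrative matrix:

#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <Eigen/SparseLU>
#include <cmath>
int main() {
  using namespace Eigen;
  Matrix3d D;
  D << 4, 1, 0,
       1, 4, 1,
       0, 1, 4;
  SparseMatrix<double> A = D.sparseView();
  SparseLU<SparseMatrix<double> > lu;   // default COLAMD ordering
  lu.compute(A);
  if (lu.info() != Success) return 1;
  return std::abs(lu.absDeterminant() - std::abs(D.determinant())) < 1e-9 ? 0 : 1;
}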

View File

@@ -189,8 +189,8 @@ class MappedSuperNodalMatrix<Scalar,Index>::InnerIterator
 m_idval(mat.colIndexPtr()[outer]),
 m_startidval(m_idval),
 m_endidval(mat.colIndexPtr()[outer+1]),
-m_idrow(mat.rowIndexPtr()[outer]),
-m_endidrow(mat.rowIndexPtr()[outer+1])
+m_idrow(mat.rowIndexPtr()[mat.supToCol()[mat.colToSup()[outer]]]),
+m_endidrow(mat.rowIndexPtr()[mat.supToCol()[mat.colToSup()[outer]]+1])
 {}
 inline InnerIterator& operator++()
 {

View File

@@ -75,7 +75,7 @@ class SparseQR
 typedef Matrix<Scalar, Dynamic, 1> ScalarVector;
 typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
 public:
-SparseQR () : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false)
+SparseQR () : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false)
 { }
 /** Construct a QR factorization of the matrix \a mat.
@@ -84,7 +84,7 @@ class SparseQR
 *
 * \sa compute()
 */
-SparseQR(const MatrixType& mat) : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false)
+SparseQR(const MatrixType& mat) : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false)
 {
 compute(mat);
 }
@@ -262,6 +262,7 @@ class SparseQR
 IndexVector m_etree; // Column elimination tree
 IndexVector m_firstRowElt; // First element in each row
 bool m_isQSorted; // whether Q is sorted or not
+bool m_isEtreeOk; // whether the elimination tree match the initial input matrix
 template <typename, typename > friend struct SparseQR_QProduct;
 template <typename > friend struct SparseQRMatrixQReturnType;
@@ -281,9 +282,11 @@ template <typename MatrixType, typename OrderingType>
 void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
 {
 eigen_assert(mat.isCompressed() && "SparseQR requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to SparseQR");
+// Copy to a column major matrix if the input is rowmajor
+typename internal::conditional<MatrixType::IsRowMajor,QRMatrixType,const MatrixType&>::type matCpy(mat);
 // Compute the column fill reducing ordering
 OrderingType ord;
-ord(mat, m_perm_c);
+ord(matCpy, m_perm_c);
 Index n = mat.cols();
 Index m = mat.rows();
 Index diagSize = (std::min)(m,n);
@@ -296,7 +299,8 @@ void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
 // Compute the column elimination tree of the permuted matrix
 m_outputPerm_c = m_perm_c.inverse();
-internal::coletree(mat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
+internal::coletree(matCpy, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
+m_isEtreeOk = true;
 m_R.resize(m, n);
 m_Q.resize(m, diagSize);
@@ -331,14 +335,37 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
 ScalarVector tval(m); // The dense vector used to compute the current column
 RealScalar pivotThreshold = m_threshold;
+m_R.setZero();
+m_Q.setZero();
 m_pmat = mat;
-m_pmat.uncompress(); // To have the innerNonZeroPtr allocated
+if(!m_isEtreeOk)
 // Apply the fill-in reducing permutation lazily:
for (int i = 0; i < n; i++)
{ {
Index p = m_perm_c.size() ? m_perm_c.indices()(i) : i; m_outputPerm_c = m_perm_c.inverse();
m_pmat.outerIndexPtr()[p] = mat.outerIndexPtr()[i]; internal::coletree(m_pmat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
m_pmat.innerNonZeroPtr()[p] = mat.outerIndexPtr()[i+1] - mat.outerIndexPtr()[i]; m_isEtreeOk = true;
}
m_pmat.uncompress(); // To have the innerNonZeroPtr allocated
// Apply the fill-in reducing permutation lazily:
{
// If the input is row major, copy the original column indices,
// otherwise directly use the input matrix
//
IndexVector originalOuterIndicesCpy;
const Index *originalOuterIndices = mat.outerIndexPtr();
if(MatrixType::IsRowMajor)
{
originalOuterIndicesCpy = IndexVector::Map(m_pmat.outerIndexPtr(),n+1);
originalOuterIndices = originalOuterIndicesCpy.data();
}
for (int i = 0; i < n; i++)
{
Index p = m_perm_c.size() ? m_perm_c.indices()(i) : i;
m_pmat.outerIndexPtr()[p] = originalOuterIndices[i];
m_pmat.innerNonZeroPtr()[p] = originalOuterIndices[i+1] - originalOuterIndices[i];
}
} }
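The block above applies the fill-in reducing permutation in place, by rewriting the outer-index and inner-nonzero pointers of m_pmat (after copying the original outer indices when the input is row major), so that m_pmat holds the column-permuted input without a second full copy. Conceptually the result is the same as an explicit sparse-matrix-times-permutation product; a small sketch of that explicit form, with illustrative values:

#include <Eigen/Sparse>
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(3, 3);
  A.insert(0, 0) = 1.0;
  A.insert(1, 1) = 2.0;
  A.insert(2, 2) = 3.0;
  A.makeCompressed();

  // Multiplying by a permutation matrix on the right reorders the columns of A
  // (see Eigen's PermutationMatrix documentation for the index convention).
  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic> perm(3);
  perm.indices() << 2, 0, 1;

  Eigen::SparseMatrix<double> Ap = A * perm;   // explicit, copying column permutation
  std::cout << Eigen::MatrixXd(Ap) << std::endl;
  return 0;
}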
/* Compute the default threshold as in MatLab, see: /* Compute the default threshold as in MatLab, see:
@ -349,6 +376,8 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
{ {
RealScalar max2Norm = 0.0; RealScalar max2Norm = 0.0;
for (int j = 0; j < n; j++) max2Norm = (max)(max2Norm, m_pmat.col(j).norm()); for (int j = 0; j < n; j++) max2Norm = (max)(max2Norm, m_pmat.col(j).norm());
if(max2Norm==RealScalar(0))
max2Norm = RealScalar(1);
pivotThreshold = 20 * (m + n) * max2Norm * NumTraits<RealScalar>::epsilon(); pivotThreshold = 20 * (m + n) * max2Norm * NumTraits<RealScalar>::epsilon();
} }
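With the new guard against an identically zero input, the default pivot threshold computed above is

$$ \tau = 20\,(m+n)\,\varepsilon\,\max_j \lVert A_{:,j} \rVert_2 , $$

where $m$ and $n$ are the row and column counts, $\varepsilon$ is the machine epsilon of RealScalar, and the column-norm maximum is replaced by 1 when every column of the (permuted) input is zero.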
@ -373,7 +402,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
// all the nodes (with indexes lower than rank) reachable through the column elimination tree (etree) rooted at node k. // all the nodes (with indexes lower than rank) reachable through the column elimination tree (etree) rooted at node k.
// Note: if the diagonal entry does not exist, then its contribution must be explicitly added, // Note: if the diagonal entry does not exist, then its contribution must be explicitly added,
// thus the trick with found_diag that permits to do one more iteration on the diagonal element if this one has not been found. // thus the trick with found_diag that permits to do one more iteration on the diagonal element if this one has not been found.
for (typename MatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp) for (typename QRMatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp)
{ {
Index curIdx = nonzeroCol; Index curIdx = nonzeroCol;
if(itp) curIdx = itp.row(); if(itp) curIdx = itp.row();
@ -447,7 +476,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
} }
} // End update current column } // End update current column
Scalar tau; Scalar tau = 0;
RealScalar beta = 0; RealScalar beta = 0;
if(nonzeroCol < diagSize) if(nonzeroCol < diagSize)
@ -461,7 +490,6 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq))); for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq)));
if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0)) if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0))
{ {
tau = RealScalar(0);
beta = numext::real(c0); beta = numext::real(c0);
tval(Qidx(0)) = 1; tval(Qidx(0)) = 1;
} }
@ -514,6 +542,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
// Recompute the column elimination tree // Recompute the column elimination tree
internal::coletree(m_pmat, m_etree, m_firstRowElt, m_pivotperm.indices().data()); internal::coletree(m_pmat, m_etree, m_firstRowElt, m_pivotperm.indices().data());
m_isEtreeOk = false;
} }
} }
@ -531,7 +560,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
if(nonzeroCol<n) if(nonzeroCol<n)
{ {
// Permute the triangular factor to put the 'dead' columns to the end // Permute the triangular factor to put the 'dead' columns to the end
MatrixType tempR(m_R); QRMatrixType tempR(m_R);
m_R = tempR * m_pivotperm; m_R = tempR * m_pivotperm;
// Update the column permutation // Update the column permutation

View File

@ -107,6 +107,16 @@ inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *N
return umfpack_zi_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info); return umfpack_zi_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info);
} }
namespace internal {
template<typename T> struct umfpack_helper_is_sparse_plain : false_type {};
template<typename Scalar, int Options, typename StorageIndex>
struct umfpack_helper_is_sparse_plain<SparseMatrix<Scalar,Options,StorageIndex> >
: true_type {};
template<typename Scalar, int Options, typename StorageIndex>
struct umfpack_helper_is_sparse_plain<MappedSparseMatrix<Scalar,Options,StorageIndex> >
: true_type {};
}
/** \ingroup UmfPackSupport_Module /** \ingroup UmfPackSupport_Module
* \brief A sparse LU factorization and solver based on UmfPack * \brief A sparse LU factorization and solver based on UmfPack
* *
@ -192,10 +202,14 @@ class UmfPackLU : internal::noncopyable
* Note that the matrix should be column-major, and in compressed format for best performance. * Note that the matrix should be column-major, and in compressed format for best performance.
* \sa SparseMatrix::makeCompressed(). * \sa SparseMatrix::makeCompressed().
*/ */
void compute(const MatrixType& matrix) template<typename InputMatrixType>
void compute(const InputMatrixType& matrix)
{ {
analyzePattern(matrix); if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());
factorize(matrix); if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar());
grapInput(matrix.derived());
analyzePattern_impl();
factorize_impl();
} }
/** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
@ -230,23 +244,15 @@ class UmfPackLU : internal::noncopyable
* *
* \sa factorize(), compute() * \sa factorize(), compute()
*/ */
void analyzePattern(const MatrixType& matrix) template<typename InputMatrixType>
void analyzePattern(const InputMatrixType& matrix)
{ {
if(m_symbolic) if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());
umfpack_free_symbolic(&m_symbolic,Scalar()); if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar());
if(m_numeric)
umfpack_free_numeric(&m_numeric,Scalar());
grapInput(matrix); grapInput(matrix.derived());
int errorCode = 0; analyzePattern_impl();
errorCode = umfpack_symbolic(matrix.rows(), matrix.cols(), m_outerIndexPtr, m_innerIndexPtr, m_valuePtr,
&m_symbolic, 0, 0);
m_isInitialized = true;
m_info = errorCode ? InvalidInput : Success;
m_analysisIsOk = true;
m_factorizationIsOk = false;
} }
/** Performs a numeric decomposition of \a matrix /** Performs a numeric decomposition of \a matrix
@ -255,20 +261,16 @@ class UmfPackLU : internal::noncopyable
* *
* \sa analyzePattern(), compute() * \sa analyzePattern(), compute()
*/ */
void factorize(const MatrixType& matrix) template<typename InputMatrixType>
void factorize(const InputMatrixType& matrix)
{ {
eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()"); eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()");
if(m_numeric) if(m_numeric)
umfpack_free_numeric(&m_numeric,Scalar()); umfpack_free_numeric(&m_numeric,Scalar());
grapInput(matrix); grapInput(matrix.derived());
int errorCode; factorize_impl();
errorCode = umfpack_numeric(m_outerIndexPtr, m_innerIndexPtr, m_valuePtr,
m_symbolic, &m_numeric, 0, 0);
m_info = errorCode ? NumericalIssue : Success;
m_factorizationIsOk = true;
} }
#ifndef EIGEN_PARSED_BY_DOXYGEN #ifndef EIGEN_PARSED_BY_DOXYGEN
@ -283,19 +285,20 @@ class UmfPackLU : internal::noncopyable
protected: protected:
void init() void init()
{ {
m_info = InvalidInput; m_info = InvalidInput;
m_isInitialized = false; m_isInitialized = false;
m_numeric = 0; m_numeric = 0;
m_symbolic = 0; m_symbolic = 0;
m_outerIndexPtr = 0; m_outerIndexPtr = 0;
m_innerIndexPtr = 0; m_innerIndexPtr = 0;
m_valuePtr = 0; m_valuePtr = 0;
m_extractedDataAreDirty = true;
} }
void grapInput(const MatrixType& mat) template<typename InputMatrixType>
void grapInput_impl(const InputMatrixType& mat, internal::true_type)
{ {
m_copyMatrix.resize(mat.rows(), mat.cols()); m_copyMatrix.resize(mat.rows(), mat.cols());
if( ((MatrixType::Flags&RowMajorBit)==RowMajorBit) || sizeof(typename MatrixType::Index)!=sizeof(int) || !mat.isCompressed() ) if( ((MatrixType::Flags&RowMajorBit)==RowMajorBit) || sizeof(typename MatrixType::Index)!=sizeof(int) || !mat.isCompressed() )
@ -314,6 +317,45 @@ class UmfPackLU : internal::noncopyable
} }
} }
template<typename InputMatrixType>
void grapInput_impl(const InputMatrixType& mat, internal::false_type)
{
m_copyMatrix = mat;
m_outerIndexPtr = m_copyMatrix.outerIndexPtr();
m_innerIndexPtr = m_copyMatrix.innerIndexPtr();
m_valuePtr = m_copyMatrix.valuePtr();
}
template<typename InputMatrixType>
void grapInput(const InputMatrixType& mat)
{
grapInput_impl(mat, internal::umfpack_helper_is_sparse_plain<InputMatrixType>());
}
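grapInput() above now forwards to one of the two grapInput_impl() overloads, selected at compile time by the umfpack_helper_is_sparse_plain trait: a plain SparseMatrix or MappedSparseMatrix can hand its buffers to UmfPack directly, while any other input is first copied into m_copyMatrix. A stripped-down sketch of the same true_type/false_type dispatch idiom (all names below are illustrative, not Eigen's):

#include <iostream>

struct true_type  {};   // stand-ins for internal::true_type / internal::false_type
struct false_type {};

template<typename T> struct is_plain_buffer          : false_type {};
template<>           struct is_plain_buffer<double*> : true_type  {};

template<typename T> void use_impl(const T&, true_type)  { std::cout << "borrow the existing buffer\n"; }
template<typename T> void use_impl(const T&, false_type) { std::cout << "copy into local storage\n"; }

// Single entry point; the trait value picks the overload at compile time.
template<typename T> void use(const T& x) { use_impl(x, is_plain_buffer<T>()); }

int main()
{
  double buf[3] = {1.0, 2.0, 3.0};
  double* p = buf;
  use(p);    // prints: borrow the existing buffer
  use(3.5);  // prints: copy into local storage
  return 0;
}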
void analyzePattern_impl()
{
int errorCode = 0;
errorCode = umfpack_symbolic(m_copyMatrix.rows(), m_copyMatrix.cols(), m_outerIndexPtr, m_innerIndexPtr, m_valuePtr,
&m_symbolic, 0, 0);
m_isInitialized = true;
m_info = errorCode ? InvalidInput : Success;
m_analysisIsOk = true;
m_factorizationIsOk = false;
m_extractedDataAreDirty = true;
}
void factorize_impl()
{
int errorCode;
errorCode = umfpack_numeric(m_outerIndexPtr, m_innerIndexPtr, m_valuePtr,
m_symbolic, &m_numeric, 0, 0);
m_info = errorCode ? NumericalIssue : Success;
m_factorizationIsOk = true;
m_extractedDataAreDirty = true;
}
// cached data to reduce reallocation, etc. // cached data to reduce reallocation, etc.
mutable LUMatrixType m_l; mutable LUMatrixType m_l;
mutable LUMatrixType m_u; mutable LUMatrixType m_u;

View File

@ -452,20 +452,12 @@ macro(ei_set_build_string)
endmacro(ei_set_build_string) endmacro(ei_set_build_string)
macro(ei_is_64bit_env VAR) macro(ei_is_64bit_env VAR)
if(CMAKE_SIZEOF_VOID_P EQUAL 8)
file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/is64.cpp"
"int main() { return (sizeof(int*) == 8 ? 1 : 0); }
")
try_run(run_res compile_res
${CMAKE_CURRENT_BINARY_DIR} "${CMAKE_CURRENT_BINARY_DIR}/is64.cpp"
RUN_OUTPUT_VARIABLE run_output)
if(compile_res AND run_res)
set(${VAR} ${run_res})
elseif(CMAKE_CL_64)
set(${VAR} 1)
elseif("$ENV{Platform}" STREQUAL "X64") # nmake 64 bit
set(${VAR} 1) set(${VAR} 1)
elseif(CMAKE_SIZEOF_VOID_P EQUAL 4)
set(${VAR} 0)
else()
message(WARNING "Unsupported pointer size. Please contact the authors.")
endif() endif()
endmacro(ei_is_64bit_env) endmacro(ei_is_64bit_env)

View File

@ -86,4 +86,4 @@ include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(CHOLMOD DEFAULT_MSG find_package_handle_standard_args(CHOLMOD DEFAULT_MSG
CHOLMOD_INCLUDES CHOLMOD_LIBRARIES) CHOLMOD_INCLUDES CHOLMOD_LIBRARIES)
mark_as_advanced(CHOLMOD_INCLUDES CHOLMOD_LIBRARIES AMD_LIBRARY COLAMD_LIBRARY SUITESPARSE_LIBRARY) mark_as_advanced(CHOLMOD_INCLUDES CHOLMOD_LIBRARIES AMD_LIBRARY COLAMD_LIBRARY SUITESPARSE_LIBRARY CAMD_LIBRARY CCOLAMD_LIBRARY CHOLMOD_METIS_LIBRARY)

View File

@ -115,5 +115,5 @@ include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(FFTW DEFAULT_MSG find_package_handle_standard_args(FFTW DEFAULT_MSG
FFTW_INCLUDES FFTW_LIBRARIES) FFTW_INCLUDES FFTW_LIBRARIES)
mark_as_advanced(FFTW_INCLUDES FFTW_LIBRARIES) mark_as_advanced(FFTW_INCLUDES FFTW_LIBRARIES FFTW_LIB FFTWF_LIB FFTWL_LIB)

View File

@ -11,15 +11,49 @@ find_path(METIS_INCLUDES
$ENV{METISDIR} $ENV{METISDIR}
${INCLUDE_INSTALL_DIR} ${INCLUDE_INSTALL_DIR}
PATH_SUFFIXES PATH_SUFFIXES
.
metis metis
include include
) )
macro(_metis_check_version)
file(READ "${METIS_INCLUDES}/metis.h" _metis_version_header)
string(REGEX MATCH "define[ \t]+METIS_VER_MAJOR[ \t]+([0-9]+)" _metis_major_version_match "${_metis_version_header}")
set(METIS_MAJOR_VERSION "${CMAKE_MATCH_1}")
string(REGEX MATCH "define[ \t]+METIS_VER_MINOR[ \t]+([0-9]+)" _metis_minor_version_match "${_metis_version_header}")
set(METIS_MINOR_VERSION "${CMAKE_MATCH_1}")
string(REGEX MATCH "define[ \t]+METIS_VER_SUBMINOR[ \t]+([0-9]+)" _metis_subminor_version_match "${_metis_version_header}")
set(METIS_SUBMINOR_VERSION "${CMAKE_MATCH_1}")
if(NOT METIS_MAJOR_VERSION)
message(WARNING "Could not determine Metis version. Assuming version 4.0.0")
set(METIS_VERSION 4.0.0)
else()
set(METIS_VERSION ${METIS_MAJOR_VERSION}.${METIS_MINOR_VERSION}.${METIS_SUBMINOR_VERSION})
endif()
if(${METIS_VERSION} VERSION_LESS ${Metis_FIND_VERSION})
set(METIS_VERSION_OK FALSE)
else()
set(METIS_VERSION_OK TRUE)
endif()
if(NOT METIS_VERSION_OK)
message(STATUS "Metis version ${METIS_VERSION} found in ${METIS_INCLUDES}, "
"but at least version ${Metis_FIND_VERSION} is required")
endif(NOT METIS_VERSION_OK)
endmacro(_metis_check_version)
if(METIS_INCLUDES AND Metis_FIND_VERSION)
_metis_check_version()
else()
set(METIS_VERSION_OK TRUE)
endif()
find_library(METIS_LIBRARIES metis PATHS $ENV{METISDIR} ${LIB_INSTALL_DIR} PATH_SUFFIXES lib) find_library(METIS_LIBRARIES metis PATHS $ENV{METISDIR} ${LIB_INSTALL_DIR} PATH_SUFFIXES lib)
include(FindPackageHandleStandardArgs) include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(METIS DEFAULT_MSG find_package_handle_standard_args(METIS DEFAULT_MSG
METIS_INCLUDES METIS_LIBRARIES) METIS_INCLUDES METIS_LIBRARIES METIS_VERSION_OK)
mark_as_advanced(METIS_INCLUDES METIS_LIBRARIES) mark_as_advanced(METIS_INCLUDES METIS_LIBRARIES)

View File

@ -33,7 +33,7 @@ function(workaround_9220 language language_works)
file(WRITE ${CMAKE_BINARY_DIR}/language_tests/${language}/CMakeLists.txt file(WRITE ${CMAKE_BINARY_DIR}/language_tests/${language}/CMakeLists.txt
${text}) ${text})
execute_process( execute_process(
COMMAND ${CMAKE_COMMAND} . COMMAND ${CMAKE_COMMAND} . -G "${CMAKE_GENERATOR}"
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/language_tests/${language} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/language_tests/${language}
RESULT_VARIABLE return_code RESULT_VARIABLE return_code
OUTPUT_QUIET OUTPUT_QUIET

View File

@ -67,10 +67,10 @@ P.rightCols<cols>() // P(:, end-cols+1:end)
P.rightCols(cols) // P(:, end-cols+1:end) P.rightCols(cols) // P(:, end-cols+1:end)
P.topRows<rows>() // P(1:rows, :) P.topRows<rows>() // P(1:rows, :)
P.topRows(rows) // P(1:rows, :) P.topRows(rows) // P(1:rows, :)
P.middleRows<rows>(i) // P(:, i+1:i+rows) P.middleRows<rows>(i) // P(i+1:i+rows, :)
P.middleRows(i, rows) // P(:, i+1:i+rows) P.middleRows(i, rows) // P(i+1:i+rows, :)
P.bottomRows<rows>() // P(:, end-rows+1:end) P.bottomRows<rows>() // P(end-rows+1:end, :)
P.bottomRows(rows) // P(:, end-rows+1:end) P.bottomRows(rows) // P(end-rows+1:end, :)
P.topLeftCorner(rows, cols) // P(1:rows, 1:cols) P.topLeftCorner(rows, cols) // P(1:rows, 1:cols)
P.topRightCorner(rows, cols) // P(1:rows, end-cols+1:end) P.topRightCorner(rows, cols) // P(1:rows, end-cols+1:end)
P.bottomLeftCorner(rows, cols) // P(end-rows+1:end, 1:cols) P.bottomLeftCorner(rows, cols) // P(end-rows+1:end, 1:cols)
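The corrected comments above map the row-block accessors to MATLAB row slices (the old comments showed column slices). A small sketch of what they select:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd P(5, 3);
  for (int i = 0; i < 5; ++i)
    for (int j = 0; j < 3; ++j)
      P(i, j) = 10 * i + j;

  std::cout << P.middleRows(1, 2) << "\n\n"; // rows 1..2, all columns  ~ P(2:3, :) in MATLAB
  std::cout << P.bottomRows(2)    << "\n";   // last two rows           ~ P(end-1:end, :)
  return 0;
}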

View File

@ -71,11 +71,10 @@ i.e either row major or column major. The default is column major. Most arithmet
<td> Constant or Random Insertion</td> <td> Constant or Random Insertion</td>
<td> <td>
\code \code
sm1.setZero(); // Set the matrix with zero elements sm1.setZero();
sm1.setConstant(val); //Replace all the nonzero values with val
\endcode \endcode
</td> </td>
<td> The matrix sm1 should have been created before ???</td> <td>Remove all non-zero coefficients</td>
</tr> </tr>
</table> </table>
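The corrected table entry above documents that setZero() removes every stored coefficient of an existing sparse matrix (the setConstant() line was dropped by this hunk). A quick sketch:

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> sm1(4, 4);
  sm1.insert(0, 1) = 1.0;
  sm1.insert(2, 3) = 2.0;
  std::cout << "nonzeros before: " << sm1.nonZeros() << "\n"; // 2

  sm1.setZero();                                              // drop all non-zero coefficients
  std::cout << "nonzeros after:  " << sm1.nonZeros() << "\n"; // 0
  return 0;
}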

View File

@ -6,12 +6,10 @@ foreach(example_src ${examples_SRCS})
if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO) if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
target_link_libraries(${example} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO}) target_link_libraries(${example} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
endif() endif()
get_target_property(example_executable
${example} LOCATION)
add_custom_command( add_custom_command(
TARGET ${example} TARGET ${example}
POST_BUILD POST_BUILD
COMMAND ${example_executable} COMMAND ${example}
ARGS >${CMAKE_CURRENT_BINARY_DIR}/${example}.out ARGS >${CMAKE_CURRENT_BINARY_DIR}/${example}.out
) )
add_dependencies(all_examples ${example}) add_dependencies(all_examples ${example})

View File

@ -14,12 +14,10 @@ foreach(snippet_src ${snippets_SRCS})
if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO) if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
target_link_libraries(${compile_snippet_target} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO}) target_link_libraries(${compile_snippet_target} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
endif() endif()
get_target_property(compile_snippet_executable
${compile_snippet_target} LOCATION)
add_custom_command( add_custom_command(
TARGET ${compile_snippet_target} TARGET ${compile_snippet_target}
POST_BUILD POST_BUILD
COMMAND ${compile_snippet_executable} COMMAND ${compile_snippet_target}
ARGS >${CMAKE_CURRENT_BINARY_DIR}/${snippet}.out ARGS >${CMAKE_CURRENT_BINARY_DIR}/${snippet}.out
) )
add_dependencies(all_snippets ${compile_snippet_target}) add_dependencies(all_snippets ${compile_snippet_target})

View File

@ -1,4 +1,3 @@
if(NOT EIGEN_TEST_NOQT) if(NOT EIGEN_TEST_NOQT)
find_package(Qt4) find_package(Qt4)
if(QT4_FOUND) if(QT4_FOUND)
@ -6,7 +5,6 @@ if(NOT EIGEN_TEST_NOQT)
endif() endif()
endif(NOT EIGEN_TEST_NOQT) endif(NOT EIGEN_TEST_NOQT)
if(QT4_FOUND) if(QT4_FOUND)
add_executable(Tutorial_sparse_example Tutorial_sparse_example.cpp Tutorial_sparse_example_details.cpp) add_executable(Tutorial_sparse_example Tutorial_sparse_example.cpp Tutorial_sparse_example_details.cpp)
target_link_libraries(Tutorial_sparse_example ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO} ${QT_QTCORE_LIBRARY} ${QT_QTGUI_LIBRARY}) target_link_libraries(Tutorial_sparse_example ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO} ${QT_QTCORE_LIBRARY} ${QT_QTGUI_LIBRARY})
@ -19,3 +17,4 @@ if(QT4_FOUND)
add_dependencies(all_examples Tutorial_sparse_example) add_dependencies(all_examples Tutorial_sparse_example)
endif(QT4_FOUND) endif(QT4_FOUND)

View File

@ -26,6 +26,12 @@ ei_add_failtest("block_on_const_type_actually_const_1")
ei_add_failtest("transpose_on_const_type_actually_const") ei_add_failtest("transpose_on_const_type_actually_const")
ei_add_failtest("diagonal_on_const_type_actually_const") ei_add_failtest("diagonal_on_const_type_actually_const")
ei_add_failtest("ref_1")
ei_add_failtest("ref_2")
ei_add_failtest("ref_3")
ei_add_failtest("ref_4")
ei_add_failtest("ref_5")
if (EIGEN_FAILTEST_FAILURE_COUNT) if (EIGEN_FAILTEST_FAILURE_COUNT)
message(FATAL_ERROR message(FATAL_ERROR
"${EIGEN_FAILTEST_FAILURE_COUNT} out of ${EIGEN_FAILTEST_COUNT} failtests FAILED. " "${EIGEN_FAILTEST_FAILURE_COUNT} out of ${EIGEN_FAILTEST_COUNT} failtests FAILED. "

View File

@ -66,7 +66,7 @@ endif()
find_package(Pastix) find_package(Pastix)
find_package(Scotch) find_package(Scotch)
find_package(Metis) find_package(Metis 5.0 REQUIRED)
if(PASTIX_FOUND AND BLAS_FOUND) if(PASTIX_FOUND AND BLAS_FOUND)
add_definitions("-DEIGEN_PASTIX_SUPPORT") add_definitions("-DEIGEN_PASTIX_SUPPORT")
include_directories(${PASTIX_INCLUDES}) include_directories(${PASTIX_INCLUDES})
@ -279,6 +279,7 @@ ei_add_property(EIGEN_TESTING_SUMMARY "CXX_FLAGS: ${CMAKE_CXX_FLAGS}\n")
ei_add_property(EIGEN_TESTING_SUMMARY "Sparse lib flags: ${SPARSE_LIBS}\n") ei_add_property(EIGEN_TESTING_SUMMARY "Sparse lib flags: ${SPARSE_LIBS}\n")
option(EIGEN_TEST_EIGEN2 "Run whole Eigen2 test suite against EIGEN2_SUPPORT" OFF) option(EIGEN_TEST_EIGEN2 "Run whole Eigen2 test suite against EIGEN2_SUPPORT" OFF)
mark_as_advanced(EIGEN_TEST_EIGEN2)
if(EIGEN_TEST_EIGEN2) if(EIGEN_TEST_EIGEN2)
add_subdirectory(eigen2) add_subdirectory(eigen2)
endif() endif()

View File

@ -320,33 +320,35 @@ template<typename MatrixType> void cholesky_definiteness(const MatrixType& m)
{ {
eigen_assert(m.rows() == 2 && m.cols() == 2); eigen_assert(m.rows() == 2 && m.cols() == 2);
MatrixType mat; MatrixType mat;
LDLT<MatrixType> ldlt(2);
{ {
mat << 1, 0, 0, -1; mat << 1, 0, 0, -1;
LDLT<MatrixType> ldlt(mat); ldlt.compute(mat);
VERIFY(!ldlt.isNegative()); VERIFY(!ldlt.isNegative());
VERIFY(!ldlt.isPositive()); VERIFY(!ldlt.isPositive());
} }
{ {
mat << 1, 2, 2, 1; mat << 1, 2, 2, 1;
LDLT<MatrixType> ldlt(mat); ldlt.compute(mat);
VERIFY(!ldlt.isNegative()); VERIFY(!ldlt.isNegative());
VERIFY(!ldlt.isPositive()); VERIFY(!ldlt.isPositive());
} }
{ {
mat << 0, 0, 0, 0; mat << 0, 0, 0, 0;
LDLT<MatrixType> ldlt(mat); ldlt.compute(mat);
VERIFY(ldlt.isNegative()); VERIFY(ldlt.isNegative());
VERIFY(ldlt.isPositive()); VERIFY(ldlt.isPositive());
} }
{ {
mat << 0, 0, 0, 1; mat << 0, 0, 0, 1;
LDLT<MatrixType> ldlt(mat); ldlt.compute(mat);
VERIFY(!ldlt.isNegative()); VERIFY(!ldlt.isNegative());
VERIFY(ldlt.isPositive()); VERIFY(ldlt.isPositive());
} }
{ {
mat << -1, 0, 0, 0; mat << -1, 0, 0, 0;
LDLT<MatrixType> ldlt(mat); ldlt.compute(mat);
VERIFY(ldlt.isNegative()); VERIFY(ldlt.isNegative());
VERIFY(!ldlt.isPositive()); VERIFY(!ldlt.isPositive());
} }
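The test above now allocates a single LDLT of size 2 and recomputes it for each case, instead of constructing a fresh decomposition per matrix. A short sketch of the same reuse pattern (matrices chosen arbitrarily):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::LDLT<Eigen::Matrix2d> ldlt(2);  // preallocate workspace for 2x2 problems

  Eigen::Matrix2d m;
  m << 1, 0,
       0, -1;
  ldlt.compute(m);                       // indefinite: neither isPositive() nor isNegative()
  std::cout << ldlt.isPositive() << " " << ldlt.isNegative() << "\n"; // 0 0

  m << 2, 0,
       0, 3;
  ldlt.compute(m);                       // positive definite
  std::cout << ldlt.isPositive() << " " << ldlt.isNegative() << "\n"; // 1 0
  return 0;
}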

View File

@ -9,6 +9,8 @@
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN2_SUPPORT #define EIGEN2_SUPPORT
#define EIGEN_NO_EIGEN2_DEPRECATED_WARNING
#define EIGEN_NO_STATIC_ASSERT #define EIGEN_NO_STATIC_ASSERT
#include "main.h" #include "main.h"
#include <functional> #include <functional>

View File

@ -87,32 +87,6 @@ template<typename T> void check_dynaligned()
delete obj; delete obj;
} }
template<typename T> void check_custom_new_delete()
{
{
T* t = new T;
delete t;
}
{
std::size_t N = internal::random<std::size_t>(1,10);
T* t = new T[N];
delete[] t;
}
#ifdef EIGEN_ALIGN
{
T* t = static_cast<T *>((T::operator new)(sizeof(T)));
(T::operator delete)(t, sizeof(T));
}
{
T* t = static_cast<T *>((T::operator new)(sizeof(T)));
(T::operator delete)(t);
}
#endif
}
void test_dynalloc() void test_dynalloc()
{ {
// low level dynamic memory allocation // low level dynamic memory allocation
@ -128,12 +102,6 @@ void test_dynalloc()
CALL_SUBTEST(check_dynaligned<Matrix4f>() ); CALL_SUBTEST(check_dynaligned<Matrix4f>() );
CALL_SUBTEST(check_dynaligned<Vector4d>() ); CALL_SUBTEST(check_dynaligned<Vector4d>() );
CALL_SUBTEST(check_dynaligned<Vector4i>() ); CALL_SUBTEST(check_dynaligned<Vector4i>() );
CALL_SUBTEST(check_dynaligned<Vector8f>() );
CALL_SUBTEST( check_custom_new_delete<Vector4f>() );
CALL_SUBTEST( check_custom_new_delete<Vector2f>() );
CALL_SUBTEST( check_custom_new_delete<Matrix4f>() );
CALL_SUBTEST( check_custom_new_delete<MatrixXi>() );
} }
// check static allocation, who knows ? // check static allocation, who knows ?

View File

@ -4,6 +4,7 @@ add_dependencies(eigen2_check eigen2_buildtests)
add_dependencies(buildtests eigen2_buildtests) add_dependencies(buildtests eigen2_buildtests)
add_definitions("-DEIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API") add_definitions("-DEIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API")
add_definitions("-DEIGEN_NO_EIGEN2_DEPRECATED_WARNING")
ei_add_test(eigen2_meta) ei_add_test(eigen2_meta)
ei_add_test(eigen2_sizeof) ei_add_test(eigen2_sizeof)

View File

@ -29,8 +29,6 @@ template<typename MatrixType> void adjoint(const MatrixType& m)
MatrixType m1 = MatrixType::Random(rows, cols), MatrixType m1 = MatrixType::Random(rows, cols),
m2 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols),
m3(rows, cols), m3(rows, cols),
mzero = MatrixType::Zero(rows, cols),
identity = SquareMatrixType::Identity(rows, rows),
square = SquareMatrixType::Random(rows, rows); square = SquareMatrixType::Random(rows, rows);
VectorType v1 = VectorType::Random(rows), VectorType v1 = VectorType::Random(rows),
v2 = VectorType::Random(rows), v2 = VectorType::Random(rows),

View File

@ -23,11 +23,8 @@ template<typename MatrixType> void basicStuff(const MatrixType& m)
m2 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols),
m3(rows, cols), m3(rows, cols),
mzero = MatrixType::Zero(rows, cols), mzero = MatrixType::Zero(rows, cols),
identity = Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime>
::Identity(rows, rows),
square = Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime>::Random(rows, rows); square = Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime>::Random(rows, rows);
VectorType v1 = VectorType::Random(rows), VectorType v1 = VectorType::Random(rows),
v2 = VectorType::Random(rows),
vzero = VectorType::Zero(rows); vzero = VectorType::Zero(rows);
Scalar x = ei_random<Scalar>(); Scalar x = ei_random<Scalar>();

View File

@ -35,11 +35,8 @@ template<typename MatrixType> void cwiseops(const MatrixType& m)
mzero = MatrixType::Zero(rows, cols), mzero = MatrixType::Zero(rows, cols),
mones = MatrixType::Ones(rows, cols), mones = MatrixType::Ones(rows, cols),
identity = Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> identity = Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime>
::Identity(rows, rows), ::Identity(rows, rows);
square = Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime>::Random(rows, rows); VectorType vzero = VectorType::Zero(rows),
VectorType v1 = VectorType::Random(rows),
v2 = VectorType::Random(rows),
vzero = VectorType::Zero(rows),
vones = VectorType::Ones(rows), vones = VectorType::Ones(rows),
v3(rows); v3(rows);

View File

@ -392,6 +392,7 @@ template<typename Scalar> void geometry(void)
#define VERIFY_EULER(I,J,K, X,Y,Z) { \ #define VERIFY_EULER(I,J,K, X,Y,Z) { \
Vector3 ea = m.eulerAngles(I,J,K); \ Vector3 ea = m.eulerAngles(I,J,K); \
Matrix3 m1 = Matrix3(AngleAxisx(ea[0], Vector3::Unit##X()) * AngleAxisx(ea[1], Vector3::Unit##Y()) * AngleAxisx(ea[2], Vector3::Unit##Z())); \ Matrix3 m1 = Matrix3(AngleAxisx(ea[0], Vector3::Unit##X()) * AngleAxisx(ea[1], Vector3::Unit##Y()) * AngleAxisx(ea[2], Vector3::Unit##Z())); \
VERIFY_IS_APPROX(m, m1); \
VERIFY_IS_APPROX(m, Matrix3(AngleAxisx(ea[0], Vector3::Unit##X()) * AngleAxisx(ea[1], Vector3::Unit##Y()) * AngleAxisx(ea[2], Vector3::Unit##Z()))); \ VERIFY_IS_APPROX(m, Matrix3(AngleAxisx(ea[0], Vector3::Unit##X()) * AngleAxisx(ea[1], Vector3::Unit##Y()) * AngleAxisx(ea[2], Vector3::Unit##Z()))); \
} }
VERIFY_EULER(0,1,2, X,Y,Z); VERIFY_EULER(0,1,2, X,Y,Z);

View File

@ -394,6 +394,7 @@ template<typename Scalar> void geometry(void)
#define VERIFY_EULER(I,J,K, X,Y,Z) { \ #define VERIFY_EULER(I,J,K, X,Y,Z) { \
Vector3 ea = m.eulerAngles(I,J,K); \ Vector3 ea = m.eulerAngles(I,J,K); \
Matrix3 m1 = Matrix3(AngleAxisx(ea[0], Vector3::Unit##X()) * AngleAxisx(ea[1], Vector3::Unit##Y()) * AngleAxisx(ea[2], Vector3::Unit##Z())); \ Matrix3 m1 = Matrix3(AngleAxisx(ea[0], Vector3::Unit##X()) * AngleAxisx(ea[1], Vector3::Unit##Y()) * AngleAxisx(ea[2], Vector3::Unit##Z())); \
VERIFY_IS_APPROX(m, m1); \
VERIFY_IS_APPROX(m, Matrix3(AngleAxisx(ea[0], Vector3::Unit##X()) * AngleAxisx(ea[1], Vector3::Unit##Y()) * AngleAxisx(ea[2], Vector3::Unit##Z()))); \ VERIFY_IS_APPROX(m, Matrix3(AngleAxisx(ea[0], Vector3::Unit##X()) * AngleAxisx(ea[1], Vector3::Unit##Y()) * AngleAxisx(ea[2], Vector3::Unit##Z()))); \
} }
VERIFY_EULER(0,1,2, X,Y,Z); VERIFY_EULER(0,1,2, X,Y,Z);

View File

@ -25,7 +25,6 @@ template<typename MatrixType> void inverse(const MatrixType& m)
MatrixType m1 = MatrixType::Random(rows, cols), MatrixType m1 = MatrixType::Random(rows, cols),
m2(rows, cols), m2(rows, cols),
mzero = MatrixType::Zero(rows, cols),
identity = MatrixType::Identity(rows, rows); identity = MatrixType::Identity(rows, rows);
while(ei_abs(m1.determinant()) < RealScalar(0.1) && rows <= 8) while(ei_abs(m1.determinant()) < RealScalar(0.1) && rows <= 8)

View File

@ -25,8 +25,7 @@ template<typename MatrixType> void linearStructure(const MatrixType& m)
// to test it, hence I consider that we will have tested Random.h // to test it, hence I consider that we will have tested Random.h
MatrixType m1 = MatrixType::Random(rows, cols), MatrixType m1 = MatrixType::Random(rows, cols),
m2 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols),
m3(rows, cols), m3(rows, cols);
mzero = MatrixType::Zero(rows, cols);
Scalar s1 = ei_random<Scalar>(); Scalar s1 = ei_random<Scalar>();
while (ei_abs(s1)<1e-3) s1 = ei_random<Scalar>(); while (ei_abs(s1)<1e-3) s1 = ei_random<Scalar>();

View File

@ -25,22 +25,12 @@ template<typename MatrixType> void nomalloc(const MatrixType& m)
*/ */
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
int rows = m.rows(); int rows = m.rows();
int cols = m.cols(); int cols = m.cols();
MatrixType m1 = MatrixType::Random(rows, cols), MatrixType m1 = MatrixType::Random(rows, cols),
m2 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols);
m3(rows, cols),
mzero = MatrixType::Zero(rows, cols),
identity = Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime>
::Identity(rows, rows),
square = Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime>
::Random(rows, rows);
VectorType v1 = VectorType::Random(rows),
v2 = VectorType::Random(rows),
vzero = VectorType::Zero(rows);
Scalar s1 = ei_random<Scalar>(); Scalar s1 = ei_random<Scalar>();

View File

@ -51,16 +51,10 @@ template<typename MatrixType> void submatrices(const MatrixType& m)
MatrixType m1 = MatrixType::Random(rows, cols), MatrixType m1 = MatrixType::Random(rows, cols),
m2 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols),
m3(rows, cols), m3(rows, cols),
mzero = MatrixType::Zero(rows, cols),
ones = MatrixType::Ones(rows, cols), ones = MatrixType::Ones(rows, cols),
identity = Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime>
::Identity(rows, rows),
square = Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> square = Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime>
::Random(rows, rows); ::Random(rows, rows);
VectorType v1 = VectorType::Random(rows), VectorType v1 = VectorType::Random(rows);
v2 = VectorType::Random(rows),
v3 = VectorType::Random(rows),
vzero = VectorType::Zero(rows);
Scalar s1 = ei_random<Scalar>(); Scalar s1 = ei_random<Scalar>();

View File

@ -13,7 +13,6 @@ template<typename MatrixType> void triangular(const MatrixType& m)
{ {
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
RealScalar largerEps = 10*test_precision<RealScalar>(); RealScalar largerEps = 10*test_precision<RealScalar>();
@ -25,16 +24,7 @@ template<typename MatrixType> void triangular(const MatrixType& m)
m3(rows, cols), m3(rows, cols),
m4(rows, cols), m4(rows, cols),
r1(rows, cols), r1(rows, cols),
r2(rows, cols), r2(rows, cols);
mzero = MatrixType::Zero(rows, cols),
mones = MatrixType::Ones(rows, cols),
identity = Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime>
::Identity(rows, rows),
square = Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime>
::Random(rows, rows);
VectorType v1 = VectorType::Random(rows),
v2 = VectorType::Random(rows),
vzero = VectorType::Zero(rows);
MatrixType m1up = m1.template part<Eigen::UpperTriangular>(); MatrixType m1up = m1.template part<Eigen::UpperTriangular>();
MatrixType m2up = m2.template part<Eigen::UpperTriangular>(); MatrixType m2up = m2.template part<Eigen::UpperTriangular>();

View File

@ -40,8 +40,7 @@ template<typename MatrixType> void product(const MatrixType& m)
// to test it, hence I consider that we will have tested Random.h // to test it, hence I consider that we will have tested Random.h
MatrixType m1 = MatrixType::Random(rows, cols), MatrixType m1 = MatrixType::Random(rows, cols),
m2 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols),
m3(rows, cols), m3(rows, cols);
mzero = MatrixType::Zero(rows, cols);
RowSquareMatrixType RowSquareMatrixType
identity = RowSquareMatrixType::Identity(rows, rows), identity = RowSquareMatrixType::Identity(rows, rows),
square = RowSquareMatrixType::Random(rows, rows), square = RowSquareMatrixType::Random(rows, rows),
@ -49,9 +48,7 @@ template<typename MatrixType> void product(const MatrixType& m)
ColSquareMatrixType ColSquareMatrixType
square2 = ColSquareMatrixType::Random(cols, cols), square2 = ColSquareMatrixType::Random(cols, cols),
res2 = ColSquareMatrixType::Random(cols, cols); res2 = ColSquareMatrixType::Random(cols, cols);
RowVectorType v1 = RowVectorType::Random(rows), RowVectorType v1 = RowVectorType::Random(rows);
v2 = RowVectorType::Random(rows),
vzero = RowVectorType::Zero(rows);
ColVectorType vc2 = ColVectorType::Random(cols), vcres(cols); ColVectorType vc2 = ColVectorType::Random(cols), vcres(cols);
OtherMajorMatrixType tm1 = m1; OtherMajorMatrixType tm1 = m1;

View File

@ -8,6 +8,7 @@
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN2_SUPPORT #define EIGEN2_SUPPORT
#define EIGEN_NO_EIGEN2_DEPRECATED_WARNING
#include "main.h" #include "main.h"

View File

@ -29,7 +29,21 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
MatrixType a = MatrixType::Random(rows,cols); MatrixType a = MatrixType::Random(rows,cols);
MatrixType a1 = MatrixType::Random(rows,cols); MatrixType a1 = MatrixType::Random(rows,cols);
MatrixType symmA = a.adjoint() * a + a1.adjoint() * a1; MatrixType symmA = a.adjoint() * a + a1.adjoint() * a1;
MatrixType symmC = symmA;
// randomly nullify some rows/columns
{
Index count = 1;//internal::random<Index>(-cols,cols);
for(Index k=0; k<count; ++k)
{
Index i = internal::random<Index>(0,cols-1);
symmA.row(i).setZero();
symmA.col(i).setZero();
}
}
symmA.template triangularView<StrictlyUpper>().setZero(); symmA.template triangularView<StrictlyUpper>().setZero();
symmC.template triangularView<StrictlyUpper>().setZero();
MatrixType b = MatrixType::Random(rows,cols); MatrixType b = MatrixType::Random(rows,cols);
MatrixType b1 = MatrixType::Random(rows,cols); MatrixType b1 = MatrixType::Random(rows,cols);
@ -40,7 +54,7 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
SelfAdjointEigenSolver<MatrixType> eiDirect; SelfAdjointEigenSolver<MatrixType> eiDirect;
eiDirect.computeDirect(symmA); eiDirect.computeDirect(symmA);
// generalized eigen pb // generalized eigen pb
GeneralizedSelfAdjointEigenSolver<MatrixType> eiSymmGen(symmA, symmB); GeneralizedSelfAdjointEigenSolver<MatrixType> eiSymmGen(symmC, symmB);
VERIFY_IS_EQUAL(eiSymm.info(), Success); VERIFY_IS_EQUAL(eiSymm.info(), Success);
VERIFY((symmA.template selfadjointView<Lower>() * eiSymm.eigenvectors()).isApprox( VERIFY((symmA.template selfadjointView<Lower>() * eiSymm.eigenvectors()).isApprox(
@ -57,27 +71,28 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmNoEivecs.eigenvalues()); VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmNoEivecs.eigenvalues());
// generalized eigen problem Ax = lBx // generalized eigen problem Ax = lBx
eiSymmGen.compute(symmA, symmB,Ax_lBx); eiSymmGen.compute(symmC, symmB,Ax_lBx);
VERIFY_IS_EQUAL(eiSymmGen.info(), Success); VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
VERIFY((symmA.template selfadjointView<Lower>() * eiSymmGen.eigenvectors()).isApprox( VERIFY((symmC.template selfadjointView<Lower>() * eiSymmGen.eigenvectors()).isApprox(
symmB.template selfadjointView<Lower>() * (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps)); symmB.template selfadjointView<Lower>() * (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));
// generalized eigen problem BAx = lx // generalized eigen problem BAx = lx
eiSymmGen.compute(symmA, symmB,BAx_lx); eiSymmGen.compute(symmC, symmB,BAx_lx);
VERIFY_IS_EQUAL(eiSymmGen.info(), Success); VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
VERIFY((symmB.template selfadjointView<Lower>() * (symmA.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox( VERIFY((symmB.template selfadjointView<Lower>() * (symmC.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
(eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps)); (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));
// generalized eigen problem ABx = lx // generalized eigen problem ABx = lx
eiSymmGen.compute(symmA, symmB,ABx_lx); eiSymmGen.compute(symmC, symmB,ABx_lx);
VERIFY_IS_EQUAL(eiSymmGen.info(), Success); VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
VERIFY((symmA.template selfadjointView<Lower>() * (symmB.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox( VERIFY((symmC.template selfadjointView<Lower>() * (symmB.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
(eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps)); (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));
eiSymm.compute(symmC);
MatrixType sqrtSymmA = eiSymm.operatorSqrt(); MatrixType sqrtSymmA = eiSymm.operatorSqrt();
VERIFY_IS_APPROX(MatrixType(symmA.template selfadjointView<Lower>()), sqrtSymmA*sqrtSymmA); VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), sqrtSymmA*sqrtSymmA);
VERIFY_IS_APPROX(sqrtSymmA, symmA.template selfadjointView<Lower>()*eiSymm.operatorInverseSqrt()); VERIFY_IS_APPROX(sqrtSymmA, symmC.template selfadjointView<Lower>()*eiSymm.operatorInverseSqrt());
MatrixType id = MatrixType::Identity(rows, cols); MatrixType id = MatrixType::Identity(rows, cols);
VERIFY_IS_APPROX(id.template selfadjointView<Lower>().operatorNorm(), RealScalar(1)); VERIFY_IS_APPROX(id.template selfadjointView<Lower>().operatorNorm(), RealScalar(1));
@ -95,15 +110,15 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt()); VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt());
// test Tridiagonalization's methods // test Tridiagonalization's methods
Tridiagonalization<MatrixType> tridiag(symmA); Tridiagonalization<MatrixType> tridiag(symmC);
// FIXME tridiag.matrixQ().adjoint() does not work // FIXME tridiag.matrixQ().adjoint() does not work
VERIFY_IS_APPROX(MatrixType(symmA.template selfadjointView<Lower>()), tridiag.matrixQ() * tridiag.matrixT().eval() * MatrixType(tridiag.matrixQ()).adjoint()); VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), tridiag.matrixQ() * tridiag.matrixT().eval() * MatrixType(tridiag.matrixQ()).adjoint());
if (rows > 1) if (rows > 1)
{ {
// Test matrix with NaN // Test matrix with NaN
symmA(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN(); symmC(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
SelfAdjointEigenSolver<MatrixType> eiSymmNaN(symmA); SelfAdjointEigenSolver<MatrixType> eiSymmNaN(symmC);
VERIFY_IS_EQUAL(eiSymmNaN.info(), NoConvergence); VERIFY_IS_EQUAL(eiSymmNaN.info(), NoConvergence);
} }
} }
@ -113,8 +128,10 @@ void test_eigensolver_selfadjoint()
int s = 0; int s = 0;
for(int i = 0; i < g_repeat; i++) { for(int i = 0; i < g_repeat; i++) {
// very important to test 3x3 and 2x2 matrices since we provide special paths for them // very important to test 3x3 and 2x2 matrices since we provide special paths for them
CALL_SUBTEST_1( selfadjointeigensolver(Matrix2f()) );
CALL_SUBTEST_1( selfadjointeigensolver(Matrix2d()) ); CALL_SUBTEST_1( selfadjointeigensolver(Matrix2d()) );
CALL_SUBTEST_1( selfadjointeigensolver(Matrix3f()) ); CALL_SUBTEST_1( selfadjointeigensolver(Matrix3f()) );
CALL_SUBTEST_1( selfadjointeigensolver(Matrix3d()) );
CALL_SUBTEST_2( selfadjointeigensolver(Matrix4d()) ); CALL_SUBTEST_2( selfadjointeigensolver(Matrix4d()) );
s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4); s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4);
CALL_SUBTEST_3( selfadjointeigensolver(MatrixXf(s,s)) ); CALL_SUBTEST_3( selfadjointeigensolver(MatrixXf(s,s)) );

View File

@ -114,6 +114,32 @@ template<typename Scalar> void lines()
} }
} }
template<typename Scalar> void planes()
{
using std::abs;
typedef Hyperplane<Scalar, 3> Plane;
typedef Matrix<Scalar,3,1> Vector;
for(int i = 0; i < 10; i++)
{
Vector v0 = Vector::Random();
Vector v1(v0), v2(v0);
if(internal::random<double>(0,1)>0.25)
v1 += Vector::Random();
if(internal::random<double>(0,1)>0.25)
v2 += v1 * std::pow(internal::random<Scalar>(0,1),internal::random<int>(1,16));
if(internal::random<double>(0,1)>0.25)
v2 += Vector::Random() * std::pow(internal::random<Scalar>(0,1),internal::random<int>(1,16));
Plane p0 = Plane::Through(v0, v1, v2);
VERIFY_IS_APPROX(p0.normal().norm(), Scalar(1));
VERIFY_IS_MUCH_SMALLER_THAN(p0.absDistance(v0), Scalar(1));
VERIFY_IS_MUCH_SMALLER_THAN(p0.absDistance(v1), Scalar(1));
VERIFY_IS_MUCH_SMALLER_THAN(p0.absDistance(v2), Scalar(1));
}
}
template<typename Scalar> void hyperplane_alignment() template<typename Scalar> void hyperplane_alignment()
{ {
typedef Hyperplane<Scalar,3,AutoAlign> Plane3a; typedef Hyperplane<Scalar,3,AutoAlign> Plane3a;
@ -153,5 +179,7 @@ void test_geo_hyperplane()
CALL_SUBTEST_4( hyperplane(Hyperplane<std::complex<double>,5>()) ); CALL_SUBTEST_4( hyperplane(Hyperplane<std::complex<double>,5>()) );
CALL_SUBTEST_1( lines<float>() ); CALL_SUBTEST_1( lines<float>() );
CALL_SUBTEST_3( lines<double>() ); CALL_SUBTEST_3( lines<double>() );
CALL_SUBTEST_2( planes<float>() );
CALL_SUBTEST_5( planes<double>() );
} }
} }

View File

@ -98,11 +98,19 @@ template<typename Scalar, int Mode, int Options> void transformations()
Matrix3 matrot1, m; Matrix3 matrot1, m;
Scalar a = internal::random<Scalar>(-Scalar(M_PI), Scalar(M_PI)); Scalar a = internal::random<Scalar>(-Scalar(M_PI), Scalar(M_PI));
Scalar s0 = internal::random<Scalar>(); Scalar s0 = internal::random<Scalar>(),
s1 = internal::random<Scalar>();
while(v0.norm() < test_precision<Scalar>()) v0 = Vector3::Random();
while(v1.norm() < test_precision<Scalar>()) v1 = Vector3::Random();
VERIFY_IS_APPROX(v0, AngleAxisx(a, v0.normalized()) * v0); VERIFY_IS_APPROX(v0, AngleAxisx(a, v0.normalized()) * v0);
VERIFY_IS_APPROX(-v0, AngleAxisx(Scalar(M_PI), v0.unitOrthogonal()) * v0); VERIFY_IS_APPROX(-v0, AngleAxisx(Scalar(M_PI), v0.unitOrthogonal()) * v0);
VERIFY_IS_APPROX(cos(a)*v0.squaredNorm(), v0.dot(AngleAxisx(a, v0.unitOrthogonal()) * v0)); if(abs(cos(a)) > test_precision<Scalar>())
{
VERIFY_IS_APPROX(cos(a)*v0.squaredNorm(), v0.dot(AngleAxisx(a, v0.unitOrthogonal()) * v0));
}
m = AngleAxisx(a, v0.normalized()).toRotationMatrix().adjoint(); m = AngleAxisx(a, v0.normalized()).toRotationMatrix().adjoint();
VERIFY_IS_APPROX(Matrix3::Identity(), m * AngleAxisx(a, v0.normalized())); VERIFY_IS_APPROX(Matrix3::Identity(), m * AngleAxisx(a, v0.normalized()));
VERIFY_IS_APPROX(Matrix3::Identity(), AngleAxisx(a, v0.normalized()) * m); VERIFY_IS_APPROX(Matrix3::Identity(), AngleAxisx(a, v0.normalized()) * m);
@ -123,11 +131,18 @@ template<typename Scalar, int Mode, int Options> void transformations()
// angle-axis conversion // angle-axis conversion
AngleAxisx aa = AngleAxisx(q1); AngleAxisx aa = AngleAxisx(q1);
VERIFY_IS_APPROX(q1 * v1, Quaternionx(aa) * v1); VERIFY_IS_APPROX(q1 * v1, Quaternionx(aa) * v1);
VERIFY_IS_NOT_APPROX(q1 * v1, Quaternionx(AngleAxisx(aa.angle()*2,aa.axis())) * v1);
if(abs(aa.angle()) > NumTraits<Scalar>::dummy_precision())
{
VERIFY( !(q1 * v1).isApprox(Quaternionx(AngleAxisx(aa.angle()*2,aa.axis())) * v1) );
}
aa.fromRotationMatrix(aa.toRotationMatrix()); aa.fromRotationMatrix(aa.toRotationMatrix());
VERIFY_IS_APPROX(q1 * v1, Quaternionx(aa) * v1); VERIFY_IS_APPROX(q1 * v1, Quaternionx(aa) * v1);
VERIFY_IS_NOT_APPROX(q1 * v1, Quaternionx(AngleAxisx(aa.angle()*2,aa.axis())) * v1); if(abs(aa.angle()) > NumTraits<Scalar>::dummy_precision())
{
VERIFY( !(q1 * v1).isApprox(Quaternionx(AngleAxisx(aa.angle()*2,aa.axis())) * v1) );
}
// AngleAxis // AngleAxis
VERIFY_IS_APPROX(AngleAxisx(a,v1.normalized()).toRotationMatrix(), VERIFY_IS_APPROX(AngleAxisx(a,v1.normalized()).toRotationMatrix(),
@ -347,7 +362,9 @@ template<typename Scalar, int Mode, int Options> void transformations()
// test transform inversion // test transform inversion
t0.setIdentity(); t0.setIdentity();
t0.translate(v0); t0.translate(v0);
t0.linear().setRandom(); do {
t0.linear().setRandom();
} while(t0.linear().jacobiSvd().singularValues()(2)<test_precision<Scalar>());
Matrix4 t044 = Matrix4::Zero(); Matrix4 t044 = Matrix4::Zero();
t044(3,3) = 1; t044(3,3) = 1;
t044.block(0,0,t0.matrix().rows(),4) = t0.matrix(); t044.block(0,0,t0.matrix().rows(),4) = t0.matrix();
@ -397,6 +414,29 @@ template<typename Scalar, int Mode, int Options> void transformations()
t20 = Translation2(v20) * (Rotation2D<Scalar>(s0) * Eigen::Scaling(s0)); t20 = Translation2(v20) * (Rotation2D<Scalar>(s0) * Eigen::Scaling(s0));
t21 = Translation2(v20) * Rotation2D<Scalar>(s0) * Eigen::Scaling(s0); t21 = Translation2(v20) * Rotation2D<Scalar>(s0) * Eigen::Scaling(s0);
VERIFY_IS_APPROX(t20,t21); VERIFY_IS_APPROX(t20,t21);
Rotation2D<Scalar> R0(s0), R1(s1);
t20 = Translation2(v20) * (R0 * Eigen::Scaling(s0));
t21 = Translation2(v20) * R0 * Eigen::Scaling(s0);
VERIFY_IS_APPROX(t20,t21);
t20 = Translation2(v20) * (R0 * R0.inverse() * Eigen::Scaling(s0));
t21 = Translation2(v20) * Eigen::Scaling(s0);
VERIFY_IS_APPROX(t20,t21);
VERIFY_IS_APPROX(s0, (R0.slerp(0, R1)).angle());
VERIFY_IS_APPROX(s1, (R0.slerp(1, R1)).angle());
VERIFY_IS_APPROX(s0, (R0.slerp(0.5, R0)).angle());
VERIFY_IS_APPROX(Scalar(0), (R0.slerp(0.5, R0.inverse())).angle());
// check basic features
{
Rotation2D<Scalar> r1; // default ctor
r1 = Rotation2D<Scalar>(s0); // copy assignment
VERIFY_IS_APPROX(r1.angle(),s0);
Rotation2D<Scalar> r2(r1); // copy ctor
VERIFY_IS_APPROX(r2.angle(),s0);
}
} }
template<typename Scalar> void transform_alignment() template<typename Scalar> void transform_alignment()

View File

@ -321,16 +321,23 @@ void jacobisvd_inf_nan()
VERIFY(sub(some_inf, some_inf) != sub(some_inf, some_inf)); VERIFY(sub(some_inf, some_inf) != sub(some_inf, some_inf));
svd.compute(MatrixType::Constant(10,10,some_inf), ComputeFullU | ComputeFullV); svd.compute(MatrixType::Constant(10,10,some_inf), ComputeFullU | ComputeFullV);
Scalar some_nan = zero<Scalar>() / zero<Scalar>(); Scalar nan = std::numeric_limits<Scalar>::quiet_NaN();
VERIFY(some_nan != some_nan); VERIFY(nan != nan);
svd.compute(MatrixType::Constant(10,10,some_nan), ComputeFullU | ComputeFullV); svd.compute(MatrixType::Constant(10,10,nan), ComputeFullU | ComputeFullV);
MatrixType m = MatrixType::Zero(10,10); MatrixType m = MatrixType::Zero(10,10);
m(internal::random<int>(0,9), internal::random<int>(0,9)) = some_inf; m(internal::random<int>(0,9), internal::random<int>(0,9)) = some_inf;
svd.compute(m, ComputeFullU | ComputeFullV); svd.compute(m, ComputeFullU | ComputeFullV);
m = MatrixType::Zero(10,10); m = MatrixType::Zero(10,10);
m(internal::random<int>(0,9), internal::random<int>(0,9)) = some_nan; m(internal::random<int>(0,9), internal::random<int>(0,9)) = nan;
svd.compute(m, ComputeFullU | ComputeFullV);
// regression test for bug 791
m.resize(3,3);
m << 0, 2*NumTraits<Scalar>::epsilon(), 0.5,
0, -0.5, 0,
nan, 0, 0;
svd.compute(m, ComputeFullU | ComputeFullV); svd.compute(m, ComputeFullU | ComputeFullV);
} }
@ -434,6 +441,7 @@ void test_jacobisvd()
// Test on inf/nan matrix // Test on inf/nan matrix
CALL_SUBTEST_7( jacobisvd_inf_nan<MatrixXf>() ); CALL_SUBTEST_7( jacobisvd_inf_nan<MatrixXf>() );
CALL_SUBTEST_10( jacobisvd_inf_nan<MatrixXd>() );
} }
CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) )); CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));

View File

@ -17,13 +17,36 @@
#include <sstream> #include <sstream>
#include <vector> #include <vector>
#include <typeinfo> #include <typeinfo>
// The following includes of STL headers have to be done _before_ the
// definition of macros min() and max(). The reason is that many STL
// implementations will not work properly as the min and max symbols collide
// with the STL functions std:min() and std::max(). The STL headers may check
// for the macro definition of min/max and issue a warning or undefine the
// macros.
//
// Still, Windows defines min() and max() in windef.h as part of the regular
// Windows system interfaces and many other Windows APIs depend on these
// macros being available. To prevent the macro expansion of min/max and to
// make Eigen compatible with the Windows environment all function calls of
// std::min() and std::max() have to be written with parenthesis around the
// function name.
//
// All STL headers used by Eigen should be included here. Because main.h is
// included before any Eigen header and because the STL headers are guarded
// against multiple inclusions, no STL header will see our own min/max macro
// definitions.
#include <limits> #include <limits>
#include <algorithm> #include <algorithm>
#include <sstream>
#include <complex> #include <complex>
#include <deque> #include <deque>
#include <queue> #include <queue>
#include <list>
// To test that all calls from Eigen code to std::min() and std::max() are
// protected by parenthesis against macro expansion, the min()/max() macros
// are defined here and any not-parenthesized min/max call will cause a
// compiler error.
#define min(A,B) please_protect_your_min_with_parentheses #define min(A,B) please_protect_your_min_with_parentheses
#define max(A,B) please_protect_your_max_with_parentheses #define max(A,B) please_protect_your_max_with_parentheses
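The expanded comment above explains why every std::min()/std::max() call in Eigen is written with parentheses around the function name: once min/max exist as function-like macros (as windef.h defines them on Windows), a bare call would be macro-expanded, whereas (std::min)(a,b) is left untouched because the identifier is not immediately followed by '('. A tiny illustration (the macro body here is a deliberate placeholder, not the Windows one):

#include <algorithm>
#include <iostream>

#define min(A, B) error_min_was_macro_expanded   // simulate a windef.h-style macro

int main()
{
  int a = 3, b = 7;
  // std::min(a, b) would not compile here: the preprocessor rewrites "min(a, b)".
  int m = (std::min)(a, b);    // parentheses suppress the function-like macro
  std::cout << m << std::endl; // 3
  return 0;
}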
@ -383,6 +406,26 @@ void randomPermutationVector(PermutationVectorType& v, typename PermutationVecto
} }
} }
template<typename T> bool isNotNaN(const T& x)
{
return x==x;
}
template<typename T> bool isNaN(const T& x)
{
return x!=x;
}
template<typename T> bool isInf(const T& x)
{
return x > NumTraits<T>::highest();
}
template<typename T> bool isMinusInf(const T& x)
{
return x < NumTraits<T>::lowest();
}
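The helpers added above rely on standard floating-point behaviour: a NaN compares unequal to itself, and the infinities compare beyond every finite value. The same properties, checked with plain std::numeric_limits:

#include <limits>
#include <iostream>

int main()
{
  double nan = std::numeric_limits<double>::quiet_NaN();
  double inf = std::numeric_limits<double>::infinity();

  std::cout << std::boolalpha;
  std::cout << (nan != nan) << "\n";                                 // true: NaN is not equal to itself
  std::cout << (inf > std::numeric_limits<double>::max()) << "\n";   // true: +inf exceeds the largest finite value
  std::cout << (-inf < -std::numeric_limits<double>::max()) << "\n"; // true: -inf is below every finite value
  return 0;
}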
} // end namespace Eigen } // end namespace Eigen
template<typename T> struct GetDifferentType; template<typename T> struct GetDifferentType;

View File

@ -165,6 +165,38 @@ void ctms_decompositions()
Eigen::JacobiSVD<Matrix> jSVD; jSVD.compute(A, ComputeFullU | ComputeFullV); Eigen::JacobiSVD<Matrix> jSVD; jSVD.compute(A, ComputeFullU | ComputeFullV);
} }
void test_zerosized() {
// default constructors:
Eigen::MatrixXd A;
Eigen::VectorXd v;
// explicit zero-sized:
Eigen::ArrayXXd A0(0,0);
Eigen::ArrayXd v0(std::ptrdiff_t(0)); // FIXME ArrayXd(0) is ambiguous
// assigning empty objects to each other:
A=A0;
v=v0;
}
template<typename MatrixType> void test_reference(const MatrixType& m) {
typedef typename MatrixType::Scalar Scalar;
enum { Flag = MatrixType::IsRowMajor ? Eigen::RowMajor : Eigen::ColMajor};
enum { TransposeFlag = !MatrixType::IsRowMajor ? Eigen::RowMajor : Eigen::ColMajor};
typename MatrixType::Index rows = m.rows(), cols=m.cols();
// Dynamic reference:
typedef Eigen::Ref<const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Flag > > Ref;
typedef Eigen::Ref<const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, TransposeFlag> > RefT;
Ref r1(m);
Ref r2(m.block(rows/3, cols/4, rows/2, cols/2));
RefT r3(m.transpose());
RefT r4(m.topLeftCorner(rows/2, cols/2).transpose());
VERIFY_RAISES_ASSERT(RefT r5(m));
VERIFY_RAISES_ASSERT(Ref r6(m.transpose()));
VERIFY_RAISES_ASSERT(Ref r7(Scalar(2) * m));
}
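test_reference() above checks that Ref<const Matrix> binds compatible blocks without allocating, and that mismatched storage orders or expressions needing evaluation trigger the assertion. A short sketch of the read-only Ref pattern (the function name is made up):

#include <Eigen/Dense>
#include <iostream>

// Accepts any column-major float matrix, or a block of one, without copying.
double sumOf(const Eigen::Ref<const Eigen::MatrixXf>& m)
{
  return m.sum();
}

int main()
{
  Eigen::MatrixXf A = Eigen::MatrixXf::Constant(6, 6, 1.0f);

  std::cout << sumOf(A) << "\n";                   // whole matrix: 36
  std::cout << sumOf(A.block(1, 1, 3, 3)) << "\n"; // a 3x3 block binds directly: 9
  return 0;
}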
void test_nomalloc() void test_nomalloc()
{ {
// check that our operator new is indeed called: // check that our operator new is indeed called:
@ -175,5 +207,6 @@ void test_nomalloc()
// Check decomposition modules with dynamic matrices that have a known compile-time max size (ctms) // Check decomposition modules with dynamic matrices that have a known compile-time max size (ctms)
CALL_SUBTEST_4(ctms_decompositions<float>()); CALL_SUBTEST_4(ctms_decompositions<float>());
CALL_SUBTEST_5(test_zerosized());
CALL_SUBTEST_6(test_reference(Matrix<float,32,32>()));
} }

View File

@@ -80,7 +80,9 @@ void testVectorType(const VectorType& base)
  Matrix<Scalar,1,Dynamic> col_vector(size);
  row_vector.setLinSpaced(size,low,high);
  col_vector.setLinSpaced(size,low,high);
-  VERIFY( row_vector.isApprox(col_vector.transpose(), NumTraits<Scalar>::epsilon()));
+  // when using the extended precision (e.g., FPU) the relative error might exceed 1 bit
+  // when computing the squared sum in isApprox, thus the 2x factor.
+  VERIFY( row_vector.isApprox(col_vector.transpose(), Scalar(2)*NumTraits<Scalar>::epsilon()));
  Matrix<Scalar,Dynamic,1> size_changer(size+50);
  size_changer.setLinSpaced(size,low,high);
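
The comment carries the whole fix: isApprox accepts an optional relative precision, and doubling the machine epsilon absorbs the extra rounding that shows up when the squared sums are accumulated in extended (FPU) precision. A small sketch of passing an explicit precision (hypothetical data):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::RowVectorXd r = Eigen::RowVectorXd::LinSpaced(11, -2.0, 3.0);
      Eigen::VectorXd    c = Eigen::VectorXd::LinSpaced(11, -2.0, 3.0);
      // The second argument overrides the default NumTraits<double>::dummy_precision().
      bool ok = r.isApprox(c.transpose(), 2 * Eigen::NumTraits<double>::epsilon());
      std::cout << (ok ? "approx" : "different") << "\n";
      return 0;
    }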

View File

@@ -239,6 +239,12 @@ template<typename Scalar> void packetmath_real()
    data2[i] = internal::random<Scalar>(-87,88);
  }
  CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasExp, std::exp, internal::pexp);
+  {
+    data1[0] = std::numeric_limits<Scalar>::quiet_NaN();
+    packet_helper<internal::packet_traits<Scalar>::HasExp,Packet> h;
+    h.store(data2, internal::pexp(h.load(data1)));
+    VERIFY(isNaN(data2[0]));
+  }
  for (int i=0; i<size; ++i)
  {
@@ -247,8 +253,22 @@ template<typename Scalar> void packetmath_real()
  }
  if(internal::random<float>(0,1)<0.1)
    data1[internal::random<int>(0, PacketSize)] = 0;
-  CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasLog, std::log, internal::plog);
  CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasSqrt, std::sqrt, internal::psqrt);
+  CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasLog, std::log, internal::plog);
+  {
+    data1[0] = std::numeric_limits<Scalar>::quiet_NaN();
+    packet_helper<internal::packet_traits<Scalar>::HasLog,Packet> h;
+    h.store(data2, internal::plog(h.load(data1)));
+    VERIFY(isNaN(data2[0]));
+    data1[0] = -1.0f;
+    h.store(data2, internal::plog(h.load(data1)));
+    VERIFY(isNaN(data2[0]));
+#if !EIGEN_FAST_MATH
+    h.store(data2, internal::psqrt(h.load(data1)));
+    VERIFY(isNaN(data2[0]));
+    VERIFY(isNaN(data2[1]));
+#endif
+  }
}
template<typename Scalar> void packetmath_notcomplex()

View File

@@ -139,4 +139,12 @@ template<typename MatrixType> void product(const MatrixType& m)
  // inner product
  Scalar x = square2.row(c) * square2.col(c2);
  VERIFY_IS_APPROX(x, square2.row(c).transpose().cwiseProduct(square2.col(c2)).sum());
+  // outer product
+  VERIFY_IS_APPROX(m1.col(c) * m1.row(r), m1.block(0,c,rows,1) * m1.block(r,0,1,cols));
+  VERIFY_IS_APPROX(m1.row(r).transpose() * m1.col(c).transpose(), m1.block(r,0,1,cols).transpose() * m1.block(0,c,rows,1).transpose());
+  VERIFY_IS_APPROX(m1.block(0,c,rows,1) * m1.row(r), m1.block(0,c,rows,1) * m1.block(r,0,1,cols));
+  VERIFY_IS_APPROX(m1.col(c) * m1.block(r,0,1,cols), m1.block(0,c,rows,1) * m1.block(r,0,1,cols));
+  VERIFY_IS_APPROX(m1.leftCols(1) * m1.row(r), m1.block(0,0,rows,1) * m1.block(r,0,1,cols));
+  VERIFY_IS_APPROX(m1.col(c) * m1.topRows(1), m1.block(0,c,rows,1) * m1.block(0,0,1,cols));
}
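
The added assertions pin down outer products built from row and column expressions: an n x 1 column times a 1 x m row is a rank-one n x m matrix, and must match the product of the corresponding one-column and one-row blocks. A minimal sketch (hypothetical sizes):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::MatrixXd M = Eigen::MatrixXd::Random(4, 5);
      // col(2) is 4x1 and row(1) is 1x5, so their product is a 4x5 rank-one matrix.
      Eigen::MatrixXd outer     = M.col(2) * M.row(1);
      // The same product written with one-column / one-row blocks, as in the test.
      Eigen::MatrixXd viaBlocks = M.block(0, 2, 4, 1) * M.block(1, 0, 1, 5);
      std::cout << (outer.isApprox(viaBlocks) ? "equal" : "different") << "\n";
      return 0;
    }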

View File

@@ -183,15 +183,15 @@ void call_ref()
  VERIFY_EVALUATION_COUNT( call_ref_1(a,a), 0);
  VERIFY_EVALUATION_COUNT( call_ref_1(b,b.transpose()), 0);
-  // call_ref_1(ac); // does not compile because ac is const
+  // call_ref_1(ac,a<c); // does not compile because ac is const
  VERIFY_EVALUATION_COUNT( call_ref_1(ab,ab), 0);
  VERIFY_EVALUATION_COUNT( call_ref_1(a.head(4),a.head(4)), 0);
  VERIFY_EVALUATION_COUNT( call_ref_1(abc,abc), 0);
  VERIFY_EVALUATION_COUNT( call_ref_1(A.col(3),A.col(3)), 0);
-  // call_ref_1(A.row(3)); // does not compile because innerstride!=1
+  // call_ref_1(A.row(3),A.row(3)); // does not compile because innerstride!=1
  VERIFY_EVALUATION_COUNT( call_ref_3(A.row(3),A.row(3).transpose()), 0);
  VERIFY_EVALUATION_COUNT( call_ref_4(A.row(3),A.row(3).transpose()), 0);
-  // call_ref_1(a+a); // does not compile for obvious reason
+  // call_ref_1(a+a, a+a); // does not compile for obvious reason
  MatrixXf tmp = A*A.col(1);
  VERIFY_EVALUATION_COUNT( call_ref_2(A*A.col(1), tmp), 1); // evaluated into a temp
@@ -212,7 +212,7 @@ void call_ref()
  VERIFY_EVALUATION_COUNT( call_ref_5(a,a), 0);
  VERIFY_EVALUATION_COUNT( call_ref_5(a.head(3),a.head(3)), 0);
  VERIFY_EVALUATION_COUNT( call_ref_5(A,A), 0);
-  // call_ref_5(A.transpose()); // does not compile
+  // call_ref_5(A.transpose(),A.transpose()); // does not compile because storage order does not match
  VERIFY_EVALUATION_COUNT( call_ref_5(A.block(1,1,2,2),A.block(1,1,2,2)), 0);
  VERIFY_EVALUATION_COUNT( call_ref_5(b,b), 0); // storage order do not match, but this is a degenerate case that should work
  VERIFY_EVALUATION_COUNT( call_ref_5(a.row(3),a.row(3)), 0);

View File

@@ -124,7 +124,23 @@ void check_sparse_determinant(Solver& solver, const typename Solver::MatrixType&
  Scalar refDet = dA.determinant();
  VERIFY_IS_APPROX(refDet,solver.determinant());
}
+template<typename Solver, typename DenseMat>
+void check_sparse_abs_determinant(Solver& solver, const typename Solver::MatrixType& A, const DenseMat& dA)
+{
+  using std::abs;
+  typedef typename Solver::MatrixType Mat;
+  typedef typename Mat::Scalar Scalar;
+  solver.compute(A);
+  if (solver.info() != Success)
+  {
+    std::cerr << "sparse solver testing: factorization failed (check_sparse_abs_determinant)\n";
+    return;
+  }
+  Scalar refDet = abs(dA.determinant());
+  VERIFY_IS_APPROX(refDet,solver.absDeterminant());
+}
template<typename Solver, typename DenseMat>
int generate_sparse_spd_problem(Solver& , typename Solver::MatrixType& A, typename Solver::MatrixType& halfA, DenseMat& dA, int maxSize = 300)
@@ -324,3 +340,20 @@ template<typename Solver> void check_sparse_square_determinant(Solver& solver)
    check_sparse_determinant(solver, A, dA);
  }
}
+template<typename Solver> void check_sparse_square_abs_determinant(Solver& solver)
+{
+  typedef typename Solver::MatrixType Mat;
+  typedef typename Mat::Scalar Scalar;
+  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
+  // generate the problem
+  Mat A;
+  DenseMatrix dA;
+  generate_sparse_square_problem(solver, A, dA, 30);
+  A.makeCompressed();
+  for (int i = 0; i < g_repeat; i++) {
+    check_sparse_abs_determinant(solver, A, dA);
+  }
+}
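
Both new helpers compare the dense determinant against the sparse factorization's absDeterminant(). A minimal sketch of the API under test, here with SparseLU on a hand-built matrix (values hypothetical):

    #include <Eigen/Sparse>
    #include <iostream>

    int main() {
      // 2x2 sparse matrix [[2, 0], [1, 3]]; its determinant is 6.
      Eigen::SparseMatrix<double> A(2, 2);
      A.insert(0, 0) = 2.0;
      A.insert(1, 0) = 1.0;
      A.insert(1, 1) = 3.0;
      A.makeCompressed();

      Eigen::SparseLU<Eigen::SparseMatrix<double> > lu;
      lu.compute(A);
      if (lu.info() == Eigen::Success)
        std::cout << "abs(det) = " << lu.absDeterminant() << "\n";  // prints 6
      return 0;
    }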

View File

@@ -44,6 +44,9 @@ template<typename T> void test_sparselu_T()
  check_sparse_square_solving(sparselu_colamd);
  check_sparse_square_solving(sparselu_amd);
  check_sparse_square_solving(sparselu_natural);
+  check_sparse_square_abs_determinant(sparselu_colamd);
+  check_sparse_square_abs_determinant(sparselu_amd);
}
void test_sparselu()

View File

@@ -10,12 +10,11 @@
#include <Eigen/SparseQR>
template<typename MatrixType,typename DenseMat>
-int generate_sparse_rectangular_problem(MatrixType& A, DenseMat& dA, int maxRows = 300, int maxCols = 150)
+int generate_sparse_rectangular_problem(MatrixType& A, DenseMat& dA, int maxRows = 300)
{
-  eigen_assert(maxRows >= maxCols);
  typedef typename MatrixType::Scalar Scalar;
  int rows = internal::random<int>(1,maxRows);
-  int cols = internal::random<int>(1,maxCols);
+  int cols = internal::random<int>(1,rows);
  double density = (std::max)(8./(rows*cols), 0.01);
  A.resize(rows,cols);
@@ -54,6 +53,8 @@ template<typename Scalar> void test_sparseqr_scalar()
  b = dA * DenseVector::Random(A.cols());
  solver.compute(A);
+  if(internal::random<float>(0,1)>0.5)
+    solver.factorize(A); // this checks that calling analyzePattern is not needed if the pattern do not change.
  if (solver.info() != Success)
  {
    std::cerr << "sparse QR factorization failed\n";
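
The new branch documents the two-phase solver API: compute() is analyzePattern() followed by factorize(), and when only the numerical values change while the sparsity pattern stays fixed, calling factorize() alone is enough. A hedged sketch with SparseQR (matrix contents hypothetical):

    #include <Eigen/Sparse>

    int main() {
      Eigen::SparseMatrix<double> A(3, 2);
      A.insert(0, 0) = 1.0;
      A.insert(1, 1) = 2.0;
      A.insert(2, 0) = 3.0;
      A.makeCompressed();

      Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > qr;
      qr.analyzePattern(A);    // symbolic step, depends only on the sparsity pattern
      qr.factorize(A);         // numeric step

      A.coeffRef(1, 1) = 5.0;  // same pattern, new values...
      qr.factorize(A);         // ...so factorize() alone is sufficient
      return qr.info() == Eigen::Success ? 0 : 1;
    }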

View File

@@ -9,11 +9,6 @@
#include "main.h"
-template<typename T> bool isNotNaN(const T& x)
-{
-  return x==x;
-}
// workaround aggressive optimization in ICC
template<typename T> EIGEN_DONT_INLINE T sub(T a, T b) { return a - b; }

View File

@@ -178,11 +178,11 @@ template<typename Scalar> void glLoadMatrix(const Transform<Scalar,3,Affine>& t)
template<typename Scalar> void glLoadMatrix(const Transform<Scalar,3,Projective>& t) { glLoadMatrix(t.matrix()); }
template<typename Scalar> void glLoadMatrix(const Transform<Scalar,3,AffineCompact>& t) { glLoadMatrix(Transform<Scalar,3,Affine>(t).matrix()); }
-static void glRotate(const Rotation2D<float>& rot)
+inline void glRotate(const Rotation2D<float>& rot)
{
  glRotatef(rot.angle()*180.f/float(M_PI), 0.f, 0.f, 1.f);
}
-static void glRotate(const Rotation2D<double>& rot)
+inline void glRotate(const Rotation2D<double>& rot)
{
  glRotated(rot.angle()*180.0/M_PI, 0.0, 0.0, 1.0);
}
@@ -246,18 +246,18 @@ EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glGet,GLenum,_,double, 4,4,Doublev)
#ifdef GL_VERSION_2_0
-static void glUniform2fv_ei (GLint loc, const float* v) { glUniform2fv(loc,1,v); }
-static void glUniform2iv_ei (GLint loc, const int* v) { glUniform2iv(loc,1,v); }
-static void glUniform3fv_ei (GLint loc, const float* v) { glUniform3fv(loc,1,v); }
-static void glUniform3iv_ei (GLint loc, const int* v) { glUniform3iv(loc,1,v); }
-static void glUniform4fv_ei (GLint loc, const float* v) { glUniform4fv(loc,1,v); }
-static void glUniform4iv_ei (GLint loc, const int* v) { glUniform4iv(loc,1,v); }
-static void glUniformMatrix2fv_ei (GLint loc, const float* v) { glUniformMatrix2fv(loc,1,false,v); }
-static void glUniformMatrix3fv_ei (GLint loc, const float* v) { glUniformMatrix3fv(loc,1,false,v); }
-static void glUniformMatrix4fv_ei (GLint loc, const float* v) { glUniformMatrix4fv(loc,1,false,v); }
+inline void glUniform2fv_ei (GLint loc, const float* v) { glUniform2fv(loc,1,v); }
+inline void glUniform2iv_ei (GLint loc, const int* v) { glUniform2iv(loc,1,v); }
+inline void glUniform3fv_ei (GLint loc, const float* v) { glUniform3fv(loc,1,v); }
+inline void glUniform3iv_ei (GLint loc, const int* v) { glUniform3iv(loc,1,v); }
+inline void glUniform4fv_ei (GLint loc, const float* v) { glUniform4fv(loc,1,v); }
+inline void glUniform4iv_ei (GLint loc, const int* v) { glUniform4iv(loc,1,v); }
+inline void glUniformMatrix2fv_ei (GLint loc, const float* v) { glUniformMatrix2fv(loc,1,false,v); }
+inline void glUniformMatrix3fv_ei (GLint loc, const float* v) { glUniformMatrix3fv(loc,1,false,v); }
+inline void glUniformMatrix4fv_ei (GLint loc, const float* v) { glUniformMatrix4fv(loc,1,false,v); }
EIGEN_GL_FUNC1_DECLARATION (glUniform,GLint,const)
@@ -294,9 +294,9 @@ EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,3,Matrix
#ifdef GL_VERSION_3_0
-static void glUniform2uiv_ei (GLint loc, const unsigned int* v) { glUniform2uiv(loc,1,v); }
-static void glUniform3uiv_ei (GLint loc, const unsigned int* v) { glUniform3uiv(loc,1,v); }
-static void glUniform4uiv_ei (GLint loc, const unsigned int* v) { glUniform4uiv(loc,1,v); }
+inline void glUniform2uiv_ei (GLint loc, const unsigned int* v) { glUniform2uiv(loc,1,v); }
+inline void glUniform3uiv_ei (GLint loc, const unsigned int* v) { glUniform3uiv(loc,1,v); }
+inline void glUniform4uiv_ei (GLint loc, const unsigned int* v) { glUniform4uiv(loc,1,v); }
EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 2,2uiv_ei)
EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 3,3uiv_ei)
@@ -305,9 +305,9 @@ EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 4,4uiv_ei)
#endif
#ifdef GL_ARB_gpu_shader_fp64
-static void glUniform2dv_ei (GLint loc, const double* v) { glUniform2dv(loc,1,v); }
-static void glUniform3dv_ei (GLint loc, const double* v) { glUniform3dv(loc,1,v); }
-static void glUniform4dv_ei (GLint loc, const double* v) { glUniform4dv(loc,1,v); }
+inline void glUniform2dv_ei (GLint loc, const double* v) { glUniform2dv(loc,1,v); }
+inline void glUniform3dv_ei (GLint loc, const double* v) { glUniform3dv(loc,1,v); }
+inline void glUniform4dv_ei (GLint loc, const double* v) { glUniform4dv(loc,1,v); }
EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 2,2dv_ei)
EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 3,3dv_ei)

View File

@@ -110,7 +110,6 @@ void MatrixPowerAtomic<MatrixType>::compute2x2(MatrixType& res, RealScalar p) co
  using std::abs;
  using std::pow;
-  ArrayType logTdiag = m_A.diagonal().array().log();
  res.coeffRef(0,0) = pow(m_A.coeff(0,0), p);
  for (Index i=1; i < m_A.cols(); ++i) {

View File

@@ -10,12 +10,10 @@ FOREACH(example_src ${examples_SRCS})
  if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
    target_link_libraries(example_${example} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
  endif()
-  GET_TARGET_PROPERTY(example_executable
-                      example_${example} LOCATION)
  ADD_CUSTOM_COMMAND(
    TARGET example_${example}
    POST_BUILD
-    COMMAND ${example_executable}
+    COMMAND example_${example}
    ARGS >${CMAKE_CURRENT_BINARY_DIR}/${example}.out
  )
  ADD_DEPENDENCIES(unsupported_examples example_${example})

View File

@@ -14,12 +14,10 @@ FOREACH(snippet_src ${snippets_SRCS})
  if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
    target_link_libraries(${compile_snippet_target} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
  endif()
-  GET_TARGET_PROPERTY(compile_snippet_executable
-                      ${compile_snippet_target} LOCATION)
  ADD_CUSTOM_COMMAND(
    TARGET ${compile_snippet_target}
    POST_BUILD
-    COMMAND ${compile_snippet_executable}
+    COMMAND ${compile_snippet_target}
    ARGS >${CMAKE_CURRENT_BINARY_DIR}/${snippet}.out
  )
  ADD_DEPENDENCIES(unsupported_snippets ${compile_snippet_target})

View File

@@ -29,11 +29,7 @@ ei_add_test(NonLinearOptimization)
ei_add_test(NumericalDiff)
ei_add_test(autodiff)
-if (NOT CMAKE_CXX_COMPILER MATCHES "clang\\+\\+$")
ei_add_test(BVH)
-endif()
ei_add_test(matrix_exponential)
ei_add_test(matrix_function)
ei_add_test(matrix_power)
@@ -73,8 +69,9 @@ if(NOT EIGEN_TEST_NO_OPENGL)
  find_package(GLUT)
  find_package(GLEW)
  if(OPENGL_FOUND AND GLUT_FOUND AND GLEW_FOUND)
+    include_directories(${OPENGL_INCLUDE_DIR} ${GLUT_INCLUDE_DIR} ${GLEW_INCLUDE_DIRS})
    ei_add_property(EIGEN_TESTED_BACKENDS "OpenGL, ")
-    set(EIGEN_GL_LIB ${GLUT_LIBRARIES} ${GLEW_LIBRARIES})
+    set(EIGEN_GL_LIB ${GLUT_LIBRARIES} ${GLEW_LIBRARIES} ${OPENGL_LIBRARIES})
    ei_add_test(openglsupport "" "${EIGEN_GL_LIB}" )
  else()
    ei_add_property(EIGEN_MISSING_BACKENDS "OpenGL, ")

File diff suppressed because it is too large

View File

@@ -104,9 +104,7 @@ void evalSolverSugarFunction( const POLYNOMIAL& pols, const ROOTS& roots, const
  // 1) the roots found are correct
  // 2) the roots have distinct moduli
-  typedef typename POLYNOMIAL::Scalar Scalar;
  typedef typename REAL_ROOTS::Scalar Real;
-  typedef PolynomialSolver<Scalar, Deg > PolynomialSolverType;
  //Test realRoots
  std::vector< Real > calc_realRoots;

View File

@@ -20,6 +20,11 @@
#pragma once
+#ifndef MKL_BLAS
+#define MKL_BLAS MKL_DOMAIN_BLAS
+#endif
#include <gtsam/global_includes.h>
#include <gtsam/3rdparty/Eigen/Eigen/Core>
#include <iosfwd>

View File

@@ -212,6 +212,18 @@ public:
      boost::optional<Matrix&> H5 = boost::none,
      boost::optional<Matrix&> H6 = boost::none) const;
+  /** @deprecated The following function has been deprecated, use PreintegrationBase::predict with the same signature instead */
+  static void Predict(const Pose3& pose_i, const Vector3& vel_i, Pose3& pose_j, Vector3& vel_j,
+      const imuBias::ConstantBias& bias_i, const CombinedPreintegratedMeasurements& PIM, const Vector3& gravity,
+      const Vector3& omegaCoriolis, const bool use2ndOrderCoriolis = false,
+      boost::optional<Vector3&> deltaPij_biascorrected_out = boost::none,
+      boost::optional<Vector3&> deltaVij_biascorrected_out = boost::none) {
+    PoseVelocityBias PVB(PIM.predict(pose_i, vel_i, bias_i, gravity, omegaCoriolis, use2ndOrderCoriolis, deltaPij_biascorrected_out, deltaVij_biascorrected_out));
+    pose_j = PVB.pose;
+    vel_j = PVB.velocity;
+  }
private:
  /** Serialization function */

View File

@@ -194,6 +194,17 @@ public:
      boost::optional<Matrix&> H4 = boost::none,
      boost::optional<Matrix&> H5 = boost::none) const;
+  /** @deprecated The following function has been deprecated, use PreintegrationBase::predict with the same signature instead */
+  static void Predict(const Pose3& pose_i, const Vector3& vel_i, Pose3& pose_j, Vector3& vel_j,
+      const imuBias::ConstantBias& bias_i, const PreintegratedMeasurements& PIM, const Vector3& gravity,
+      const Vector3& omegaCoriolis, const bool use2ndOrderCoriolis = false,
+      boost::optional<Vector3&> deltaPij_biascorrected_out = boost::none,
+      boost::optional<Vector3&> deltaVij_biascorrected_out = boost::none) {
+    PoseVelocityBias PVB(PIM.predict(pose_i, vel_i, bias_i, gravity, omegaCoriolis, use2ndOrderCoriolis, deltaPij_biascorrected_out, deltaVij_biascorrected_out));
+    pose_j = PVB.pose;
+    vel_j = PVB.velocity;
+  }
private:
  /** Serialization function */
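
The deprecated static wrappers above just forward to the preintegrated measurements' own predict(), which returns a PoseVelocityBias (as the removed test lines below show). New code would call predict() directly; a hedged sketch of that call (include path and nested class name assumed from the usual GTSAM layout, argument names hypothetical):

    #include <gtsam/navigation/ImuFactor.h>

    using namespace gtsam;

    // Predict the state at time j directly from the preintegrated measurements,
    // instead of going through the deprecated static ImuFactor::Predict wrapper.
    PoseVelocityBias predictStateJ(const ImuFactor::PreintegratedMeasurements& pim,
                                   const Pose3& pose_i, const Vector3& vel_i,
                                   const imuBias::ConstantBias& bias_i,
                                   const Vector3& gravity, const Vector3& omegaCoriolis) {
      return pim.predict(pose_i, vel_i, bias_i, gravity, omegaCoriolis);
    }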

View File

@@ -320,11 +320,11 @@ TEST(CombinedImuFactor, PredictRotation) {
      Combined_pre_int_data, gravity, omegaCoriolis);
  // Predict
-  Pose3 x(Rot3().ypr(0,0, 0), Point3(0,0,0));
-  Vector3 v(0,0,0);
-  PoseVelocityBias poseVelocityBias = Combined_pre_int_data.predict(x,v,bias, gravity, omegaCoriolis);
+  Pose3 x(Rot3().ypr(0,0, 0), Point3(0,0,0)), x2;
+  Vector3 v(0,0,0), v2;
+  CombinedImuFactor::Predict(x, v, x2, v2, bias, Combinedfactor.preintegratedMeasurements(), gravity, omegaCoriolis);
  Pose3 expectedPose(Rot3().ypr(M_PI/10, 0,0), Point3(0,0,0));
-  EXPECT(assert_equal(expectedPose, poseVelocityBias.pose, tol));
+  EXPECT(assert_equal(expectedPose, x2, tol));
}
/* ************************************************************************* */

View File

@@ -913,13 +913,14 @@ TEST(ImuFactor, PredictRotation) {
  ImuFactor factor(X(1), V(1), X(2), V(2), B(1), pre_int_data, gravity, omegaCoriolis);
  // Predict
-  Pose3 x1;
-  Vector3 v1(0, 0.0, 0.0);
-  PoseVelocityBias poseVelocity = pre_int_data.predict(x1, v1, bias, gravity, omegaCoriolis);
+  Pose3 x1, x2;
+  Vector3 v1 = Vector3(0, 0.0, 0.0);
+  Vector3 v2;
+  ImuFactor::Predict(x1, v1, x2, v2, bias, factor.preintegratedMeasurements(), gravity, omegaCoriolis);
  Pose3 expectedPose(Rot3().ypr(M_PI/10, 0, 0), Point3(0, 0, 0));
  Vector3 expectedVelocity; expectedVelocity<<0,0,0;
-  EXPECT(assert_equal(expectedPose, poseVelocity.pose));
-  EXPECT(assert_equal(Vector(expectedVelocity), Vector(poseVelocity.velocity)));
+  EXPECT(assert_equal(expectedPose, x2));
+  EXPECT(assert_equal(Vector(expectedVelocity), Vector(v2)));
}
/* ************************************************************************* */

View File

@@ -106,8 +106,8 @@ public:
    Ab(size()).col(0) = - traits<T>::Local(measurement_, hx); // - unwhitenedError(x_bar)
    // Whiten the corresponding system, Ab already contains RHS
-    Vector dummy(Dim);
-    noiseModel_->WhitenSystem(Ab.matrix(), dummy);
+    Vector b = Ab(size()).col(0); // need b to be valid for Robust noise models
+    noiseModel_->WhitenSystem(Ab.matrix(), b);
    return factor;
  }

View File

@@ -42,6 +42,8 @@ LevenbergMarquardtParams::VerbosityLM LevenbergMarquardtParams::verbosityLMTrans
  boost::algorithm::to_upper(s);
  if (s == "SILENT")
    return LevenbergMarquardtParams::SILENT;
+  if (s == "SUMMARY")
+    return LevenbergMarquardtParams::SUMMARY;
  if (s == "LAMBDA")
    return LevenbergMarquardtParams::LAMBDA;
  if (s == "TRYLAMBDA")
@@ -65,6 +67,9 @@ std::string LevenbergMarquardtParams::verbosityLMTranslator(
  case LevenbergMarquardtParams::SILENT:
    s = "SILENT";
    break;
+  case LevenbergMarquardtParams::SUMMARY:
+    s = "SUMMARY";
+    break;
  case LevenbergMarquardtParams::TERMINATION:
    s = "TERMINATION";
    break;
@@ -219,9 +224,15 @@ void LevenbergMarquardtOptimizer::iterate() {
    cout << "linearizing = " << endl;
  GaussianFactorGraph::shared_ptr linear = linearize();
-  if(state_.totalNumberInnerIterations==0) // write initial error
+  if(state_.totalNumberInnerIterations==0) { // write initial error
    writeLogFile(state_.error);
+    if (lmVerbosity == LevenbergMarquardtParams::SUMMARY) {
+      cout << "Initial error: " << state_.error << ", values: " << state_.values.size()
+          << std::endl;
+    }
+  }
  // Keep increasing lambda until we make make progress
  while (true) {
@@ -248,6 +259,8 @@ void LevenbergMarquardtOptimizer::iterate() {
      systemSolvedSuccessfully = false;
    }
+    double linearizedCostChange = 0,
+        newlinearizedError = 0;
    if (systemSolvedSuccessfully) {
      params_.reuse_diagonal_ = true;
@@ -257,9 +270,9 @@ void LevenbergMarquardtOptimizer::iterate() {
        delta.print("delta");
      // cost change in the linearized system (old - new)
-      double newlinearizedError = linear->error(delta);
-      double linearizedCostChange = state_.error - newlinearizedError;
+      newlinearizedError = linear->error(delta);
+      linearizedCostChange = state_.error - newlinearizedError;
      if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA)
        cout << "newlinearizedError = " << newlinearizedError <<
            " linearizedCostChange = " << linearizedCostChange << endl;
@@ -304,6 +317,12 @@ void LevenbergMarquardtOptimizer::iterate() {
      }
    }
+    if (lmVerbosity == LevenbergMarquardtParams::SUMMARY) {
+      cout << "[" << state_.iterations << "]: " << "new error = " << newlinearizedError
+          << ", delta = " << linearizedCostChange << ", lambda = " << state_.lambda
+          << ", success = " << systemSolvedSuccessfully << std::endl;
+    }
    ++state_.totalNumberInnerIterations;
    if (step_is_successful) { // we have successfully decreased the cost and we have good modelFidelity
@@ -320,7 +339,8 @@ void LevenbergMarquardtOptimizer::iterate() {
    // check if lambda is too big
    if (state_.lambda >= params_.lambdaUpperBound) {
-      if (nloVerbosity >= NonlinearOptimizerParams::TERMINATION)
+      if (nloVerbosity >= NonlinearOptimizerParams::TERMINATION ||
+          lmVerbosity == LevenbergMarquardtParams::SUMMARY)
        cout << "Warning: Levenberg-Marquardt giving up because "
            "cannot decrease error with maximum lambda" << endl;
      break;
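
The new SUMMARY level sits between SILENT and TERMINATION and prints one line per inner iteration (new error, linearized cost change, lambda, success flag) plus the initial error. A hedged sketch of enabling it from user code (the verbosityLM field name is taken from the params class in the next file; graph and initial values are placeholders):

    #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
    #include <gtsam/nonlinear/NonlinearFactorGraph.h>
    #include <gtsam/nonlinear/Values.h>

    using namespace gtsam;

    Values optimizeWithSummary(const NonlinearFactorGraph& graph, const Values& initial) {
      LevenbergMarquardtParams params;
      params.verbosityLM = LevenbergMarquardtParams::SUMMARY;  // assumed public member
      return LevenbergMarquardtOptimizer(graph, initial, params).optimize();
    }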

View File

@@ -38,7 +38,7 @@ class GTSAM_EXPORT LevenbergMarquardtParams: public NonlinearOptimizerParams {
public:
  /** See LevenbergMarquardtParams::lmVerbosity */
  enum VerbosityLM {
-    SILENT = 0, TERMINATION, LAMBDA, TRYLAMBDA, TRYCONFIG, DAMPED, TRYDELTA
+    SILENT = 0, SUMMARY, TERMINATION, LAMBDA, TRYLAMBDA, TRYCONFIG, DAMPED, TRYDELTA
  };
  static VerbosityLM verbosityLMTranslator(const std::string &s);

View File

@@ -973,7 +973,7 @@ TEST( SmartStereoProjectionPoseFactor, HessianWithRotation ) {
  // Hessian is invariant to rotations and translations in the nondegenerate case
  EXPECT(
      assert_equal(hessianFactor->information(),
-          hessianFactorRotTran->information(), 1e-7));
+          hessianFactorRotTran->information(), 1e-6));
}
/* *************************************************************************/

View File

@@ -25,7 +25,7 @@ set(matlab_examples_data ${matlab_examples_data_graph} ${matlab_examples_data_ma
if(GTSAM_BUILD_TYPE_POSTFIXES)
  foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
    string(TOUPPER "${build_type}" build_type_upper)
-    if("${build_type_upper}" STREQUAL "RELEASE")
+    if(${build_type_upper} STREQUAL "RELEASE")
      set(build_type_tag "") # Don't create release mode tag on installed directory
    else()
      set(build_type_tag "${build_type}")