Merge branch 'develop' into feature/cython_wrapper

# Conflicts:
#	wrap/Module.cpp
release/4.3a0
Duy-Nguyen Ta 2016-11-25 03:43:36 -05:00
commit e407a42160
96 changed files with 1319 additions and 879 deletions


@ -117,11 +117,11 @@ if(MSVC)
endif()
endif()
find_package(Boost 1.43 COMPONENTS serialization system filesystem thread program_options date_time regex timer chrono)
find_package(Boost 1.43 COMPONENTS serialization system filesystem thread program_options date_time timer chrono)
# Required components
if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILESYSTEM_LIBRARY OR
NOT Boost_THREAD_LIBRARY OR NOT Boost_DATE_TIME_LIBRARY OR NOT Boost_REGEX_LIBRARY)
NOT Boost_THREAD_LIBRARY OR NOT Boost_DATE_TIME_LIBRARY)
message(FATAL_ERROR "Missing required Boost components >= v1.43, please install/upgrade Boost or configure your search paths.")
endif()
@ -129,7 +129,7 @@ option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF)
# Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library)
set(GTSAM_BOOST_LIBRARIES
${Boost_SERIALIZATION_LIBRARY} ${Boost_SYSTEM_LIBRARY} ${Boost_FILESYSTEM_LIBRARY}
${Boost_THREAD_LIBRARY} ${Boost_DATE_TIME_LIBRARY} ${Boost_REGEX_LIBRARY})
${Boost_THREAD_LIBRARY} ${Boost_DATE_TIME_LIBRARY})
if (GTSAM_DISABLE_NEW_TIMERS)
message("WARNING: GTSAM timing instrumentation manually disabled")
add_definitions(-DGTSAM_DISABLE_NEW_TIMERS)


@ -128,8 +128,7 @@ function(wrap_library_internal interfaceHeader linkLibraries extraIncludeDirs ex
## This needs to be fixed!!
if(UNIX AND NOT APPLE)
list(APPEND automaticDependencies ${Boost_SERIALIZATION_LIBRARY_RELEASE} ${Boost_FILESYSTEM_LIBRARY_RELEASE}
${Boost_SYSTEM_LIBRARY_RELEASE} ${Boost_THREAD_LIBRARY_RELEASE} ${Boost_DATE_TIME_LIBRARY_RELEASE}
${Boost_REGEX_LIBRARY_RELEASE})
${Boost_SYSTEM_LIBRARY_RELEASE} ${Boost_THREAD_LIBRARY_RELEASE} ${Boost_DATE_TIME_LIBRARY_RELEASE})
if(Boost_TIMER_LIBRARY_RELEASE AND NOT GTSAM_DISABLE_NEW_TIMERS) # Only present in Boost >= 1.48.0
list(APPEND automaticDependencies ${Boost_TIMER_LIBRARY_RELEASE} ${Boost_CHRONO_LIBRARY_RELEASE})
if(GTSAM_MEX_BUILD_STATIC_MODULE)


@ -101,6 +101,9 @@ mark_as_advanced(GTSAM_SINGLE_TEST_EXE)
# Enable make check (http://www.cmake.org/Wiki/CMakeEmulateMakeCheck)
if(GTSAM_BUILD_TESTS)
add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND} -C $<CONFIGURATION> --output-on-failure)
# Add target to build tests without running
add_custom_target(all.tests)
endif()
# Add examples target
@ -109,8 +112,6 @@ add_custom_target(examples)
# Add timing target
add_custom_target(timing)
# Add target to build tests without running
add_custom_target(all.tests)
# Implementations of this file's macros:


@ -20,6 +20,7 @@
#include <boost/assign/list_of.hpp>
#include <map>
#include <iostream>
using namespace std;
using namespace gtsam;


@ -1,4 +1,4 @@
repo: 8a21fd850624c931e448cbcfb38168cb2717c790
node: 07105f7124f9aef00a68c85e0fc606e65d3d6c15
node: b9cd8366d4e8f49471c7afafc4c2a1b00e54a54d
branch: 3.2
tag: 3.2.8
tag: 3.2.10


@ -31,3 +31,5 @@ ffa86ffb557094721ca71dcea6aed2651b9fd610 3.2.0
bdd17ee3b1b3a166cd5ec36dcad4fc1f3faf774a 3.2.5
c58038c56923e0fd86de3ded18e03df442e66dfb 3.2.6
b30b87236a1b1552af32ac34075ee5696a9b5a33 3.2.7
07105f7124f9aef00a68c85e0fc606e65d3d6c15 3.2.8
dc6cfdf9bcec5efc7b6593bddbbb3d675de53524 3.2.9


@ -66,9 +66,8 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprTyp
: ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime)
: int(traits<XprType>::MaxColsAtCompileTime),
XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0,
IsDense = is_same<StorageKind,Dense>::value,
IsRowMajor = (IsDense&&MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
: (IsDense&&MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
: (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
: XprTypeIsRowMajor,
HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor),
InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),


@ -76,9 +76,7 @@ struct CommaInitializer
template<typename OtherDerived>
CommaInitializer& operator,(const DenseBase<OtherDerived>& other)
{
if(other.cols()==0 || other.rows()==0)
return *this;
if (m_col==m_xpr.cols())
if (m_col==m_xpr.cols() && (other.cols()!=0 || other.rows()!=m_currentBlockRows))
{
m_row+=m_currentBlockRows;
m_col = 0;
@ -86,24 +84,18 @@ struct CommaInitializer
eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows()
&& "Too many rows passed to comma initializer (operator<<)");
}
eigen_assert(m_col<m_xpr.cols()
eigen_assert((m_col + other.cols() <= m_xpr.cols())
&& "Too many coefficients passed to comma initializer (operator<<)");
eigen_assert(m_currentBlockRows==other.rows());
if (OtherDerived::SizeAtCompileTime != Dynamic)
m_xpr.template block<OtherDerived::RowsAtCompileTime != Dynamic ? OtherDerived::RowsAtCompileTime : 1,
OtherDerived::ColsAtCompileTime != Dynamic ? OtherDerived::ColsAtCompileTime : 1>
(m_row, m_col) = other;
else
m_xpr.block(m_row, m_col, other.rows(), other.cols()) = other;
m_xpr.template block<OtherDerived::RowsAtCompileTime, OtherDerived::ColsAtCompileTime>
(m_row, m_col, other.rows(), other.cols()) = other;
m_col += other.cols();
return *this;
}
inline ~CommaInitializer()
{
eigen_assert((m_row+m_currentBlockRows) == m_xpr.rows()
&& m_col == m_xpr.cols()
&& "Too few coefficients passed to comma initializer (operator<<)");
finished();
}
/** \returns the built matrix once all its coefficients have been set.
@ -113,7 +105,12 @@ struct CommaInitializer
* quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished());
* \endcode
*/
inline XprType& finished() { return m_xpr; }
inline XprType& finished() {
eigen_assert(((m_row+m_currentBlockRows) == m_xpr.rows() || m_xpr.cols() == 0)
&& m_col == m_xpr.cols()
&& "Too few coefficients passed to comma initializer (operator<<)");
return m_xpr;
}
XprType& m_xpr; // target expression
Index m_row; // current row id

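For context, the comma initializer patched above is normally driven through operator<<. A minimal usage sketch (illustrative, not part of the patch), showing where finished(), and therefore the relocated completeness assertion, fires:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  // Each comma-separated operand is placed by CommaInitializer::operator, :
  Eigen::Matrix3f m;
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;

  // finished() returns the filled matrix; after this patch the completeness
  // assertion lives here (the destructor simply delegates to finished()).
  Eigen::Matrix3f p = (Eigen::Matrix3f() << m.col(1), m.col(0), m.col(2)).finished();
  std::cout << p << "\n";
  return 0;
}
```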

@ -44,10 +44,10 @@ class DiagonalBase : public EigenBase<Derived>
template<typename DenseDerived>
void evalTo(MatrixBase<DenseDerived> &other) const;
template<typename DenseDerived>
void addTo(MatrixBase<DenseDerived> &other) const
inline void addTo(MatrixBase<DenseDerived> &other) const
{ other.diagonal() += diagonal(); }
template<typename DenseDerived>
void subTo(MatrixBase<DenseDerived> &other) const
inline void subTo(MatrixBase<DenseDerived> &other) const
{ other.diagonal() -= diagonal(); }
inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); }
@ -98,7 +98,7 @@ class DiagonalBase : public EigenBase<Derived>
template<typename Derived>
template<typename DenseDerived>
void DiagonalBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const
inline void DiagonalBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const
{
other.setZero();
other.diagonal() = diagonal();


@ -59,7 +59,7 @@ struct dot_nocheck<T, U, true>
*/
template<typename Derived>
template<typename OtherDerived>
typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType
inline typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType
MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)


@ -969,6 +969,8 @@ template<typename T>
struct functor_traits<std::not_equal_to<T> >
{ enum { Cost = 1, PacketAccess = false }; };
#if(__cplusplus < 201103L)
// std::binder* are deprecated since C++11 and will be removed in C++17
template<typename T>
struct functor_traits<std::binder2nd<T> >
{ enum { Cost = functor_traits<T>::Cost, PacketAccess = false }; };
@ -976,6 +978,7 @@ struct functor_traits<std::binder2nd<T> >
template<typename T>
struct functor_traits<std::binder1st<T> >
{ enum { Cost = functor_traits<T>::Cost, PacketAccess = false }; };
#endif
template<typename T>
struct functor_traits<std::unary_negate<T> >


@ -205,9 +205,6 @@ class GeneralProduct<Lhs, Rhs, InnerProduct>
public:
GeneralProduct(const Lhs& lhs, const Rhs& rhs)
{
EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::RealScalar, typename Rhs::RealScalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
Base::coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum();
}
@ -264,8 +261,6 @@ class GeneralProduct<Lhs, Rhs, OuterProduct>
GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
{
EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::RealScalar, typename Rhs::RealScalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
}
struct set { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() = src; } };


@ -183,8 +183,8 @@ template<typename Scalar, typename Packet> inline void pstoreu(Scalar* to, const
/** \internal tries to do cache prefetching of \a addr */
template<typename Scalar> inline void prefetch(const Scalar* addr)
{
#if !defined(_MSC_VER)
__builtin_prefetch(addr);
#if (!EIGEN_COMP_MSVC) && (EIGEN_COMP_GNUC || EIGEN_COMP_CLANG || EIGEN_COMP_ICC)
__builtin_prefetch(addr);
#endif
}


@ -218,8 +218,8 @@ struct conj_retval
* Implementation of abs2 *
****************************************************************************/
template<typename Scalar>
struct abs2_impl
template<typename Scalar,bool IsComplex>
struct abs2_impl_default
{
typedef typename NumTraits<Scalar>::Real RealScalar;
static inline RealScalar run(const Scalar& x)
@ -228,15 +228,26 @@ struct abs2_impl
}
};
template<typename RealScalar>
struct abs2_impl<std::complex<RealScalar> >
template<typename Scalar>
struct abs2_impl_default<Scalar, true> // IsComplex
{
static inline RealScalar run(const std::complex<RealScalar>& x)
typedef typename NumTraits<Scalar>::Real RealScalar;
static inline RealScalar run(const Scalar& x)
{
return real(x)*real(x) + imag(x)*imag(x);
}
};
template<typename Scalar>
struct abs2_impl
{
typedef typename NumTraits<Scalar>::Real RealScalar;
static inline RealScalar run(const Scalar& x)
{
return abs2_impl_default<Scalar,NumTraits<Scalar>::IsComplex>::run(x);
}
};
template<typename Scalar>
struct abs2_retval
{
@ -496,11 +507,24 @@ struct floor_log2<n, lower, upper, floor_log2_bogus>
template<typename Scalar>
struct random_default_impl<Scalar, false, true>
{
typedef typename NumTraits<Scalar>::NonInteger NonInteger;
static inline Scalar run(const Scalar& x, const Scalar& y)
{
return x + Scalar((NonInteger(y)-x+1) * std::rand() / (RAND_MAX + NonInteger(1)));
typedef typename conditional<NumTraits<Scalar>::IsSigned,std::ptrdiff_t,std::size_t>::type ScalarX;
if(y<x)
return x;
// the following difference might overflow on a 32-bit system,
// but since y>=x the result converted to an unsigned long is still correct.
std::size_t range = ScalarX(y)-ScalarX(x);
std::size_t offset = 0;
// rejection sampling
std::size_t divisor = 1;
std::size_t multiplier = 1;
if(range<RAND_MAX) divisor = (std::size_t(RAND_MAX)+1)/(range+1);
else multiplier = 1 + range/(std::size_t(RAND_MAX)+1);
do {
offset = (std::size_t(std::rand()) * multiplier) / divisor;
} while (offset > range);
return Scalar(ScalarX(x) + offset);
}
static inline Scalar run()

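A standalone sketch of the rejection-sampling scheme introduced above (illustrative only, not the Eigen implementation; assumes size_t is at least as wide as the scalar range):

```cpp
#include <cstdlib>
#include <cstddef>

// Draw uniformly from [x, y]: scale rand() so its image covers the whole
// range, then reject the rare draws that land past the end (no modulo bias).
long uniform_int(long x, long y) {
  if (y < x) return x;
  // Unsigned subtraction stays correct even if y - x overflows a signed type.
  std::size_t range = std::size_t(y) - std::size_t(x);
  std::size_t divisor = 1, multiplier = 1;
  if (range < std::size_t(RAND_MAX)) divisor = (std::size_t(RAND_MAX) + 1) / (range + 1);
  else multiplier = 1 + range / (std::size_t(RAND_MAX) + 1);
  std::size_t offset;
  do {
    offset = (std::size_t(std::rand()) * multiplier) / divisor;
  } while (offset > range); // rejection step
  return long(std::size_t(x) + offset);
}
```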

@ -584,10 +584,11 @@ struct permut_matrix_product_retval
const Index n = Side==OnTheLeft ? rows() : cols();
// FIXME we need an is_same for expression that is not sensitive to constness. For instance
// is_same_xpr<Block<const Matrix>, Block<Matrix> >::value should be true.
const typename Dest::Scalar *dst_data = internal::extract_data(dst);
if( is_same<MatrixTypeNestedCleaned,Dest>::value
&& blas_traits<MatrixTypeNestedCleaned>::HasUsableDirectAccess
&& blas_traits<Dest>::HasUsableDirectAccess
&& extract_data(dst) == extract_data(m_matrix))
&& dst_data!=0 && dst_data == extract_data(m_matrix))
{
// apply the permutation inplace
Matrix<bool,PermutationType::RowsAtCompileTime,1,0,PermutationType::MaxRowsAtCompileTime> mask(m_permutation.size());


@ -315,8 +315,8 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
EIGEN_STRONG_INLINE void resizeLike(const EigenBase<OtherDerived>& _other)
{
const OtherDerived& other = _other.derived();
internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(other.rows(), other.cols());
const Index othersize = other.rows()*other.cols();
internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(Index(other.rows()), Index(other.cols()));
const Index othersize = Index(other.rows())*Index(other.cols());
if(RowsAtCompileTime == 1)
{
eigen_assert(other.rows() == 1 || other.cols() == 1);
@ -487,7 +487,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
/** \sa MatrixBase::operator=(const EigenBase<OtherDerived>&) */
template<typename OtherDerived>
EIGEN_STRONG_INLINE PlainObjectBase(const EigenBase<OtherDerived> &other)
: m_storage(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
: m_storage(Index(other.derived().rows()) * Index(other.derived().cols()), other.derived().rows(), other.derived().cols())
{
_check_template_params();
internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(other.derived().rows(), other.derived().cols());

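The Index(...) casts above force the multiplication itself to happen in the (typically wider) Index type. A minimal illustration of the overflow they guard against, with hypothetical sizes:

```cpp
#include <cstdint>

std::int64_t size_product(std::int32_t rows, std::int32_t cols) {
  // rows * cols would be a 32-bit multiply and can overflow (undefined
  // behaviour) before the result is widened, e.g. for rows = cols = 100000.
  // Casting each operand first keeps the arithmetic in 64 bits.
  return std::int64_t(rows) * std::int64_t(cols);
}
```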

@ -76,9 +76,23 @@ template<typename MatrixType, int Direction> class Reverse
EIGEN_DENSE_PUBLIC_INTERFACE(Reverse)
using Base::IsRowMajor;
// next line is necessary because otherwise const version of operator()
// is hidden by non-const version defined in this file
using Base::operator();
// The following two operators are provided to workaround
// an MSVC 2013 issue. In theory, we could simply do:
//   using Base::operator();
// to make the const version of operator() visible.
// Otherwise, they would be hidden by the non-const versions defined in this file
inline CoeffReturnType operator()(Index row, Index col) const
{
eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
return coeff(row, col);
}
inline CoeffReturnType operator()(Index index) const
{
eigen_assert(index >= 0 && index < m_matrix.size());
return coeff(index);
}
protected:
enum {


@ -243,7 +243,8 @@ template<int Side, typename TriangularType, typename Rhs> struct triangular_solv
template<typename Dest> inline void evalTo(Dest& dst) const
{
if(!(is_same<RhsNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_rhs)))
const typename Dest::Scalar *dst_data = internal::extract_data(dst);
if(!(is_same<RhsNestedCleaned,Dest>::value && dst_data!=0 && extract_data(dst) == extract_data(m_rhs)))
dst = m_rhs;
m_triangularMatrix.template solveInPlace<Side>(dst);
}


@ -331,11 +331,11 @@ inline void MatrixBase<Derived>::adjointInPlace()
namespace internal {
template<typename BinOp,typename NestedXpr,typename Rhs>
struct blas_traits<SelfCwiseBinaryOp<BinOp,NestedXpr,Rhs> >
: blas_traits<NestedXpr>
template<typename BinOp,typename Xpr,typename Rhs>
struct blas_traits<SelfCwiseBinaryOp<BinOp,Xpr,Rhs> >
: blas_traits<typename internal::remove_all<typename Xpr::Nested>::type>
{
typedef SelfCwiseBinaryOp<BinOp,NestedXpr,Rhs> XprType;
typedef SelfCwiseBinaryOp<BinOp,Xpr,Rhs> XprType;
static inline const XprType extract(const XprType& x) { return x; }
};
@ -392,7 +392,6 @@ struct checkTransposeAliasing_impl
::run(extract_data(dst), other))
&& "aliasing detected during transposition, use transposeInPlace() "
"or evaluate the rhs into a temporary using .eval()");
}
};


@ -376,7 +376,8 @@ struct transposition_matrix_product_retval
const int size = m_transpositions.size();
Index j = 0;
if(!(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix)))
const typename Dest::Scalar *dst_data = internal::extract_data(dst);
if(!(is_same<MatrixTypeNestedCleaned,Dest>::value && dst_data!=0 && dst_data == extract_data(m_matrix)))
dst = m_matrix;
for(int k=(Transposed?size-1:0) ; Transposed?k>=0:k<size ; Transposed?--k:++k)


@ -81,7 +81,7 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conju
// coherence when accessing the rhs elements
std::ptrdiff_t l1, l2;
manage_caching_sizes(GetAction, &l1, &l2);
Index subcols = cols>0 ? l2/(4 * sizeof(Scalar) * otherStride) : 0;
Index subcols = cols>0 ? l2/(4 * sizeof(Scalar) * std::max<Index>(otherStride,size)) : 0;
subcols = std::max<Index>((subcols/Traits::nr)*Traits::nr, Traits::nr);
for(Index k2=IsLower ? 0 : size;


@ -42,16 +42,29 @@ template<bool Conjugate> struct conj_if;
template<> struct conj_if<true> {
template<typename T>
inline T operator()(const T& x) { return numext::conj(x); }
inline T operator()(const T& x) const { return numext::conj(x); }
template<typename T>
inline T pconj(const T& x) { return internal::pconj(x); }
inline T pconj(const T& x) const { return internal::pconj(x); }
};
template<> struct conj_if<false> {
template<typename T>
inline const T& operator()(const T& x) { return x; }
inline const T& operator()(const T& x) const { return x; }
template<typename T>
inline const T& pconj(const T& x) { return x; }
inline const T& pconj(const T& x) const { return x; }
};
// Generic implementation for custom complex types.
template<typename LhsScalar, typename RhsScalar, bool ConjLhs, bool ConjRhs>
struct conj_helper
{
typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType Scalar;
EIGEN_STRONG_INLINE Scalar pmadd(const LhsScalar& x, const RhsScalar& y, const Scalar& c) const
{ return padd(c, pmul(x,y)); }
EIGEN_STRONG_INLINE Scalar pmul(const LhsScalar& x, const RhsScalar& y) const
{ return conj_if<ConjLhs>()(x) * conj_if<ConjRhs>()(y); }
};
template<typename Scalar> struct conj_helper<Scalar,Scalar,false,false>
@ -171,12 +184,13 @@ template<typename XprType> struct blas_traits
};
// pop conjugate
template<typename Scalar, typename NestedXpr>
struct blas_traits<CwiseUnaryOp<scalar_conjugate_op<Scalar>, NestedXpr> >
: blas_traits<NestedXpr>
template<typename Scalar, typename Xpr>
struct blas_traits<CwiseUnaryOp<scalar_conjugate_op<Scalar>, Xpr> >
: blas_traits<typename internal::remove_all<typename Xpr::Nested>::type>
{
typedef typename internal::remove_all<typename Xpr::Nested>::type NestedXpr;
typedef blas_traits<NestedXpr> Base;
typedef CwiseUnaryOp<scalar_conjugate_op<Scalar>, NestedXpr> XprType;
typedef CwiseUnaryOp<scalar_conjugate_op<Scalar>, Xpr> XprType;
typedef typename Base::ExtractType ExtractType;
enum {
@ -188,12 +202,13 @@ struct blas_traits<CwiseUnaryOp<scalar_conjugate_op<Scalar>, NestedXpr> >
};
// pop scalar multiple
template<typename Scalar, typename NestedXpr>
struct blas_traits<CwiseUnaryOp<scalar_multiple_op<Scalar>, NestedXpr> >
: blas_traits<NestedXpr>
template<typename Scalar, typename Xpr>
struct blas_traits<CwiseUnaryOp<scalar_multiple_op<Scalar>, Xpr> >
: blas_traits<typename internal::remove_all<typename Xpr::Nested>::type>
{
typedef typename internal::remove_all<typename Xpr::Nested>::type NestedXpr;
typedef blas_traits<NestedXpr> Base;
typedef CwiseUnaryOp<scalar_multiple_op<Scalar>, NestedXpr> XprType;
typedef CwiseUnaryOp<scalar_multiple_op<Scalar>, Xpr> XprType;
typedef typename Base::ExtractType ExtractType;
static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
static inline Scalar extractScalarFactor(const XprType& x)
@ -201,12 +216,13 @@ struct blas_traits<CwiseUnaryOp<scalar_multiple_op<Scalar>, NestedXpr> >
};
// pop opposite
template<typename Scalar, typename NestedXpr>
struct blas_traits<CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> >
: blas_traits<NestedXpr>
template<typename Scalar, typename Xpr>
struct blas_traits<CwiseUnaryOp<scalar_opposite_op<Scalar>, Xpr> >
: blas_traits<typename internal::remove_all<typename Xpr::Nested>::type>
{
typedef typename internal::remove_all<typename Xpr::Nested>::type NestedXpr;
typedef blas_traits<NestedXpr> Base;
typedef CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> XprType;
typedef CwiseUnaryOp<scalar_opposite_op<Scalar>, Xpr> XprType;
typedef typename Base::ExtractType ExtractType;
static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
static inline Scalar extractScalarFactor(const XprType& x)
@ -214,13 +230,14 @@ struct blas_traits<CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> >
};
// pop/push transpose
template<typename NestedXpr>
struct blas_traits<Transpose<NestedXpr> >
: blas_traits<NestedXpr>
template<typename Xpr>
struct blas_traits<Transpose<Xpr> >
: blas_traits<typename internal::remove_all<typename Xpr::Nested>::type>
{
typedef typename internal::remove_all<typename Xpr::Nested>::type NestedXpr;
typedef typename NestedXpr::Scalar Scalar;
typedef blas_traits<NestedXpr> Base;
typedef Transpose<NestedXpr> XprType;
typedef Transpose<Xpr> XprType;
typedef Transpose<const typename Base::_ExtractType> ExtractType; // const to get rid of a compile error; anyway blas traits are only used on the RHS
typedef Transpose<const typename Base::_ExtractType> _ExtractType;
typedef typename conditional<bool(Base::HasUsableDirectAccess),


@ -162,7 +162,7 @@ const unsigned int HereditaryBits = RowMajorBit
/** \ingroup enums
* Enum containing possible values for the \p Mode parameter of
* MatrixBase::selfadjointView() and MatrixBase::triangularView(). */
enum {
enum UpLoType {
/** View matrix as a lower triangular matrix. */
Lower=0x1,
/** View matrix as an upper triangular matrix. */
@ -187,7 +187,7 @@ enum {
/** \ingroup enums
* Enum for indicating whether an object is aligned or not. */
enum {
enum AlignmentType {
/** Object is not correctly aligned for vectorization. */
Unaligned=0,
/** Object is aligned for vectorization. */
@ -217,7 +217,7 @@ enum DirectionType {
/** \internal \ingroup enums
* Enum to specify how to traverse the entries of a matrix. */
enum {
enum TraversalType {
/** \internal Default traversal, no vectorization, no index-based access */
DefaultTraversal,
/** \internal No vectorization, use index-based access to have only one for loop instead of 2 nested loops */
@ -239,7 +239,7 @@ enum {
/** \internal \ingroup enums
* Enum to specify whether to unroll loops when traversing over the entries of a matrix. */
enum {
enum UnrollingType {
/** \internal Do not unroll loops. */
NoUnrolling,
/** \internal Unroll only the inner loop, but not the outer loop. */
@ -251,7 +251,7 @@ enum {
/** \internal \ingroup enums
* Enum to specify whether to use the default (built-in) implementation or the specialization. */
enum {
enum SpecializedType {
Specialized,
BuiltIn
};
@ -259,7 +259,7 @@ enum {
/** \ingroup enums
* Enum containing possible values for the \p _Options template parameter of
* Matrix, Array and BandMatrix. */
enum {
enum StorageOptions {
/** Storage order is column major (see \ref TopicStorageOrders). */
ColMajor = 0,
/** Storage order is row major (see \ref TopicStorageOrders). */
@ -272,7 +272,7 @@ enum {
/** \ingroup enums
* Enum for specifying whether to apply or solve on the left or right. */
enum {
enum SideType {
/** Apply transformation on the left. */
OnTheLeft = 1,
/** Apply transformation on the right. */
@ -418,7 +418,7 @@ namespace Architecture
/** \internal \ingroup enums
* Enum used as template parameter in GeneralProduct. */
enum { CoeffBasedProductMode, LazyCoeffBasedProductMode, OuterProduct, InnerProduct, GemvProduct, GemmProduct };
enum ProductImplType { CoeffBasedProductMode, LazyCoeffBasedProductMode, OuterProduct, InnerProduct, GemvProduct, GemmProduct };
/** \internal \ingroup enums
* Enum used in experimental parallel implementation. */

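The point of naming these previously anonymous enums (a sketch, not from the patch): a named enumeration gives its values a distinct type that can appear in signatures and be type-checked, instead of decaying to plain int:

```cpp
enum UpLoType { Lower = 0x1, Upper = 0x2 };

// The compiler can now check and overload on the enum type itself;
// an anonymous enum would only permit: bool isLower(int mode);
bool isLower(UpLoType mode) { return (mode & Lower) != 0; }
```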

@ -35,6 +35,14 @@
#pragma clang diagnostic push
#endif
#pragma clang diagnostic ignored "-Wconstant-logical-operand"
#elif defined __GNUC__ && __GNUC__>=6
#ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
#pragma GCC diagnostic push
#endif
#pragma GCC diagnostic ignored "-Wignored-attributes"
#endif
#endif // not EIGEN_WARNINGS_DISABLED


@ -13,7 +13,7 @@
#define EIGEN_WORLD_VERSION 3
#define EIGEN_MAJOR_VERSION 2
#define EIGEN_MINOR_VERSION 8
#define EIGEN_MINOR_VERSION 10
#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
(EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \


@ -507,7 +507,12 @@ template<typename T> void smart_copy(const T* start, const T* end, T* target)
template<typename T> struct smart_copy_helper<T,true> {
static inline void run(const T* start, const T* end, T* target)
{ memcpy(target, start, std::ptrdiff_t(end)-std::ptrdiff_t(start)); }
{
std::ptrdiff_t size = std::ptrdiff_t(end)-std::ptrdiff_t(start);
if(size==0) return;
eigen_internal_assert(start!=0 && end!=0 && target!=0);
memcpy(target, start, size);
}
};
template<typename T> struct smart_copy_helper<T,false> {
@ -515,7 +520,6 @@ template<typename T> struct smart_copy_helper<T,false> {
{ std::copy(start, end, target); }
};
/*****************************************************************************
*** Implementation of runtime stack allocation (falling back to malloc) ***
*****************************************************************************/
@ -655,99 +659,60 @@ template<typename T> class aligned_stack_memory_handler
/****************************************************************************/
/** \class aligned_allocator
* \ingroup Core_Module
*
* \brief STL compatible allocator to use with 16 byte aligned types
*
* Example:
* \code
* // Matrix4f requires 16 bytes alignment:
* std::map< int, Matrix4f, std::less<int>,
* aligned_allocator<std::pair<const int, Matrix4f> > > my_map_mat4;
* // Vector3f does not require 16 bytes alignment, no need to use Eigen's allocator:
* std::map< int, Vector3f > my_map_vec3;
* \endcode
*
* \sa \ref TopicStlContainers.
*/
* \ingroup Core_Module
*
* \brief STL compatible allocator to use with 16 byte aligned types
*
* Example:
* \code
* // Matrix4f requires 16 bytes alignment:
* std::map< int, Matrix4f, std::less<int>,
* aligned_allocator<std::pair<const int, Matrix4f> > > my_map_mat4;
* // Vector3f does not require 16 bytes alignment, no need to use Eigen's allocator:
* std::map< int, Vector3f > my_map_vec3;
* \endcode
*
* \sa \blank \ref TopicStlContainers.
*/
template<class T>
class aligned_allocator
class aligned_allocator : public std::allocator<T>
{
public:
typedef size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef T value_type;
typedef size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef T value_type;
template<class U>
struct rebind
{
typedef aligned_allocator<U> other;
};
template<class U>
struct rebind
{
typedef aligned_allocator<U> other;
};
pointer address( reference value ) const
{
return &value;
}
aligned_allocator() : std::allocator<T>() {}
const_pointer address( const_reference value ) const
{
return &value;
}
aligned_allocator(const aligned_allocator& other) : std::allocator<T>(other) {}
aligned_allocator()
{
}
template<class U>
aligned_allocator(const aligned_allocator<U>& other) : std::allocator<T>(other) {}
aligned_allocator( const aligned_allocator& )
{
}
~aligned_allocator() {}
template<class U>
aligned_allocator( const aligned_allocator<U>& )
{
}
pointer allocate(size_type num, const void* /*hint*/ = 0)
{
internal::check_size_for_overflow<T>(num);
return static_cast<pointer>( internal::aligned_malloc(num * sizeof(T)) );
}
~aligned_allocator()
{
}
size_type max_size() const
{
return (std::numeric_limits<size_type>::max)();
}
pointer allocate( size_type num, const void* hint = 0 )
{
EIGEN_UNUSED_VARIABLE(hint);
internal::check_size_for_overflow<T>(num);
return static_cast<pointer>( internal::aligned_malloc( num * sizeof(T) ) );
}
void construct( pointer p, const T& value )
{
::new( p ) T( value );
}
void destroy( pointer p )
{
p->~T();
}
void deallocate( pointer p, size_type /*num*/ )
{
internal::aligned_free( p );
}
bool operator!=(const aligned_allocator<T>& ) const
{ return false; }
bool operator==(const aligned_allocator<T>& ) const
{ return true; }
void deallocate(pointer p, size_type /*num*/)
{
internal::aligned_free(p);
}
};
//---------- Cache sizes ----------

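Typical container usage of aligned_allocator, the standard Eigen idiom already shown in the doc comment above, repeated here as a compilable sketch. Deriving from std::allocator, as this patch does, lets the class inherit address(), construct(), destroy() and max_size() instead of redefining them:

```cpp
#include <map>
#include <vector>
#include <Eigen/Dense>

// Matrix4f is a fixed-size vectorizable type, so STL containers holding it
// must use the 16-byte-aligned allocator:
std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > poses(8);
std::map<int, Eigen::Matrix4f, std::less<int>,
         Eigen::aligned_allocator<std::pair<const int, Eigen::Matrix4f> > > keyed;
```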

@ -8,7 +8,10 @@
#pragma warning pop
#elif defined __clang__
#pragma clang diagnostic pop
#elif defined __GNUC__ && __GNUC__>=6
#pragma GCC diagnostic pop
#endif
#endif
#endif // EIGEN_WARNINGS_DISABLED


@ -327,13 +327,33 @@ GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixTyp
}
else
{
Scalar p = Scalar(0.5) * (m_matS.coeff(i, i) - m_matS.coeff(i+1, i+1));
Scalar z = sqrt(abs(p * p + m_matS.coeff(i+1, i) * m_matS.coeff(i, i+1)));
m_alphas.coeffRef(i) = ComplexScalar(m_matS.coeff(i+1, i+1) + p, z);
m_alphas.coeffRef(i+1) = ComplexScalar(m_matS.coeff(i+1, i+1) + p, -z);
// We need to extract the generalized eigenvalues of the pair (S, T), where S is a general 2x2 block and T is an upper-triangular 2x2 block.
// From the eigen decomposition of T = U * E * U^-1,
// we can extract the eigenvalues of (U^-1 * S * U) / E.
// Here we exploit the fact that E = diag(T), U = [ 1 T_01 ; 0 T_11-T_00 ], and U^-1 = [ 1 -T_01/(T_11-T_00) ; 0 1/(T_11-T_00) ].
// Then, taking beta = T_00*T_11*(T_11-T_00), we can avoid any division, and the alphas are the eigenvalues of A = (U^-1 * S * U) * diag(T_11,T_00) * (T_11-T_00):
// T = [a b ; 0 c]
// S = [e f ; g h]
RealScalar a = m_realQZ.matrixT().coeff(i, i), b = m_realQZ.matrixT().coeff(i, i+1), c = m_realQZ.matrixT().coeff(i+1, i+1);
RealScalar e = m_matS.coeff(i, i), f = m_matS.coeff(i, i+1), g = m_matS.coeff(i+1, i), h = m_matS.coeff(i+1, i+1);
RealScalar d = c-a;
RealScalar gb = g*b;
Matrix<RealScalar,2,2> A;
A << (e*d-gb)*c, ((e*b+f*d-h*b)*d-gb*b)*a,
g*c , (gb+h*d)*a;
// NOTE, we could also compute the SVD of T's block during the QZ factorization so that the respective T block is guaranteed to be diagonal,
// and then we could directly apply the formula below (while taking care of scaling S columns by T11,T00):
Scalar p = Scalar(0.5) * (A.coeff(i, i) - A.coeff(i+1, i+1));
Scalar z = sqrt(abs(p * p + A.coeff(i+1, i) * A.coeff(i, i+1)));
m_alphas.coeffRef(i) = ComplexScalar(A.coeff(i+1, i+1) + p, z);
m_alphas.coeffRef(i+1) = ComplexScalar(A.coeff(i+1, i+1) + p, -z);
m_betas.coeffRef(i) =
m_betas.coeffRef(i+1) = a*c*d;
m_betas.coeffRef(i) = m_realQZ.matrixT().coeff(i,i);
m_betas.coeffRef(i+1) = m_realQZ.matrixT().coeff(i,i);
i += 2;
}
}

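For reference, the quantities above solve the standard 2x2 generalized eigenvalue problem (textbook definition, not taken from the patch):

```latex
\det(S - \lambda T) = 0, \qquad
S = \begin{pmatrix} e & f \\ g & h \end{pmatrix}, \qquad
T = \begin{pmatrix} a & b \\ 0 & c \end{pmatrix}, \qquad
\lambda_i = \frac{\alpha_i}{\beta_i}.
```

Choosing beta = a*c*(c-a) (the `a*c*d` in the code, with d = c-a) clears every denominator, so the alphas can be computed without a single division.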

@ -367,10 +367,10 @@ void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs)
hCoeffs.tail(n-i-1).noalias() = (matA.bottomRightCorner(remainingSize,remainingSize).template selfadjointView<Lower>()
* (conj(h) * matA.col(i).tail(remainingSize)));
hCoeffs.tail(n-i-1) += (conj(h)*Scalar(-0.5)*(hCoeffs.tail(remainingSize).dot(matA.col(i).tail(remainingSize)))) * matA.col(i).tail(n-i-1);
hCoeffs.tail(n-i-1) += (conj(h)*RealScalar(-0.5)*(hCoeffs.tail(remainingSize).dot(matA.col(i).tail(remainingSize)))) * matA.col(i).tail(n-i-1);
matA.bottomRightCorner(remainingSize, remainingSize).template selfadjointView<Lower>()
.rankUpdate(matA.col(i).tail(remainingSize), hCoeffs.tail(remainingSize), -1);
.rankUpdate(matA.col(i).tail(remainingSize), hCoeffs.tail(remainingSize), Scalar(-1));
matA.col(i).coeffRef(i+1) = beta;
hCoeffs.coeffRef(i) = h;


@ -75,7 +75,7 @@ template<typename MatrixType,int _Direction> class Homogeneous
inline Index rows() const { return m_matrix.rows() + (int(Direction)==Vertical ? 1 : 0); }
inline Index cols() const { return m_matrix.cols() + (int(Direction)==Horizontal ? 1 : 0); }
inline Scalar coeff(Index row, Index col) const
inline Scalar coeff(Index row, Index col=0) const
{
if( (int(Direction)==Vertical && row==m_matrix.rows())
|| (int(Direction)==Horizontal && col==m_matrix.cols()))


@ -276,7 +276,7 @@ public:
inline Coefficients& coeffs() { return m_coeffs;}
inline const Coefficients& coeffs() const { return m_coeffs;}
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(IsAligned)
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(IsAligned))
protected:
Coefficients m_coeffs;


@ -443,7 +443,7 @@ public:
operator * (const DiagonalBase<DiagonalDerived> &b) const
{
TransformTimeDiagonalReturnType res(*this);
res.linear() *= b;
res.linearExt() *= b;
return res;
}
@ -557,7 +557,7 @@ public:
return res;
}
inline Transform& operator*=(const DiagonalMatrix<Scalar,Dim>& s) { linear() *= s; return *this; }
inline Transform& operator*=(const DiagonalMatrix<Scalar,Dim>& s) { linearExt() *= s; return *this; }
template<typename Derived>
inline Transform& operator=(const RotationBase<Derived,Dim>& r);
@ -828,7 +828,7 @@ Transform<Scalar,Dim,Mode,Options>::prescale(const MatrixBase<OtherDerived> &oth
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
m_matrix.template block<Dim,HDim>(0,0).noalias() = (other.asDiagonal() * m_matrix.template block<Dim,HDim>(0,0));
affine().noalias() = (other.asDiagonal() * affine());
return *this;
}
@ -1048,7 +1048,7 @@ void Transform<Scalar,Dim,Mode,Options>::computeRotationScaling(RotationMatrixTy
}
}
/** decomposes the linear part of the transformation as a product rotation x scaling, the scaling being
/** decomposes the linear part of the transformation as a product scaling x rotation, the scaling being
* not necessarily positive.
*
* If either pointer is zero, the corresponding computation is skipped.


@ -130,8 +130,10 @@ public:
}
/** Applies translation to vector */
inline VectorType operator* (const VectorType& other) const
{ return m_coeffs + other; }
template<typename Derived>
inline typename internal::enable_if<Derived::IsVectorAtCompileTime,VectorType>::type
operator* (const MatrixBase<Derived>& vec) const
{ return m_coeffs + vec.derived(); }
/** \returns the inverse translation (opposite) */
Translation inverse() const { return Translation(-m_coeffs); }


@ -75,8 +75,9 @@ void MatrixBase<Derived>::makeHouseholder(
RealScalar tailSqNorm = size()==1 ? RealScalar(0) : tail.squaredNorm();
Scalar c0 = coeff(0);
const RealScalar tol = (std::numeric_limits<RealScalar>::min)();
if(tailSqNorm == RealScalar(0) && numext::imag(c0)==RealScalar(0))
if(tailSqNorm <= tol && numext::abs2(numext::imag(c0))<=tol)
{
tau = RealScalar(0);
beta = numext::real(c0);


@ -237,8 +237,9 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
{
workspace.resize(rows());
Index vecs = m_length;
const typename Dest::Scalar *dst_data = internal::extract_data(dst);
if( internal::is_same<typename internal::remove_all<VectorsType>::type,Dest>::value
&& internal::extract_data(dst) == internal::extract_data(m_vectors))
&& dst_data!=0 && dst_data == internal::extract_data(m_vectors))
{
// in-place
dst.diagonal().setOnes();


@ -290,7 +290,7 @@ struct inverse_impl : public ReturnByValue<inverse_impl<MatrixType> >
{
const int Size = EIGEN_PLAIN_ENUM_MIN(MatrixType::ColsAtCompileTime,Dest::ColsAtCompileTime);
EIGEN_ONLY_USED_FOR_DEBUG(Size);
eigen_assert(( (Size<=1) || (Size>4) || (extract_data(m_matrix)!=extract_data(dst)))
eigen_assert(( (Size<=1) || (Size>4) || (extract_data(m_matrix)!=0 && extract_data(m_matrix)!=extract_data(dst)))
&& "Aliasing problem detected in inverse(), you need to do inverse().eval() here.");
compute_inverse<MatrixTypeNestedCleaned, Dest>::run(m_matrix, dst);


@ -151,10 +151,12 @@ struct compute_inverse_size4<Architecture::SSE, float, MatrixType, ResultType>
iC = _mm_mul_ps(rd,iC);
iD = _mm_mul_ps(rd,iD);
result.template writePacket<ResultAlignment>( 0, _mm_shuffle_ps(iA,iB,0x77));
result.template writePacket<ResultAlignment>( 4, _mm_shuffle_ps(iA,iB,0x22));
result.template writePacket<ResultAlignment>( 8, _mm_shuffle_ps(iC,iD,0x77));
result.template writePacket<ResultAlignment>(12, _mm_shuffle_ps(iC,iD,0x22));
DenseIndex res_stride = result.outerStride();
float* res = result.data();
pstoret<float, Packet4f, ResultAlignment>(res+0, _mm_shuffle_ps(iA,iB,0x77));
pstoret<float, Packet4f, ResultAlignment>(res+res_stride, _mm_shuffle_ps(iA,iB,0x22));
pstoret<float, Packet4f, ResultAlignment>(res+2*res_stride, _mm_shuffle_ps(iC,iD,0x77));
pstoret<float, Packet4f, ResultAlignment>(res+3*res_stride, _mm_shuffle_ps(iC,iD,0x22));
}
};
@ -311,14 +313,16 @@ struct compute_inverse_size4<Architecture::SSE, double, MatrixType, ResultType>
iC1 = _mm_sub_pd(_mm_mul_pd(B1, dC), iC1);
iC2 = _mm_sub_pd(_mm_mul_pd(B2, dC), iC2);
result.template writePacket<ResultAlignment>( 0, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 3), d1)); // iA# / det
result.template writePacket<ResultAlignment>( 4, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 0), d2));
result.template writePacket<ResultAlignment>( 2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 3), d1)); // iB# / det
result.template writePacket<ResultAlignment>( 6, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 0), d2));
result.template writePacket<ResultAlignment>( 8, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 3), d1)); // iC# / det
result.template writePacket<ResultAlignment>(12, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 0), d2));
result.template writePacket<ResultAlignment>(10, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 3), d1)); // iD# / det
result.template writePacket<ResultAlignment>(14, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 0), d2));
DenseIndex res_stride = result.outerStride();
double* res = result.data();
pstoret<double, Packet2d, ResultAlignment>(res+0, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 3), d1));
pstoret<double, Packet2d, ResultAlignment>(res+res_stride, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 0), d2));
pstoret<double, Packet2d, ResultAlignment>(res+2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 3), d1));
pstoret<double, Packet2d, ResultAlignment>(res+res_stride+2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 0), d2));
pstoret<double, Packet2d, ResultAlignment>(res+2*res_stride, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 3), d1));
pstoret<double, Packet2d, ResultAlignment>(res+3*res_stride, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 0), d2));
pstoret<double, Packet2d, ResultAlignment>(res+2*res_stride+2,_mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 3), d1));
pstoret<double, Packet2d, ResultAlignment>(res+3*res_stride+2,_mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 0), d2));
}
};


@ -10,6 +10,14 @@
#ifndef EIGEN_PASTIXSUPPORT_H
#define EIGEN_PASTIXSUPPORT_H
#if defined(DCOMPLEX)
#define PASTIX_COMPLEX COMPLEX
#define PASTIX_DCOMPLEX DCOMPLEX
#else
#define PASTIX_COMPLEX std::complex<float>
#define PASTIX_DCOMPLEX std::complex<double>
#endif
namespace Eigen {
/** \ingroup PaStiXSupport_Module
@ -74,14 +82,14 @@ namespace internal
{
if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
if (nbrhs == 0) {x = NULL; nbrhs=1;}
c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<COMPLEX*>(vals), perm, invp, reinterpret_cast<COMPLEX*>(x), nbrhs, iparm, dparm);
c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<PASTIX_COMPLEX*>(vals), perm, invp, reinterpret_cast<PASTIX_COMPLEX*>(x), nbrhs, iparm, dparm);
}
void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<double> *vals, int *perm, int * invp, std::complex<double> *x, int nbrhs, int *iparm, double *dparm)
{
if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
if (nbrhs == 0) {x = NULL; nbrhs=1;}
z_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<DCOMPLEX*>(vals), perm, invp, reinterpret_cast<DCOMPLEX*>(x), nbrhs, iparm, dparm);
z_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<PASTIX_DCOMPLEX*>(vals), perm, invp, reinterpret_cast<PASTIX_DCOMPLEX*>(x), nbrhs, iparm, dparm);
}
// Convert the matrix to Fortran-style Numbering


@ -221,11 +221,11 @@ class PardisoImpl
m_type = type;
bool symmetric = std::abs(m_type) < 10;
m_iparm[0] = 1; // No solver default
m_iparm[1] = 3; // use Metis for the ordering
m_iparm[2] = 1; // Numbers of processors, value of OMP_NUM_THREADS
m_iparm[1] = 2; // use Metis for the ordering
m_iparm[2] = 0; // Reserved. Set to zero. (??Numbers of processors, value of OMP_NUM_THREADS??)
m_iparm[3] = 0; // No iterative-direct algorithm
m_iparm[4] = 0; // No user fill-in reducing permutation
m_iparm[5] = 0; // Write solution into x
m_iparm[5] = 0; // Write solution into x, b is left unchanged
m_iparm[6] = 0; // Not in use
m_iparm[7] = 2; // Max numbers of iterative refinement steps
m_iparm[8] = 0; // Not in use
@ -246,7 +246,10 @@ class PardisoImpl
m_iparm[26] = 0; // No matrix checker
m_iparm[27] = (sizeof(RealScalar) == 4) ? 1 : 0;
m_iparm[34] = 1; // C indexing
m_iparm[59] = 1; // Automatic switch between In-Core and Out-of-Core modes
m_iparm[36] = 0; // CSR
m_iparm[59] = 0; // 0 - In-Core ; 1 - Automatic switch between In-Core and Out-of-Core modes ; 2 - Out-of-Core
memset(m_pt, 0, sizeof(m_pt));
}
protected:
@ -384,7 +387,6 @@ bool PardisoImpl<Base>::_solve(const MatrixBase<BDerived> &b, MatrixBase<XDerive
m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
m_perm.data(), nrhs, m_iparm.data(), m_msglvl,
rhs_ptr, x.derived().data());
return error==0;
}
@ -397,6 +399,9 @@ bool PardisoImpl<Base>::_solve(const MatrixBase<BDerived> &b, MatrixBase<XDerive
* using the Intel MKL PARDISO library. The sparse matrix A must be square and invertible.
* The vectors or matrices X and B can be either dense or sparse.
*
* By default, it runs in in-core mode. To enable PARDISO's out-of-core feature, set:
* \code solver.pardisoParameterArray()[59] = 1; \endcode
*
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
*
* \sa \ref TutorialSparseDirectSolvers
@ -447,6 +452,9 @@ class PardisoLU : public PardisoImpl< PardisoLU<MatrixType> >
* using the Intel MKL PARDISO library. The sparse matrix A must be selfadjoint and positive definite.
* The vectors or matrices X and B can be either dense or sparse.
*
* By default, it runs in in-core mode. To enable PARDISO's out-of-core feature, set:
* \code solver.pardisoParameterArray()[59] = 1; \endcode
*
* \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
* \tparam UpLo can be any bitwise combination of Upper, Lower. The default is Upper, meaning only the upper triangular part has to be used.
* Upper|Lower can be used to tell both triangular parts can be used as input.
@ -507,6 +515,9 @@ class PardisoLLT : public PardisoImpl< PardisoLLT<MatrixType,_UpLo> >
* For complex matrices, A can also be symmetric only, see the \a Options template parameter.
* The vectors or matrices X and B can be either dense or sparse.
*
* By default, it runs in in-core mode. To enable PARDISO's out-of-core feature, set:
* \code solver.pardisoParameterArray()[59] = 1; \endcode
*
* \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
* \tparam Options can be any bitwise combination of Upper, Lower, and Symmetric. The default is Upper, meaning only the upper triangular part has to be used.
* Symmetric can be used for symmetric, non-selfadjoint complex matrices, the default being to assume a selfadjoint matrix.


@ -127,9 +127,6 @@ template<typename _MatrixType> class ColPivHouseholderQR
*
* \returns a solution.
*
* \note The case where b is a matrix is not yet implemented. Also, this
* code is space inefficient.
*
* \note_about_checking_solutions
*
* \note_about_arbitrary_choice_of_solution


@ -134,9 +134,6 @@ template<typename _MatrixType> class FullPivHouseholderQR
* \returns the exact or least-square solution if the rank is greater or equal to the number of columns of A,
* and an arbitrary solution otherwise.
*
* \note The case where b is a matrix is not yet implemented. Also, this
* code is space inefficient.
*
* \note_about_checking_solutions
*
* \note_about_arbitrary_choice_of_solution


@ -107,9 +107,6 @@ template<typename _MatrixType> class HouseholderQR
*
* \returns a solution.
*
* \note The case where b is a matrix is not yet implemented. Also, this
* code is space inefficient.
*
* \note_about_checking_solutions
*
* \note_about_arbitrary_choice_of_solution


@ -359,29 +359,42 @@ struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, false
{
typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
typedef typename SVD::Index Index;
static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {}
typedef typename MatrixType::RealScalar RealScalar;
static bool run(typename SVD::WorkMatrixType&, SVD&, Index, Index, RealScalar&) { return true; }
};
template<typename MatrixType, int QRPreconditioner>
struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
{
typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
typedef typename SVD::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename SVD::Index Index;
static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q)
static bool run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q, RealScalar& maxDiagEntry)
{
using std::sqrt;
using std::abs;
using std::max;
Scalar z;
JacobiRotation<Scalar> rot;
RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
const RealScalar precision = NumTraits<Scalar>::epsilon();
if(n==0)
{
z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
work_matrix.row(p) *= z;
if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
if(work_matrix.coeff(q,q)!=Scalar(0))
// make sure first column is zero
work_matrix.coeffRef(p,p) = work_matrix.coeffRef(q,p) = Scalar(0);
if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero)
{
// work_matrix.coeff(p,q) can be zero if work_matrix.coeff(q,p) is not zero but small enough to underflow when computing n
z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
work_matrix.row(p) *= z;
if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
}
if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero)
{
z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
work_matrix.row(q) *= z;
@ -395,19 +408,25 @@ struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
rot.s() = work_matrix.coeff(q,p) / n;
work_matrix.applyOnTheLeft(p,q,rot);
if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());
if(work_matrix.coeff(p,q) != Scalar(0))
if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero)
{
Scalar z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
work_matrix.col(q) *= z;
if(svd.computeV()) svd.m_matrixV.col(q) *= z;
}
if(work_matrix.coeff(q,q) != Scalar(0))
if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero)
{
z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
work_matrix.row(q) *= z;
if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
}
}
// update largest diagonal entry
maxDiagEntry = max EIGEN_EMPTY (maxDiagEntry,max EIGEN_EMPTY (abs(work_matrix.coeff(p,p)), abs(work_matrix.coeff(q,q))));
// and check whether the 2x2 block is already diagonal
RealScalar threshold = max EIGEN_EMPTY (considerAsZero, precision * maxDiagEntry);
return abs(work_matrix.coeff(p,q))>threshold || abs(work_matrix.coeff(q,p)) > threshold;
}
};
@ -424,22 +443,23 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
JacobiRotation<RealScalar> rot1;
RealScalar t = m.coeff(0,0) + m.coeff(1,1);
RealScalar d = m.coeff(1,0) - m.coeff(0,1);
if(t == RealScalar(0))
if(d == RealScalar(0))
{
rot1.c() = RealScalar(0);
rot1.s() = d > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
rot1.s() = RealScalar(0);
rot1.c() = RealScalar(1);
}
else
{
RealScalar t2d2 = numext::hypot(t,d);
rot1.c() = abs(t)/t2d2;
rot1.s() = d/t2d2;
if(t<RealScalar(0))
rot1.s() = -rot1.s();
// If d!=0, then t/d cannot overflow because the magnitudes of the
// entries forming d are not too small compared to those forming t.
RealScalar u = t / d;
RealScalar tmp = sqrt(RealScalar(1) + numext::abs2(u));
rot1.s() = RealScalar(1) / tmp;
rot1.c() = u / tmp;
}
m.applyOnTheLeft(0,1,rot1);
j_right->makeJacobi(m,0,1);
*j_left = rot1 * j_right->transpose();
*j_left = rot1 * j_right->transpose();
}
} // end namespace internal
@ -826,6 +846,7 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
check_template_parameters();
using std::abs;
using std::max;
allocate(matrix.rows(), matrix.cols(), computationOptions);
// currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
@ -857,6 +878,7 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
}
/*** step 2. The main Jacobi SVD iteration. ***/
RealScalar maxDiagEntry = m_workMatrix.cwiseAbs().diagonal().maxCoeff();
bool finished = false;
while(!finished)
@ -872,25 +894,27 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
// if this 2x2 sub-matrix is not diagonal already...
// notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
// keep us iterating forever. Similarly, small denormal numbers are considered zero.
using std::max;
RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)),
abs(m_workMatrix.coeff(q,q))));
// We compare both values to threshold instead of calling max to be robust to NaN (See bug 791)
RealScalar threshold = max EIGEN_EMPTY (considerAsZero, precision * maxDiagEntry);
if(abs(m_workMatrix.coeff(p,q))>threshold || abs(m_workMatrix.coeff(q,p)) > threshold)
{
finished = false;
// perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal
internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q);
JacobiRotation<RealScalar> j_left, j_right;
internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
// the complex-to-real operation returns true if the updated 2x2 block is not already diagonal
if(internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q, maxDiagEntry))
{
JacobiRotation<RealScalar> j_left, j_right;
internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
// accumulate resulting Jacobi rotations
m_workMatrix.applyOnTheLeft(p,q,j_left);
if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose());
// accumulate resulting Jacobi rotations
m_workMatrix.applyOnTheLeft(p,q,j_left);
if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose());
m_workMatrix.applyOnTheRight(p,q,j_right);
if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right);
m_workMatrix.applyOnTheRight(p,q,j_right);
if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right);
// keep track of the largest diagonal coefficient
maxDiagEntry = max EIGEN_EMPTY (maxDiagEntry,max EIGEN_EMPTY (abs(m_workMatrix.coeff(p,p)), abs(m_workMatrix.coeff(q,q))));
}
}
}
}

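A quick sanity check of the first rotation in real_2x2_jacobi_svd above (a derivation, not from the patch; up to Eigen's internal sign conventions for JacobiRotation): with t = m00 + m11, d = m10 - m01, and u = t/d, the choice

```latex
c = \frac{u}{\sqrt{1+u^2}}, \qquad s = \frac{1}{\sqrt{1+u^2}}, \qquad
G = \begin{pmatrix} c & s \\ -s & c \end{pmatrix}
\;\Rightarrow\;
(Gm)_{10} - (Gm)_{01} = c\,d - s\,t = \frac{u\,d - t}{\sqrt{1+u^2}} = 0 .
```

So G m is symmetric, and makeJacobi can then diagonalize the block with a single two-sided rotation.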

@ -102,6 +102,11 @@ class CompressedStorage
inline size_t allocatedSize() const { return m_allocatedSize; }
inline void clear() { m_size = 0; }
const Scalar* valuePtr() const { return m_values; }
Scalar* valuePtr() { return m_values; }
const Index* indexPtr() const { return m_indices; }
Index* indexPtr() { return m_indices; }
inline Scalar& value(size_t i) { return m_values[i]; }
inline const Scalar& value(size_t i) const { return m_values[i]; }
@ -208,8 +213,10 @@ class CompressedStorage
Index* newIndices = new Index[size];
size_t copySize = (std::min)(size, m_size);
// copy
internal::smart_copy(m_values, m_values+copySize, newValues);
internal::smart_copy(m_indices, m_indices+copySize, newIndices);
if (copySize>0) {
internal::smart_copy(m_values, m_values+copySize, newValues);
internal::smart_copy(m_indices, m_indices+copySize, newIndices);
}
// delete old stuff
delete[] m_values;
delete[] m_indices;

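A sketch of why the new valuePtr()/indexPtr() accessors (and the copySize>0 guard around smart_copy) matter; the class below is illustrative, not the real CompressedStorage:

```cpp
#include <cstddef>

struct Storage {
  float* m_values; // null while the storage is empty

  // &value(0) indexes through m_values even when it is null: undefined behaviour.
  float& value(std::size_t i) { return m_values[i]; }

  // valuePtr() just hands back the (possibly null) pointer: always safe,
  // and callers copying zero elements never touch it.
  float* valuePtr() { return m_values; }
};
```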
File diff suppressed because it is too large


@ -128,20 +128,20 @@ class SparseMatrix
/** \returns a const pointer to the array of values.
* This function is aimed at interoperability with other libraries.
* \sa innerIndexPtr(), outerIndexPtr() */
inline const Scalar* valuePtr() const { return &m_data.value(0); }
inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
/** \returns a non-const pointer to the array of values.
* This function is aimed at interoperability with other libraries.
* \sa innerIndexPtr(), outerIndexPtr() */
inline Scalar* valuePtr() { return &m_data.value(0); }
inline Scalar* valuePtr() { return m_data.valuePtr(); }
/** \returns a const pointer to the array of inner indices.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), outerIndexPtr() */
inline const Index* innerIndexPtr() const { return &m_data.index(0); }
inline const Index* innerIndexPtr() const { return m_data.indexPtr(); }
/** \returns a non-const pointer to the array of inner indices.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), outerIndexPtr() */
inline Index* innerIndexPtr() { return &m_data.index(0); }
inline Index* innerIndexPtr() { return m_data.indexPtr(); }
/** \returns a const pointer to the array of the starting positions of the inner vectors.
* This function is aimed at interoperability with other libraries.
@ -697,8 +697,8 @@ class SparseMatrix
{
eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
this->m_data.resize(rows());
Eigen::Map<Matrix<Index, Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1);
Eigen::Map<Matrix<Scalar, Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes();
Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_data.indexPtr(), rows()).setLinSpaced(0, rows()-1);
Eigen::Map<Matrix<Scalar, Dynamic, 1> >(this->m_data.valuePtr(), rows()).setOnes();
Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
std::free(m_innerNonZeros);
m_innerNonZeros = 0;


@ -38,6 +38,7 @@ template<typename Derived> class SparseMatrixBase
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::Index StorageIndex;
typedef typename internal::add_const_on_value_type_if_arithmetic<
typename internal::packet_traits<Scalar>::type
>::type PacketReturnType;


@ -29,7 +29,10 @@ typename internal::traits<SparseMatrix<_Scalar,_Options,_Index> >::Scalar
SparseMatrix<_Scalar,_Options,_Index>::sum() const
{
eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
return Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), m_data.size()).sum();
if(this->isCompressed())
return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();
else
return Base::sum();
}
template<typename _Scalar, int _Options, typename _Index>
@ -37,7 +40,7 @@ typename internal::traits<SparseVector<_Scalar,_Options, _Index> >::Scalar
SparseVector<_Scalar,_Options,_Index>::sum() const
{
eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
return Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), m_data.size()).sum();
return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();
}
} // end namespace Eigen


@ -48,7 +48,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
res.resize(rows, cols);
res.reserve(estimated_nnz_prod);
double ratioColRes = double(estimated_nnz_prod)/double(lhs.rows()*rhs.cols());
double ratioColRes = double(estimated_nnz_prod)/(double(lhs.rows())*double(rhs.cols()));
for (Index j=0; j<cols; ++j)
{
// FIXME:


@ -84,12 +84,12 @@ class SparseVector
EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
EIGEN_STRONG_INLINE Index outerSize() const { return 1; }
EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return &m_data.value(0); }
EIGEN_STRONG_INLINE Scalar* valuePtr() { return &m_data.value(0); }
EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return m_data.valuePtr(); }
EIGEN_STRONG_INLINE Scalar* valuePtr() { return m_data.valuePtr(); }
EIGEN_STRONG_INLINE const Index* innerIndexPtr() const { return m_data.indexPtr(); }
EIGEN_STRONG_INLINE Index* innerIndexPtr() { return m_data.indexPtr(); }
EIGEN_STRONG_INLINE const Index* innerIndexPtr() const { return &m_data.index(0); }
EIGEN_STRONG_INLINE Index* innerIndexPtr() { return &m_data.index(0); }
/** \internal */
inline Storage& data() { return m_data; }
/** \internal */

View File

@ -148,7 +148,8 @@ class UmfPackLU : internal::noncopyable
UmfPackLU() { init(); }
UmfPackLU(const MatrixType& matrix)
template<typename InputMatrixType>
UmfPackLU(const InputMatrixType& matrix)
{
init();
compute(matrix);

View File

@ -44,15 +44,10 @@
#define BTL_ASM_COMMENT(X)
#endif
#if (defined __GNUC__) && (!defined __INTEL_COMPILER) && !defined(__arm__) && !defined(__powerpc__)
#define BTL_DISABLE_SSE_EXCEPTIONS() { \
int aux; \
asm( \
"stmxcsr %[aux] \n\t" \
"orl $32832, %[aux] \n\t" \
"ldmxcsr %[aux] \n\t" \
: : [aux] "m" (aux)); \
}
#ifdef __SSE__
#include "xmmintrin.h"
// This enables flush to zero (FTZ) and denormals are zero (DAZ) modes:
#define BTL_DISABLE_SSE_EXCEPTIONS() { _mm_setcsr(_mm_getcsr() | 0x8040); }
#else
#define BTL_DISABLE_SSE_EXCEPTIONS()
#endif
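For reference (an observation, not from the diff): 0x8040 sets bit 15 (FTZ, flush-to-zero) and bit 6 (DAZ, denormals-are-zero) of the MXCSR register, and the intrinsics in xmmintrin.h make the macro usable on any compiler providing SSE intrinsics rather than only GCC-style inline asm. A tiny check, assuming an x86 target with SSE:

#include <xmmintrin.h>
#include <cstdio>

int main() {
  _mm_setcsr(_mm_getcsr() | 0x8040);  // FTZ (bit 15) | DAZ (bit 6)
  volatile float tiny = 1e-42f;       // subnormal float input
  std::printf("%g\n", tiny * 0.5f);   // prints 0 once DAZ/FTZ are active
}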

View File

@ -1,7 +1,7 @@
include(EigenTesting)
include(CheckCXXSourceCompiles)
# configure the "site" and "buildname"
# configure the "site" and "buildname"
ei_set_sitename()
# retrieve and store the build string
@ -14,17 +14,10 @@ add_dependencies(check buildtests)
# check whether /bin/bash exists
find_file(EIGEN_BIN_BASH_EXISTS "/bin/bash" PATHS "/" NO_DEFAULT_PATH)
# CMake/Ctest does not allow us to change the build command,
# so we have to workaround by directly editing the generated DartConfiguration.tcl file
# save CMAKE_MAKE_PROGRAM
set(CMAKE_MAKE_PROGRAM_SAVE ${CMAKE_MAKE_PROGRAM})
# and set a fake one
set(CMAKE_MAKE_PROGRAM "@EIGEN_MAKECOMMAND_PLACEHOLDER@")
# This call activates testing and generates the DartConfiguration.tcl
include(CTest)
set(EIGEN_TEST_BUILD_FLAGS " " CACHE STRING "Options passed to the build command of unit tests")
set(EIGEN_TEST_BUILD_FLAGS "" CACHE STRING "Options passed to the build command of unit tests")
# Overwrite default DartConfiguration.tcl such that ctest can build our unit tests.
# Recall that our unit tests are not in the "all" target, so we have to explicitly ask ctest to build our custom 'buildtests' target.

View File

@ -26,7 +26,7 @@ function(DetermineShortWindowsName WIN_VERSION win_num_version)
endfunction()
function(DetermineOSVersion OS_VERSION)
if (WIN32)
if (WIN32 AND CMAKE_HOST_SYSTEM_NAME MATCHES Windows)
file (TO_NATIVE_PATH "$ENV{COMSPEC}" SHELL)
exec_program( ${SHELL} ARGS "/c" "ver" OUTPUT_VARIABLE ver_output)

View File

@ -46,6 +46,9 @@ They are summarized in the following table:
<tr><td>SPQR</td><td>\link SPQRSupport_Module SPQRSupport \endlink </td> <td> QR factorization </td>
<td> Any, rectangular</td><td>fill-in reducing, multithreaded, fast dense algebra</td>
<td> requires the <a href="http://www.suitesparse.com">SuiteSparse</a> package, \b GPL </td><td>recommended for linear least-squares problems, has a rank-revealing feature</td></tr>
<tr><td>PardisoLLT \n PardisoLDLT \n PardisoLU</td><td>\link PardisoSupport_Module PardisoSupport \endlink</td><td>Direct LLt, LDLt, LU factorizations</td><td>SPD \n SPD \n Square</td><td>Fill-in reducing, Leverage fast dense algebra, Multithreading</td>
<td>Requires the <a href="http://eigen.tuxfamily.org/Counter/redirect_to_mkl.php">Intel MKL</a> package, \b Proprietary </td>
<td>optimized for tough problem patterns, see also \link TopicUsingIntelMKL using MKL with Eigen \endlink</td></tr>
</table>
Here \c SPD means symmetric positive definite.

View File

@ -16,7 +16,7 @@ Both eigen_assert and eigen_plain_assert are defined in Macros.h. Defining eigen
#include <stdexcept>
#undef eigen_assert
#define eigen_assert(x) \
if (!x) { throw (std::runtime_error("Put your message here")); }
if (!(x)) { throw (std::runtime_error("Put your message here")); }
\endcode
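The added parentheses fix an operator-precedence trap: with the old expansion, asserting a compound condition silently tested the wrong thing. A sketch of the expansion (not library code):

// eigen_assert(a == b) with the unparenthesized macro expanded to
//   if (!a == b) { throw ...; }   // compares (!a) with b
// whereas the fixed macro expands to
//   if (!(a == b)) { throw ...; } // negates the whole condition, as intended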
\subsection DisableAssert Disabling assertions

View File

@ -125,6 +125,7 @@ endif(TEST_LIB)
set_property(GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT "Official")
add_custom_target(BuildOfficial)
ei_add_test(rand)
ei_add_test(meta)
ei_add_test(sizeof)
ei_add_test(dynalloc)

View File

@ -9,14 +9,69 @@
#include "main.h"
template<int M1, int M2, int N1, int N2>
void test_blocks()
{
Matrix<int, M1+M2, N1+N2> m_fixed;
MatrixXi m_dynamic(M1+M2, N1+N2);
Matrix<int, M1, N1> mat11; mat11.setRandom();
Matrix<int, M1, N2> mat12; mat12.setRandom();
Matrix<int, M2, N1> mat21; mat21.setRandom();
Matrix<int, M2, N2> mat22; mat22.setRandom();
MatrixXi matx11 = mat11, matx12 = mat12, matx21 = mat21, matx22 = mat22;
{
VERIFY_IS_EQUAL((m_fixed << mat11, mat12, mat21, matx22).finished(), (m_dynamic << mat11, matx12, mat21, matx22).finished());
VERIFY_IS_EQUAL((m_fixed.template topLeftCorner<M1,N1>()), mat11);
VERIFY_IS_EQUAL((m_fixed.template topRightCorner<M1,N2>()), mat12);
VERIFY_IS_EQUAL((m_fixed.template bottomLeftCorner<M2,N1>()), mat21);
VERIFY_IS_EQUAL((m_fixed.template bottomRightCorner<M2,N2>()), mat22);
VERIFY_IS_EQUAL((m_fixed << mat12, mat11, matx21, mat22).finished(), (m_dynamic << mat12, matx11, matx21, mat22).finished());
}
if(N1 > 0)
{
VERIFY_RAISES_ASSERT((m_fixed << mat11, mat12, mat11, mat21, mat22));
VERIFY_RAISES_ASSERT((m_fixed << mat11, mat12, mat21, mat21, mat22));
}
else
{
// allow insertion of zero-column blocks:
VERIFY_IS_EQUAL((m_fixed << mat11, mat12, mat11, mat11, mat21, mat21, mat22).finished(), (m_dynamic << mat12, mat22).finished());
}
if(M1 != M2)
{
VERIFY_RAISES_ASSERT((m_fixed << mat11, mat21, mat12, mat22));
}
}
template<int N>
struct test_block_recursion
{
static void run()
{
test_blocks<(N>>6)&3, (N>>4)&3, (N>>2)&3, N & 3>();
test_block_recursion<N-1>::run();
}
};
template<>
struct test_block_recursion<-1>
{
static void run() { }
};
void test_commainitializer()
{
Matrix3d m3;
Matrix4d m4;
VERIFY_RAISES_ASSERT( (m3 << 1, 2, 3, 4, 5, 6, 7, 8) );
#ifndef _MSC_VER
VERIFY_RAISES_ASSERT( (m3 << 1, 2, 3, 4, 5, 6, 7, 8) );
VERIFY_RAISES_ASSERT( (m3 << 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) );
#endif
@ -43,4 +98,7 @@ void test_commainitializer()
4, 5, 6,
vec[2].transpose();
VERIFY_IS_APPROX(m3, ref);
// recursively test all block-sizes from 0 to 3:
test_block_recursion<(1<<8) - 1>();
}
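The recursion above is a compile-time enumeration: each level decodes four 2-bit fields out of N, so running N from (1<<8)-1 = 255 down to 0 instantiates test_blocks<M1,M2,N1,N2> for every size combination in {0,...,3}^4. One illustrative decoding:

// N = 228 = 0b11'10'01'00 decodes to
//   M1 = (228>>6)&3 = 3,  M2 = (228>>4)&3 = 2,
//   N1 = (228>>2)&3 = 1,  N2 =  228     &3 = 0
// i.e. test_blocks<3,2,1,0>() is one of the 256 instantiations.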

View File

@ -164,9 +164,12 @@ template<typename MatrixType> void cwiseops(const MatrixType& m)
VERIFY( (m1.cwise().min(m2).cwise() < (m1+mones)).all() );
VERIFY( (m1.cwise().max(m2).cwise() > (m1-mones)).all() );
#if(__cplusplus < 201103L)
// std::binder* are deprecated since c++11 and will be removed in c++17
VERIFY( (m1.cwise()<m1.unaryExpr(bind2nd(plus<Scalar>(), Scalar(1)))).all() );
VERIFY( !(m1.cwise()<m1bis.unaryExpr(bind2nd(minus<Scalar>(), Scalar(1)))).all() );
VERIFY( !(m1.cwise()>m1bis.unaryExpr(bind2nd(plus<Scalar>(), Scalar(1)))).any() );
#endif
cwiseops_real_only(m1, m2, m3, mones);
}
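Since the deprecated binders are only compiled out here rather than replaced, a hedged note: on C++11 and later the same checks could be written with lambdas instead of bind2nd, e.g. (a sketch, not part of the change, and untested against the eigen2 compatibility layer):

// C++11 equivalent of m1.unaryExpr(bind2nd(plus<Scalar>(), Scalar(1))):
VERIFY( (m1.cwise() < m1.unaryExpr([](Scalar v) { return v + Scalar(1); })).all() );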

View File

@ -137,9 +137,12 @@ template<typename MatrixType> void cwiseops(const MatrixType& m)
VERIFY( (m1.cwise().min(m2).cwise() < (m1+mones)).all() );
VERIFY( (m1.cwise().max(m2).cwise() > (m1-mones)).all() );
#if(__cplusplus < 201103L)
// std::binder* are deprecated since c++11 and will be removed in c++17
VERIFY( (m1.cwise()<m1.unaryExpr(bind2nd(plus<Scalar>(), Scalar(1)))).all() );
VERIFY( !(m1.cwise()<m1.unaryExpr(bind2nd(minus<Scalar>(), Scalar(1)))).all() );
VERIFY( !(m1.cwise()>m1.unaryExpr(bind2nd(plus<Scalar>(), Scalar(1)))).any() );
#endif
}
void test_eigen2_cwiseop()

View File

@ -54,6 +54,8 @@ template<typename Scalar,int Size> void homogeneous(void)
T2MatrixType t2 = T2MatrixType::Random();
VERIFY_IS_APPROX(t2 * (v0.homogeneous().eval()), t2 * v0.homogeneous());
VERIFY_IS_APPROX(t2 * (m0.colwise().homogeneous().eval()), t2 * m0.colwise().homogeneous());
VERIFY_IS_APPROX(t2 * (v0.homogeneous().asDiagonal()), t2 * hv0.asDiagonal());
VERIFY_IS_APPROX((v0.homogeneous().asDiagonal()) * t2, hv0.asDiagonal() * t2);
VERIFY_IS_APPROX((v0.transpose().rowwise().homogeneous().eval()) * t2,
v0.transpose().rowwise().homogeneous() * t2);

View File

@ -320,6 +320,9 @@ template<typename Scalar, int Mode, int Options> void transformations()
t0.scale(v0);
t1 *= AlignedScaling3(v0);
VERIFY_IS_APPROX(t0.matrix(), t1.matrix());
t1 = AlignedScaling3(v0) * (Translation3(v0) * Transform3(q1));
t1 = t1 * v0.asDiagonal();
VERIFY_IS_APPROX(t0.matrix(), t1.matrix());
// transformation * translation
t0.translate(v0);
t1 = t1 * Translation3(v0);
@ -437,6 +440,79 @@ template<typename Scalar, int Mode, int Options> void transformations()
Rotation2D<Scalar> r2(r1); // copy ctor
VERIFY_IS_APPROX(r2.angle(),s0);
}
{
Transform3 t32(Matrix4::Random()), t33, t34;
t34 = t33 = t32;
t32.scale(v0);
t33*=AlignedScaling3(v0);
VERIFY_IS_APPROX(t32.matrix(), t33.matrix());
t33 = t34 * AlignedScaling3(v0);
VERIFY_IS_APPROX(t32.matrix(), t33.matrix());
}
}
template<typename A1, typename A2, typename P, typename Q, typename V, typename H>
void transform_associativity_left(const A1& a1, const A2& a2, const P& p, const Q& q, const V& v, const H& h)
{
VERIFY_IS_APPROX( q*(a1*v), (q*a1)*v );
VERIFY_IS_APPROX( q*(a2*v), (q*a2)*v );
VERIFY_IS_APPROX( q*(p*h).hnormalized(), ((q*p)*h).hnormalized() );
}
template<typename A1, typename A2, typename P, typename Q, typename V, typename H>
void transform_associativity2(const A1& a1, const A2& a2, const P& p, const Q& q, const V& v, const H& h)
{
VERIFY_IS_APPROX( a1*(q*v), (a1*q)*v );
VERIFY_IS_APPROX( a2*(q*v), (a2*q)*v );
VERIFY_IS_APPROX( p *(q*v).homogeneous(), (p *q)*v.homogeneous() );
transform_associativity_left(a1, a2,p, q, v, h);
}
template<typename Scalar, int Dim, int Options,typename RotationType>
void transform_associativity(const RotationType& R)
{
typedef Matrix<Scalar,Dim,1> VectorType;
typedef Matrix<Scalar,Dim+1,1> HVectorType;
typedef Matrix<Scalar,Dim,Dim> LinearType;
typedef Matrix<Scalar,Dim+1,Dim+1> MatrixType;
typedef Transform<Scalar,Dim,AffineCompact,Options> AffineCompactType;
typedef Transform<Scalar,Dim,Affine,Options> AffineType;
typedef Transform<Scalar,Dim,Projective,Options> ProjectiveType;
typedef DiagonalMatrix<Scalar,Dim> ScalingType;
typedef Translation<Scalar,Dim> TranslationType;
AffineCompactType A1c; A1c.matrix().setRandom();
AffineCompactType A2c; A2c.matrix().setRandom();
AffineType A1(A1c);
AffineType A2(A2c);
ProjectiveType P1; P1.matrix().setRandom();
VectorType v1 = VectorType::Random();
VectorType v2 = VectorType::Random();
HVectorType h1 = HVectorType::Random();
Scalar s1 = internal::random<Scalar>();
LinearType L = LinearType::Random();
MatrixType M = MatrixType::Random();
CALL_SUBTEST( transform_associativity2(A1c, A1, P1, A2, v2, h1) );
CALL_SUBTEST( transform_associativity2(A1c, A1, P1, A2c, v2, h1) );
CALL_SUBTEST( transform_associativity2(A1c, A1, P1, v1.asDiagonal(), v2, h1) );
CALL_SUBTEST( transform_associativity2(A1c, A1, P1, ScalingType(v1), v2, h1) );
CALL_SUBTEST( transform_associativity2(A1c, A1, P1, Scaling(v1), v2, h1) );
CALL_SUBTEST( transform_associativity2(A1c, A1, P1, Scaling(s1), v2, h1) );
CALL_SUBTEST( transform_associativity2(A1c, A1, P1, TranslationType(v1), v2, h1) );
CALL_SUBTEST( transform_associativity_left(A1c, A1, P1, L, v2, h1) );
CALL_SUBTEST( transform_associativity2(A1c, A1, P1, R, v2, h1) );
VERIFY_IS_APPROX( A1*(M*h1), (A1*M)*h1 );
VERIFY_IS_APPROX( A1c*(M*h1), (A1c*M)*h1 );
VERIFY_IS_APPROX( P1*(M*h1), (P1*M)*h1 );
VERIFY_IS_APPROX( M*(A1*h1), (M*A1)*h1 );
VERIFY_IS_APPROX( M*(A1c*h1), (M*A1c)*h1 );
VERIFY_IS_APPROX( M*(P1*h1), ((M*P1)*h1) );
}
template<typename Scalar> void transform_alignment()
@ -517,5 +593,8 @@ void test_geo_transformations()
CALL_SUBTEST_7(( transform_products<double,3,RowMajor|AutoAlign>() ));
CALL_SUBTEST_7(( transform_products<float,2,AutoAlign>() ));
CALL_SUBTEST_8(( transform_associativity<double,2,ColMajor>(Rotation2D<double>(internal::random<double>()*double(3.14))) ));
CALL_SUBTEST_8(( transform_associativity<double,3,ColMajor>(Quaterniond(Vector4d::Random().normalized())) ));
}
}

View File

@ -327,6 +327,7 @@ template<typename Scalar,bool ConjLhs,bool ConjRhs> void test_conj_helper(Scalar
ref[i] += cj0(data1[i]) * cj1(data2[i]);
VERIFY(internal::isApprox(ref[i], cj.pmadd(data1[i],data2[i],tmp)) && "conj_helper pmadd");
}
*pval += 0; // Workaround msvc 2013 issue (bad code generation)
internal::pstore(pval,pcj.pmadd(internal::pload<Packet>(data1),internal::pload<Packet>(data2),internal::pload<Packet>(pval)));
VERIFY(areApprox(ref, pval, PacketSize) && "conj_helper pmadd");
}

View File

@ -53,14 +53,29 @@ template<typename MatrixType> void inverse_general_4x4(int repeat)
// FIXME that 1.25 used to be 1.2 until we tested gcc 4.1 on 30 June 2010 and got 1.21.
VERIFY(error_avg < (NumTraits<Scalar>::IsComplex ? 8.0 : 1.25));
VERIFY(error_max < (NumTraits<Scalar>::IsComplex ? 64.0 : 20.0));
{
int s = 5;//internal::random<int>(4,10);
int i = 0;//internal::random<int>(0,s-4);
int j = 0;//internal::random<int>(0,s-4);
Matrix<Scalar,5,5> mat(s,s);
mat.setRandom();
MatrixType submat = mat.template block<4,4>(i,j);
MatrixType mat_inv = mat.template block<4,4>(i,j).inverse();
VERIFY_IS_APPROX(mat_inv, submat.inverse());
mat.template block<4,4>(i,j) = submat.inverse();
VERIFY_IS_APPROX(mat_inv, (mat.template block<4,4>(i,j)));
}
}
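The new block above is worth spelling out: it inverts a 4x4 block living inside a larger 5x5 matrix, so both the source and the destination of the specialized 4x4 inverse have a non-trivial outer stride, and it also writes the result back into the block it came from. A minimal standalone version (illustrative only):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix<double,5,5> mat = Eigen::Matrix<double,5,5>::Random();
  Eigen::Matrix4d sub = mat.block<4,4>(0,0);            // contiguous copy
  Eigen::Matrix4d inv = mat.block<4,4>(0,0).inverse();  // strided source
  std::cout << (inv - sub.inverse()).norm() << '\n';    // ~0 if strides are handled
}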
void test_prec_inverse_4x4()
{
CALL_SUBTEST_1((inverse_permutation_4x4<Matrix4f>()));
CALL_SUBTEST_1(( inverse_general_4x4<Matrix4f>(200000 * g_repeat) ));
CALL_SUBTEST_1(( inverse_general_4x4<Matrix<float,4,4,RowMajor> >(200000 * g_repeat) ));
CALL_SUBTEST_2((inverse_permutation_4x4<Matrix<double,4,4,RowMajor> >()));
CALL_SUBTEST_2(( inverse_general_4x4<Matrix<double,4,4,ColMajor> >(200000 * g_repeat) ));
CALL_SUBTEST_2(( inverse_general_4x4<Matrix<double,4,4,RowMajor> >(200000 * g_repeat) ));
CALL_SUBTEST_3((inverse_permutation_4x4<Matrix4cf>()));

View File

@ -178,4 +178,12 @@ template<typename MatrixType> void product(const MatrixType& m)
// CwiseUnaryOp
VERIFY_IS_APPROX(x = Scalar(1.)*(A*x), A*z);
}
// regression test for blas_traits
{
VERIFY_IS_APPROX(square * (square*square).transpose(), square * square.transpose() * square.transpose());
VERIFY_IS_APPROX(square * (-(square*square)), -square * square * square);
VERIFY_IS_APPROX(square * (s1*(square*square)), s1 * square * square * square);
VERIFY_IS_APPROX(square * (square*square).conjugate(), square * square.conjugate() * square.conjugate());
}
}

View File

@ -299,6 +299,14 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
VERIFY_IS_APPROX(m1.innerVector(0).dot(refM2.row(0)), refM1.row(0).dot(refM2.row(0)));
else
VERIFY_IS_APPROX(m1.innerVector(0).dot(refM2.row(0)), refM1.col(0).dot(refM2.row(0)));
DenseVector rv = DenseVector::Random(m1.cols());
DenseVector cv = DenseVector::Random(m1.rows());
Index r = internal::random<Index>(0,m1.rows()-2);
Index c = internal::random<Index>(0,m1.cols()-1);
VERIFY_IS_APPROX(( m1.template block<1,Dynamic>(r,0,1,m1.cols()).dot(rv)) , refM1.row(r).dot(rv));
VERIFY_IS_APPROX(m1.row(r).dot(rv), refM1.row(r).dot(rv));
VERIFY_IS_APPROX(m1.col(c).dot(cv), refM1.col(c).dot(cv));
VERIFY_IS_APPROX(m1.conjugate(), refM1.conjugate());
VERIFY_IS_APPROX(m1.real(), refM1.real());

View File

@ -293,7 +293,7 @@ void MatrixExponential<MatrixType>::computeUV(float)
const float maxnorm = 3.925724783138660f;
frexp(m_l1norm / maxnorm, &m_squarings);
if (m_squarings < 0) m_squarings = 0;
MatrixType A = m_M / pow(Scalar(2), m_squarings);
MatrixType A = m_M / Scalar(pow(2, m_squarings));
pade7(A);
}
}
@ -315,7 +315,7 @@ void MatrixExponential<MatrixType>::computeUV(double)
const double maxnorm = 5.371920351148152;
frexp(m_l1norm / maxnorm, &m_squarings);
if (m_squarings < 0) m_squarings = 0;
MatrixType A = m_M / pow(Scalar(2), m_squarings);
MatrixType A = m_M / Scalar(pow(2, m_squarings));
pade13(A);
}
}
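A hedged reading of this repeated change: pow(Scalar(2), m_squarings) requires a pow overload for the (possibly user-defined) Scalar type, whereas pow(2, m_squarings) is evaluated in ordinary arithmetic and converted to Scalar once. For standard floating-point scalars, std::ldexp expresses the same power-of-two scaling directly (a sketch under that assumption, not the library's code):

#include <cmath>

// scale A = M / 2^squarings without calling pow on the matrix scalar type
template <class Matrix>
Matrix scale_down(const Matrix& M, int squarings) {
  typedef typename Matrix::Scalar Scalar;
  return M * Scalar(std::ldexp(1.0, -squarings));  // 2^(-squarings), exact for binary FP
}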
@ -340,7 +340,7 @@ void MatrixExponential<MatrixType>::computeUV(long double)
const long double maxnorm = 4.0246098906697353063L;
frexp(m_l1norm / maxnorm, &m_squarings);
if (m_squarings < 0) m_squarings = 0;
MatrixType A = m_M / pow(Scalar(2), m_squarings);
MatrixType A = m_M / Scalar(pow(2, m_squarings));
pade13(A);
}
#elif LDBL_MANT_DIG <= 106 // double-double
@ -376,7 +376,7 @@ void MatrixExponential<MatrixType>::computeUV(long double)
const long double maxnorm = 2.884233277829519311757165057717815L;
frexp(m_l1norm / maxnorm, &m_squarings);
if (m_squarings < 0) m_squarings = 0;
MatrixType A = m_M / pow(Scalar(2), m_squarings);
MatrixType A = m_M / Scalar(pow(2, m_squarings));
pade17(A);
}
#else

View File

@ -31,6 +31,8 @@ namespace Eigen
enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ };
enum { NumOfDerivativesAtCompileTime = OrderAtCompileTime /*!< The number of derivatives defined for the current spline. */ };
enum { DerivativeMemoryLayout = Dimension==1 ? RowMajor : ColMajor /*!< The derivative type's memory layout. */ };
/** \brief The data type used to store non-zero basis functions. */
typedef Array<Scalar,1,OrderAtCompileTime> BasisVectorType;
@ -39,7 +41,7 @@ namespace Eigen
typedef Array<Scalar,Dynamic,Dynamic,RowMajor,NumOfDerivativesAtCompileTime,OrderAtCompileTime> BasisDerivativeType;
/** \brief The data type used to store the spline's derivative values. */
typedef Array<Scalar,Dimension,Dynamic,ColMajor,Dimension,NumOfDerivativesAtCompileTime> DerivativeType;
typedef Array<Scalar,Dimension,Dynamic,DerivativeMemoryLayout,Dimension,NumOfDerivativesAtCompileTime> DerivativeType;
/** \brief The point type the spline is representing. */
typedef Array<Scalar,Dimension,1> PointType;
@ -62,12 +64,14 @@ namespace Eigen
{
enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ };
enum { NumOfDerivativesAtCompileTime = _DerivativeOrder==Dynamic ? Dynamic : _DerivativeOrder+1 /*!< The number of derivatives defined for the current spline. */ };
enum { DerivativeMemoryLayout = _Dim==1 ? RowMajor : ColMajor /*!< The derivative type's memory layout. */ };
/** \brief The data type used to store the values of the basis function derivatives. */
typedef Array<_Scalar,Dynamic,Dynamic,RowMajor,NumOfDerivativesAtCompileTime,OrderAtCompileTime> BasisDerivativeType;
/** \brief The data type used to store the spline's derivative values. */
typedef Array<_Scalar,_Dim,Dynamic,ColMajor,_Dim,NumOfDerivativesAtCompileTime> DerivativeType;
typedef Array<_Scalar,_Dim,Dynamic,DerivativeMemoryLayout,_Dim,NumOfDerivativesAtCompileTime> DerivativeType;
};
/** \brief 2D float B-spline with dynamic degree. */

View File

@ -162,6 +162,15 @@ void test_autodiff_jacobian()
CALL_SUBTEST(( forward_jacobian(TestFunc1<double>(3,3)) ));
}
double bug_1222() {
typedef Eigen::AutoDiffScalar<Eigen::Vector3d> AD;
const double _cv1_3 = 1.0;
const AD chi_3 = 1.0;
// this line did not work, because operator+ returns ADS<DerType&>, which then cannot be converted to ADS<DerType>
const AD denom = chi_3 + _cv1_3;
return denom.value();
}
void test_autodiff()
{
for(int i = 0; i < g_repeat; i++) {
@ -169,5 +178,7 @@ void test_autodiff()
CALL_SUBTEST_2( test_autodiff_vector() );
CALL_SUBTEST_3( test_autodiff_jacobian() );
}
bug_1222();
}

View File

@ -26,7 +26,7 @@
#include <boost/pool/pool_alloc.hpp>
#include <cmath>
#include <iosfwd>
#include <iostream>
#include <typeinfo> // operator typeid
namespace gtsam {

View File

@ -32,6 +32,7 @@
#include <list>
#include <fstream>
#include <limits>
#include <iostream>
using namespace std;

View File

@ -24,7 +24,7 @@
#include <boost/serialization/nvp.hpp>
#include <cassert>
#include <stdexcept>
#include <vector>
#include <array>
namespace boost {
namespace serialization {

View File

@ -21,6 +21,7 @@
#include <stdexcept>
#include <cstdarg>
#include <limits>
#include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip>

View File

@ -19,6 +19,7 @@
#include <gtsam/dllexport.h>
#include <gtsam/base/VectorSpace.h>
#include <iostream>
namespace gtsam {

View File

@ -22,6 +22,7 @@
#include <gtsam/base/Testable.h>
#include <gtsam/base/OptionalJacobian.h>
#include <boost/concept/assert.hpp>
#include <iostream>
namespace gtsam {

View File

@ -0,0 +1,39 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file Cal3_S2Stereo.cpp
* @brief The most common 5DOF 3D->2D calibration + Stereo baseline
* @author Chris Beall
*/
#include <gtsam/geometry/Cal3_S2Stereo.h>
#include <iostream>
namespace gtsam {
using namespace std;
/* ************************************************************************* */
void Cal3_S2Stereo::print(const std::string& s) const {
K_.print(s+"K: ");
std::cout << s << "Baseline: " << b_ << std::endl;
}
/* ************************************************************************* */
bool Cal3_S2Stereo::equals(const Cal3_S2Stereo& other, double tol) const {
if (fabs(b_ - other.b_) > tol) return false;
return K_.equals(other.K_,tol);
}
/* ************************************************************************* */
} // namespace gtsam

View File

@ -18,6 +18,7 @@
#pragma once
#include <gtsam/geometry/Cal3_S2.h>
#include <iosfwd>
namespace gtsam {
@ -62,16 +63,10 @@ namespace gtsam {
/// @name Testable
/// @{
void print(const std::string& s = "") const {
K_.print(s+"K: ");
std::cout << s << "Baseline: " << b_ << std::endl;
}
void print(const std::string& s = "") const;
/// Check if equal up to specified tolerance
bool equals(const Cal3_S2Stereo& other, double tol = 10e-9) const {
if (fabs(b_ - other.b_) > tol) return false;
return K_.equals(other.K_,tol);
}
bool equals(const Cal3_S2Stereo& other, double tol = 10e-9) const;
/// @}
/// @name Standard Interface

View File

@ -17,6 +17,7 @@
#include <gtsam/geometry/Point2.h>
#include <cmath>
#include <iostream>
using namespace std;

View File

@ -16,6 +16,7 @@
#include <gtsam/geometry/Point3.h>
#include <cmath>
#include <iostream>
using namespace std;

View File

@ -21,6 +21,7 @@
#include <gtsam/base/concepts.h>
#include <gtsam/geometry/SO3.h> // Logmap/Expmap derivatives
#include <limits>
#include <iostream>
#define QUATERNION_TYPE Eigen::Quaternion<_Scalar,_Options>

View File

@ -17,6 +17,7 @@
*/
#include <gtsam/geometry/Rot2.h>
#include <iostream>
using namespace std;

View File

@ -22,6 +22,7 @@
#include <gtsam/base/concepts.h>
#include <cmath>
#include <limits>
#include <iostream>
namespace gtsam {
@ -116,6 +117,12 @@ SO3 SO3::AxisAngle(const Vector3& axis, double theta) {
return so3::ExpmapFunctor(axis, theta).expmap();
}
/* ************************************************************************* */
void SO3::print(const std::string& s) const {
std::cout << s << *this << std::endl;
}
/* ************************************************************************* */
SO3 SO3::Expmap(const Vector3& omega, ChartJacobian H) {
if (H) {
so3::DexpFunctor impl(omega);

View File

@ -24,6 +24,7 @@
#include <gtsam/base/Lie.h>
#include <cmath>
#include <iosfwd>
namespace gtsam {
@ -67,9 +68,7 @@ public:
/// @name Testable
/// @{
void print(const std::string& s) const {
std::cout << s << *this << std::endl;
}
void print(const std::string& s) const;
bool equals(const SO3 & R, double tol) const {
return equal_with_abs_tol(*this, R, tol);

View File

@ -219,15 +219,13 @@ FastVector<JacobianFactor::shared_ptr> _convertOrCastToJacobians(
/* ************************************************************************* */
JacobianFactor::JacobianFactor(const GaussianFactorGraph& graph,
boost::optional<const Ordering&> ordering,
boost::optional<const VariableSlots&> variableSlots) {
boost::optional<const VariableSlots&> p_variableSlots) {
gttic(JacobianFactor_combine_constructor);
// Compute VariableSlots if one was not provided
boost::optional<VariableSlots> computedVariableSlots;
if (!variableSlots) {
computedVariableSlots = VariableSlots(graph);
variableSlots = computedVariableSlots; // Binds reference, does not copy VariableSlots
}
// Binds reference, does not copy VariableSlots
const VariableSlots & variableSlots =
p_variableSlots ? p_variableSlots.get() : VariableSlots(graph);
// Cast or convert to Jacobians
FastVector<JacobianFactor::shared_ptr> jacobians = _convertOrCastToJacobians(
@ -238,15 +236,15 @@ JacobianFactor::JacobianFactor(const GaussianFactorGraph& graph,
// 'unorderedSlots' of any variables discovered that are not in the ordering. Those will then
// be added after all of the ordered variables.
FastVector<VariableSlots::const_iterator> orderedSlots;
orderedSlots.reserve(variableSlots->size());
orderedSlots.reserve(variableSlots.size());
if (ordering) {
// If an ordering is provided, arrange the slots first that ordering
FastList<VariableSlots::const_iterator> unorderedSlots;
size_t nOrderingSlotsUsed = 0;
orderedSlots.resize(ordering->size());
FastMap<Key, size_t> inverseOrdering = ordering->invert();
for (VariableSlots::const_iterator item = variableSlots->begin();
item != variableSlots->end(); ++item) {
for (VariableSlots::const_iterator item = variableSlots.begin();
item != variableSlots.end(); ++item) {
FastMap<Key, size_t>::const_iterator orderingPosition =
inverseOrdering.find(item->first);
if (orderingPosition == inverseOrdering.end()) {
@ -267,8 +265,8 @@ JacobianFactor::JacobianFactor(const GaussianFactorGraph& graph,
} else {
// If no ordering is provided, arrange the slots as they were, which will be sorted
// numerically since VariableSlots uses a map sorting on Key.
for (VariableSlots::const_iterator item = variableSlots->begin();
item != variableSlots->end(); ++item)
for (VariableSlots::const_iterator item = variableSlots.begin();
item != variableSlots.end(); ++item)
orderedSlots.push_back(item);
}
gttoc(Order_slots);
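One observation on the rewritten initialization, hedged: the ternary mixes an lvalue (p_variableSlots.get()) with a prvalue (VariableSlots(graph)), and under the conditional-operator rules the result is then a prvalue, so a copy of the provided VariableSlots can still be made despite the comment; the const reference binds to a lifetime-extended temporary either way. A minimal sketch of the pattern and its subtlety (names hypothetical):

#include <boost/optional.hpp>

struct Slots { int n; };
Slots computeSlots() { return Slots{42}; }  // hypothetical fallback

void use(boost::optional<const Slots&> maybe) {
  // Mixed value categories force a prvalue result, so this copies even when
  // 'maybe' is set; lifetime extension keeps the temporary alive regardless.
  const Slots& slots = maybe ? maybe.get() : computeSlots();
  (void)slots.n;
}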

View File

@ -154,7 +154,7 @@ namespace gtsam {
explicit JacobianFactor(
const GaussianFactorGraph& graph,
boost::optional<const Ordering&> ordering = boost::none,
boost::optional<const VariableSlots&> variableSlots = boost::none);
boost::optional<const VariableSlots&> p_variableSlots = boost::none);
/** Virtual destructor */
virtual ~JacobianFactor() {}

View File

@ -243,7 +243,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
stay = false;
}
}
gttoc(adjust_Delta);
gttoc(adjust_delta);
}
// dx_d and f_error have already been filled in during the loop

View File

@ -23,6 +23,7 @@
#include <gtsam/nonlinear/NonlinearOptimizer.h>
#include <gtsam/nonlinear/LevenbergMarquardtParams.h>
#include <gtsam/linear/VectorValues.h>
#include <boost/date_time/posix_time/posix_time.hpp>
class NonlinearOptimizerMoreOptimizationTest;

View File

@ -25,6 +25,7 @@
#include <boost/type_traits/aligned_storage.hpp>
#include <Eigen/Core>
#include <iostream>
namespace gtsam {
namespace internal {

View File

@ -23,6 +23,7 @@
#include <gtsam/base/Testable.h>
#include <CppUnitLite/TestHarness.h>
#include <iostream>
using namespace std;
using namespace gtsam;

View File

@ -92,7 +92,7 @@ namespace gtsam {
T hx = traits<T>::Between(p1, p2, H1, H2); // h(x)
// manifold equivalent of h(x)-z -> log(z,h(x))
#ifdef SLOW_BUT_CORRECT_BETWEENFACTOR
typename traits<T>::ChartJacobian::Fixed Hlocal;
typename traits<T>::ChartJacobian::Jacobian Hlocal;
Vector rval = traits<T>::Local(measured_, hx, boost::none, (H1 || H2) ? &Hlocal : 0);
if (H1) *H1 = Hlocal * (*H1);
if (H2) *H2 = Hlocal * (*H2);

View File

@ -18,7 +18,21 @@
*/
#include <gtsam_unstable/geometry/Event.h>
#include <iostream>
namespace gtsam {
/* ************************************************************************* */
void Event::print(const std::string& s) const {
std::cout << s << "time = " << time_ << "location = " << location_.transpose();
}
/* ************************************************************************* */
bool Event::equals(const Event& other, double tol) const {
return std::abs(time_ - other.time_) < tol
&& traits<Point3>::Equals(location_, other.location_, tol);
}
/* ************************************************************************* */
} //\ namespace gtsam

View File

@ -21,6 +21,7 @@
#include <gtsam/geometry/Point3.h>
#include <cmath>
#include <iosfwd>
namespace gtsam {
@ -59,15 +60,10 @@ public:
}
/** print with optional string */
void print(const std::string& s = "") const {
std::cout << s << "time = " << time_ << "location = " << location_.transpose();
}
void print(const std::string& s = "") const;
/** equals with a tolerance */
bool equals(const Event& other, double tol = 1e-9) const {
return std::abs(time_ - other.time_) < tol
&& traits<Point3>::Equals(location_, other.location_, tol);
}
bool equals(const Event& other, double tol = 1e-9) const;
/// Updates a with tangent space delta
inline Event retract(const Vector4& v) const {

View File

@ -18,7 +18,6 @@
#include "Argument.h"
#include <boost/regex.hpp>
#include <boost/lexical_cast.hpp>
#include <iostream>

View File

@ -1,6 +1,6 @@
# Build/install Wrap
set(WRAP_BOOST_LIBRARIES ${Boost_SYSTEM_LIBRARY} ${Boost_FILESYSTEM_LIBRARY} ${Boost_THREAD_LIBRARY} ${Boost_REGEX_LIBRARY})
set(WRAP_BOOST_LIBRARIES ${Boost_SYSTEM_LIBRARY} ${Boost_FILESYSTEM_LIBRARY} ${Boost_THREAD_LIBRARY})
# Allow for disabling serialization to handle errors related to Clang's linker
option(GTSAM_WRAP_SERIALIZATION "If enabled, allows for wrapped objects to be saved via boost.serialization" ON)

View File

@ -177,8 +177,8 @@ void Module::parseMarkup(const std::string& data) {
// parse forward declaration
ForwardDeclaration fwDec0, fwDec;
Rule forward_declaration_p =
!(str_p("virtual")[assign_a(fwDec.isVirtual, true)])
>> str_p("class")
!(str_p("virtual")[assign_a(fwDec.isVirtual, T)])
>> str_p("class")
>> (*(basic.namespace_p >> str_p("::")) >> basic.className_p)[assign_a(fwDec.name)]
>> ch_p(';')
[push_back_a(forward_declarations, fwDec)]

View File

@ -102,9 +102,19 @@ TEST( spirit, constMethod_p ) {
EXPECT(parse("double norm() const;", constMethod_p, space_p).full);
}
/* ************************************************************************* */
/* See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56665
A GCC issue with -O2 and -fno-strict-aliasing results in undefined
behaviour when spirit uses assign_a with a literal.
GCC versions 4.7.2 -> 5.4 inclusive */
TEST( spirit, return_value_p ) {
bool isEigen = true;
static const bool T = true;
static const bool F = false;
bool isEigen = T;
string actual_return_type;
string actual_function_name;
@ -119,9 +129,9 @@ TEST( spirit, return_value_p ) {
Rule funcName_p = lexeme_d[lower_p >> *(alnum_p | '_')];
Rule returnType_p =
(basisType_p[assign_a(actual_return_type)][assign_a(isEigen, true)]) |
(className_p[assign_a(actual_return_type)][assign_a(isEigen,false)]) |
(eigenType_p[assign_a(actual_return_type)][assign_a(isEigen, true)]);
(basisType_p[assign_a(actual_return_type)][assign_a(isEigen, T)]) |
(className_p[assign_a(actual_return_type)][assign_a(isEigen, F)]) |
(eigenType_p[assign_a(actual_return_type)][assign_a(isEigen, T)]);
Rule testFunc_p = returnType_p >> funcName_p[assign_a(actual_function_name)] >> str_p("();");
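To make the workaround shape explicit, a hedged standalone sketch (Spirit classic, names mirroring the test above): binding assign_a to a named object with static storage instead of a bool literal avoids the reference-to-temporary pattern the affected GCCs miscompile:

#include <boost/spirit/include/classic_core.hpp>
#include <boost/spirit/include/classic_assign_actor.hpp>

using namespace BOOST_SPIRIT_CLASSIC_NS;

static const bool T = true;  // named constant: assign_a keeps a reference to it

bool parse_virtual_class(const char* input, bool& isVirtual) {
  // assign_a(isVirtual, T), not assign_a(isVirtual, true): the actor stores a
  // reference to its second argument, so a literal temporary is fragile here.
  return parse(input,
               !(str_p("virtual")[assign_a(isVirtual, T)]) >> str_p("class"),
               space_p).full;
}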