From ab1f6562c802bf815d42dc327e7d704b1b475d1e Mon Sep 17 00:00:00 2001
From: =
Date: Sat, 6 Aug 2016 00:40:04 -0400
Subject: [PATCH 01/10] Fixes compile errors when using BOOST version 1.61.0

---
 examples/TimeTBB.cpp | 1 +
 gtsam/base/GenericValue.h | 1 +
 gtsam/base/Matrix.cpp | 1 +
 gtsam/base/Vector.cpp | 1 +
 gtsam/base/deprecated/LieScalar.h | 1 +
 gtsam/geometry/BearingRange.h | 1 +
 gtsam/geometry/Cal3_S2Stereo.h | 1 +
 gtsam/geometry/Point2.cpp | 1 +
 gtsam/geometry/Point3.cpp | 1 +
 gtsam/geometry/Quaternion.h | 1 +
 gtsam/geometry/Rot2.cpp | 1 +
 gtsam/geometry/SO3.h | 1 +
 gtsam/linear/JacobianFactor.cpp | 20 +++++++++----------
 gtsam/linear/JacobianFactor.h | 2 +-
 gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 1 +
 gtsam/nonlinear/internal/ExecutionTrace.h | 1 +
 gtsam/nonlinear/tests/testCallRecord.cpp | 1 +
 gtsam_unstable/geometry/Event.h | 1 +
 18 files changed, 26 insertions(+), 12 deletions(-)

diff --git a/examples/TimeTBB.cpp b/examples/TimeTBB.cpp
index aa2195078..178fa513f 100644
--- a/examples/TimeTBB.cpp
+++ b/examples/TimeTBB.cpp
@@ -20,6 +20,7 @@
 #include
 #include
+#include
 using namespace std;
 using namespace gtsam;
diff --git a/gtsam/base/GenericValue.h b/gtsam/base/GenericValue.h
index 17783c5b9..7c4d0398d 100644
--- a/gtsam/base/GenericValue.h
+++ b/gtsam/base/GenericValue.h
@@ -28,6 +28,7 @@
 #include
 #include
 #include // operator typeid
+#include
 namespace gtsam {
diff --git a/gtsam/base/Matrix.cpp b/gtsam/base/Matrix.cpp
index d74e1c122..0c37b405e 100644
--- a/gtsam/base/Matrix.cpp
+++ b/gtsam/base/Matrix.cpp
@@ -32,6 +32,7 @@
 #include
 #include
 #include
+#include
 using namespace std;
diff --git a/gtsam/base/Vector.cpp b/gtsam/base/Vector.cpp
index 8f17e4c6e..6dc9800ca 100644
--- a/gtsam/base/Vector.cpp
+++ b/gtsam/base/Vector.cpp
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/gtsam/base/deprecated/LieScalar.h b/gtsam/base/deprecated/LieScalar.h
index f9c424e95..68632addc 100644
--- a/gtsam/base/deprecated/LieScalar.h
+++ b/gtsam/base/deprecated/LieScalar.h
@@ -19,6 +19,7 @@
 #include
 #include
+#include
 namespace gtsam {
diff --git a/gtsam/geometry/BearingRange.h b/gtsam/geometry/BearingRange.h
index 27fe2f197..b1e003864 100644
--- a/gtsam/geometry/BearingRange.h
+++ b/gtsam/geometry/BearingRange.h
@@ -22,6 +22,7 @@
 #include
 #include
 #include
+#include
 namespace gtsam {
diff --git a/gtsam/geometry/Cal3_S2Stereo.h b/gtsam/geometry/Cal3_S2Stereo.h
index 365a6c40e..29ccd194d 100644
--- a/gtsam/geometry/Cal3_S2Stereo.h
+++ b/gtsam/geometry/Cal3_S2Stereo.h
@@ -18,6 +18,7 @@
 #pragma once
 #include
+#include
 namespace gtsam {
diff --git a/gtsam/geometry/Point2.cpp b/gtsam/geometry/Point2.cpp
index 2152a7c39..74b9a2bec 100644
--- a/gtsam/geometry/Point2.cpp
+++ b/gtsam/geometry/Point2.cpp
@@ -17,6 +17,7 @@
 #include
 #include
+#include
 using namespace std;
diff --git a/gtsam/geometry/Point3.cpp b/gtsam/geometry/Point3.cpp
index 091906d5f..8df5f5607 100644
--- a/gtsam/geometry/Point3.cpp
+++ b/gtsam/geometry/Point3.cpp
@@ -16,6 +16,7 @@
 #include
 #include
+#include
 using namespace std;
diff --git a/gtsam/geometry/Quaternion.h b/gtsam/geometry/Quaternion.h
index 0ab4a5ee6..af9481181 100644
--- a/gtsam/geometry/Quaternion.h
+++ b/gtsam/geometry/Quaternion.h
@@ -21,6 +21,7 @@
 #include
 #include // Logmap/Expmap derivatives
 #include
+#include
 #define QUATERNION_TYPE Eigen::Quaternion<_Scalar,_Options>
diff --git a/gtsam/geometry/Rot2.cpp b/gtsam/geometry/Rot2.cpp
index 9780cb49a..4cabe7140 100644
--- a/gtsam/geometry/Rot2.cpp
+++
b/gtsam/geometry/Rot2.cpp @@ -17,6 +17,7 @@ */ #include +#include using namespace std; diff --git a/gtsam/geometry/SO3.h b/gtsam/geometry/SO3.h index 3152fa2fb..0396fbce0 100644 --- a/gtsam/geometry/SO3.h +++ b/gtsam/geometry/SO3.h @@ -24,6 +24,7 @@ #include #include +#include namespace gtsam { diff --git a/gtsam/linear/JacobianFactor.cpp b/gtsam/linear/JacobianFactor.cpp index 06c8f3f64..4597759e3 100644 --- a/gtsam/linear/JacobianFactor.cpp +++ b/gtsam/linear/JacobianFactor.cpp @@ -219,15 +219,13 @@ FastVector _convertOrCastToJacobians( /* ************************************************************************* */ JacobianFactor::JacobianFactor(const GaussianFactorGraph& graph, boost::optional ordering, - boost::optional variableSlots) { + boost::optional p_variableSlots) { gttic(JacobianFactor_combine_constructor); // Compute VariableSlots if one was not provided - boost::optional computedVariableSlots; - if (!variableSlots) { - computedVariableSlots = VariableSlots(graph); - variableSlots = computedVariableSlots; // Binds reference, does not copy VariableSlots - } + // Binds reference, does not copy VariableSlots + const VariableSlots & variableSlots = + p_variableSlots ? p_variableSlots.get() : VariableSlots(graph); // Cast or convert to Jacobians FastVector jacobians = _convertOrCastToJacobians( @@ -238,15 +236,15 @@ JacobianFactor::JacobianFactor(const GaussianFactorGraph& graph, // 'unorderedSlots' of any variables discovered that are not in the ordering. Those will then // be added after all of the ordered variables. FastVector orderedSlots; - orderedSlots.reserve(variableSlots->size()); + orderedSlots.reserve(variableSlots.size()); if (ordering) { // If an ordering is provided, arrange the slots first that ordering FastList unorderedSlots; size_t nOrderingSlotsUsed = 0; orderedSlots.resize(ordering->size()); FastMap inverseOrdering = ordering->invert(); - for (VariableSlots::const_iterator item = variableSlots->begin(); - item != variableSlots->end(); ++item) { + for (VariableSlots::const_iterator item = variableSlots.begin(); + item != variableSlots.end(); ++item) { FastMap::const_iterator orderingPosition = inverseOrdering.find(item->first); if (orderingPosition == inverseOrdering.end()) { @@ -267,8 +265,8 @@ JacobianFactor::JacobianFactor(const GaussianFactorGraph& graph, } else { // If no ordering is provided, arrange the slots as they were, which will be sorted // numerically since VariableSlots uses a map sorting on Key. 
- for (VariableSlots::const_iterator item = variableSlots->begin(); - item != variableSlots->end(); ++item) + for (VariableSlots::const_iterator item = variableSlots.begin(); + item != variableSlots.end(); ++item) orderedSlots.push_back(item); } gttoc(Order_slots); diff --git a/gtsam/linear/JacobianFactor.h b/gtsam/linear/JacobianFactor.h index 14ff46241..d7814f541 100644 --- a/gtsam/linear/JacobianFactor.h +++ b/gtsam/linear/JacobianFactor.h @@ -154,7 +154,7 @@ namespace gtsam { explicit JacobianFactor( const GaussianFactorGraph& graph, boost::optional ordering = boost::none, - boost::optional variableSlots = boost::none); + boost::optional p_variableSlots = boost::none); /** Virtual destructor */ virtual ~JacobianFactor() {} diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index 904e08770..f1135d2f0 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -23,6 +23,7 @@ #include #include #include +#include class NonlinearOptimizerMoreOptimizationTest; diff --git a/gtsam/nonlinear/internal/ExecutionTrace.h b/gtsam/nonlinear/internal/ExecutionTrace.h index ed811764a..a147f505e 100644 --- a/gtsam/nonlinear/internal/ExecutionTrace.h +++ b/gtsam/nonlinear/internal/ExecutionTrace.h @@ -25,6 +25,7 @@ #include #include +#include namespace gtsam { namespace internal { diff --git a/gtsam/nonlinear/tests/testCallRecord.cpp b/gtsam/nonlinear/tests/testCallRecord.cpp index 208f0b284..5fc4e208d 100644 --- a/gtsam/nonlinear/tests/testCallRecord.cpp +++ b/gtsam/nonlinear/tests/testCallRecord.cpp @@ -23,6 +23,7 @@ #include #include +#include using namespace std; using namespace gtsam; diff --git a/gtsam_unstable/geometry/Event.h b/gtsam_unstable/geometry/Event.h index 40c70696b..5bd37d2d2 100644 --- a/gtsam_unstable/geometry/Event.h +++ b/gtsam_unstable/geometry/Event.h @@ -21,6 +21,7 @@ #include #include +#include namespace gtsam { From 01b3bf40389152830ff53d22a4d6a1a1255e8a5a Mon Sep 17 00:00:00 2001 From: Carl Morgan Date: Thu, 11 Aug 2016 14:23:26 +1200 Subject: [PATCH 02/10] boost::spirit assign_a fixes to use non-literials --- wrap/Module.cpp | 2 +- wrap/tests/testSpirit.cpp | 18 ++++++++++++++---- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/wrap/Module.cpp b/wrap/Module.cpp index a115c5e10..61d2a29e0 100644 --- a/wrap/Module.cpp +++ b/wrap/Module.cpp @@ -152,7 +152,7 @@ void Module::parseMarkup(const std::string& data) { // parse forward declaration ForwardDeclaration fwDec0, fwDec; Rule forward_declaration_p = - !(str_p("virtual")[assign_a(fwDec.isVirtual, true)]) + !(str_p("virtual")[assign_a(fwDec.isVirtual, T)]) >> str_p("class") >> (*(basic.namespace_p >> str_p("::")) >> basic.className_p)[assign_a(fwDec.name)] >> ch_p(';') diff --git a/wrap/tests/testSpirit.cpp b/wrap/tests/testSpirit.cpp index 2a525e08e..27c9be6d6 100644 --- a/wrap/tests/testSpirit.cpp +++ b/wrap/tests/testSpirit.cpp @@ -102,9 +102,19 @@ TEST( spirit, constMethod_p ) { EXPECT(parse("double norm() const;", constMethod_p, space_p).full); } + /* ************************************************************************* */ +/* See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56665 + GCC compiler issues with -O2 and -fno-strict-aliasing results in undefined + behaviour when spirit uses assign_a with a literal. 
+ GCC versions 4.7.2 -> 5.4 inclusive */ + TEST( spirit, return_value_p ) { - bool isEigen = true; + static const bool T = true; + static const bool F = false; + + bool isEigen = T; + string actual_return_type; string actual_function_name; @@ -119,9 +129,9 @@ TEST( spirit, return_value_p ) { Rule funcName_p = lexeme_d[lower_p >> *(alnum_p | '_')]; Rule returnType_p = - (basisType_p[assign_a(actual_return_type)][assign_a(isEigen, true)]) | - (className_p[assign_a(actual_return_type)][assign_a(isEigen,false)]) | - (eigenType_p[assign_a(actual_return_type)][assign_a(isEigen, true)]); + (basisType_p[assign_a(actual_return_type)][assign_a(isEigen, T)]) | + (className_p[assign_a(actual_return_type)][assign_a(isEigen, F)]) | + (eigenType_p[assign_a(actual_return_type)][assign_a(isEigen, T)]); Rule testFunc_p = returnType_p >> funcName_p[assign_a(actual_function_name)] >> str_p("();"); From d1cdafa3f57a74c5dfb9baa21dbb505f0cce6001 Mon Sep 17 00:00:00 2001 From: Ryan Estep Date: Mon, 29 Aug 2016 09:07:03 +1200 Subject: [PATCH 03/10] Removed the boost::regex include (not used) from the matlab wrapper & removed any linking to boost::regex --- CMakeLists.txt | 6 +++--- cmake/GtsamMatlabWrap.cmake | 3 +-- wrap/Argument.cpp | 1 - wrap/CMakeLists.txt | 2 +- 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3c93264f3..77434d135 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -116,11 +116,11 @@ if(MSVC) endif() endif() -find_package(Boost 1.43 COMPONENTS serialization system filesystem thread program_options date_time regex timer chrono) +find_package(Boost 1.43 COMPONENTS serialization system filesystem thread program_options date_time timer chrono) # Required components if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILESYSTEM_LIBRARY OR - NOT Boost_THREAD_LIBRARY OR NOT Boost_DATE_TIME_LIBRARY OR NOT Boost_REGEX_LIBRARY) + NOT Boost_THREAD_LIBRARY OR NOT Boost_DATE_TIME_LIBRARY) message(FATAL_ERROR "Missing required Boost components >= v1.43, please install/upgrade Boost or configure your search paths.") endif() @@ -128,7 +128,7 @@ option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF) # Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library) set(GTSAM_BOOST_LIBRARIES ${Boost_SERIALIZATION_LIBRARY} ${Boost_SYSTEM_LIBRARY} ${Boost_FILESYSTEM_LIBRARY} - ${Boost_THREAD_LIBRARY} ${Boost_DATE_TIME_LIBRARY} ${Boost_REGEX_LIBRARY}) + ${Boost_THREAD_LIBRARY} ${Boost_DATE_TIME_LIBRARY}) if (GTSAM_DISABLE_NEW_TIMERS) message("WARNING: GTSAM timing instrumentation manually disabled") add_definitions(-DGTSAM_DISABLE_NEW_TIMERS) diff --git a/cmake/GtsamMatlabWrap.cmake b/cmake/GtsamMatlabWrap.cmake index d1d3d93dd..a9e04a01a 100644 --- a/cmake/GtsamMatlabWrap.cmake +++ b/cmake/GtsamMatlabWrap.cmake @@ -128,8 +128,7 @@ function(wrap_library_internal interfaceHeader linkLibraries extraIncludeDirs ex ## This needs to be fixed!! 
if(UNIX AND NOT APPLE) list(APPEND automaticDependencies ${Boost_SERIALIZATION_LIBRARY_RELEASE} ${Boost_FILESYSTEM_LIBRARY_RELEASE} - ${Boost_SYSTEM_LIBRARY_RELEASE} ${Boost_THREAD_LIBRARY_RELEASE} ${Boost_DATE_TIME_LIBRARY_RELEASE} - ${Boost_REGEX_LIBRARY_RELEASE}) + ${Boost_SYSTEM_LIBRARY_RELEASE} ${Boost_THREAD_LIBRARY_RELEASE} ${Boost_DATE_TIME_LIBRARY_RELEASE}) if(Boost_TIMER_LIBRARY_RELEASE AND NOT GTSAM_DISABLE_NEW_TIMERS) # Only present in Boost >= 1.48.0 list(APPEND automaticDependencies ${Boost_TIMER_LIBRARY_RELEASE} ${Boost_CHRONO_LIBRARY_RELEASE}) if(GTSAM_MEX_BUILD_STATIC_MODULE) diff --git a/wrap/Argument.cpp b/wrap/Argument.cpp index cde04afe0..52d9ca0b5 100644 --- a/wrap/Argument.cpp +++ b/wrap/Argument.cpp @@ -18,7 +18,6 @@ #include "Argument.h" -#include #include #include diff --git a/wrap/CMakeLists.txt b/wrap/CMakeLists.txt index 03915a662..04d471b39 100644 --- a/wrap/CMakeLists.txt +++ b/wrap/CMakeLists.txt @@ -1,6 +1,6 @@ # Build/install Wrap -set(WRAP_BOOST_LIBRARIES ${Boost_SYSTEM_LIBRARY} ${Boost_FILESYSTEM_LIBRARY} ${Boost_THREAD_LIBRARY} ${Boost_REGEX_LIBRARY}) +set(WRAP_BOOST_LIBRARIES ${Boost_SYSTEM_LIBRARY} ${Boost_FILESYSTEM_LIBRARY} ${Boost_THREAD_LIBRARY}) # Allow for disabling serialization to handle errors related to Clang's linker option(GTSAM_WRAP_SERIALIZATION "If enabled, allows for wrapped objects to be saved via boost.serialization" ON) From a7fbce3e4c3b0fba45a64ab87ab14a8c5df734d3 Mon Sep 17 00:00:00 2001 From: Jing Dong Date: Mon, 26 Sep 2016 17:01:41 -0400 Subject: [PATCH 04/10] fix dogleg compile issue in CMake Timing mode --- gtsam/nonlinear/DoglegOptimizerImpl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/nonlinear/DoglegOptimizerImpl.h b/gtsam/nonlinear/DoglegOptimizerImpl.h index cead7f9db..9a7067878 100644 --- a/gtsam/nonlinear/DoglegOptimizerImpl.h +++ b/gtsam/nonlinear/DoglegOptimizerImpl.h @@ -243,7 +243,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate( stay = false; } } - gttoc(adjust_Delta); + gttoc(adjust_delta); } // dx_d and f_error have already been filled in during the loop From f9de023caff5e7dab0daf94acd06a389b1a46629 Mon Sep 17 00:00:00 2001 From: chrisbeall Date: Thu, 6 Oct 2016 14:25:40 -0700 Subject: [PATCH 05/10] Only add custom all.tests target when GTSAM_BUILD_TESTS is true --- cmake/GtsamTesting.cmake | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmake/GtsamTesting.cmake b/cmake/GtsamTesting.cmake index 62a129010..15d4219e6 100644 --- a/cmake/GtsamTesting.cmake +++ b/cmake/GtsamTesting.cmake @@ -101,6 +101,9 @@ mark_as_advanced(GTSAM_SINGLE_TEST_EXE) # Enable make check (http://www.cmake.org/Wiki/CMakeEmulateMakeCheck) if(GTSAM_BUILD_TESTS) add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND} -C $ --output-on-failure) + + # Add target to build tests without running + add_custom_target(all.tests) endif() # Add examples target @@ -109,8 +112,6 @@ add_custom_target(examples) # Add timing target add_custom_target(timing) -# Add target to build tests without running -add_custom_target(all.tests) # Implementations of this file's macros: From e64bc1007ca12cbd3fb282fd2bf769e4cdfb5a2f Mon Sep 17 00:00:00 2001 From: chrisbeall Date: Sun, 9 Oct 2016 22:18:11 -0700 Subject: [PATCH 06/10] Upgrade to Eigen 3.2.10 - http://eigen.tuxfamily.org/index.php?title=ChangeLog#Eigen_3.2.10 --- gtsam/3rdparty/Eigen/.hg_archival.txt | 4 +- gtsam/3rdparty/Eigen/.hgtags | 2 + gtsam/3rdparty/Eigen/Eigen/src/Core/Block.h | 5 +- 
.../Eigen/Eigen/src/Core/CommaInitializer.h | 25 +- .../Eigen/Eigen/src/Core/DiagonalMatrix.h | 6 +- gtsam/3rdparty/Eigen/Eigen/src/Core/Dot.h | 2 +- .../3rdparty/Eigen/Eigen/src/Core/Functors.h | 3 + .../Eigen/Eigen/src/Core/GeneralProduct.h | 5 - .../Eigen/Eigen/src/Core/GenericPacketMath.h | 4 +- .../Eigen/Eigen/src/Core/MathFunctions.h | 40 +- .../Eigen/Eigen/src/Core/PermutationMatrix.h | 3 +- .../Eigen/Eigen/src/Core/PlainObjectBase.h | 6 +- gtsam/3rdparty/Eigen/Eigen/src/Core/Reverse.h | 20 +- .../Eigen/Eigen/src/Core/SolveTriangular.h | 3 +- .../3rdparty/Eigen/Eigen/src/Core/Transpose.h | 9 +- .../Eigen/Eigen/src/Core/Transpositions.h | 3 +- .../Core/products/TriangularSolverMatrix.h | 2 +- .../Eigen/Eigen/src/Core/util/BlasUtil.h | 57 +- .../Eigen/Eigen/src/Core/util/Constants.h | 16 +- .../src/Core/util/DisableStupidWarnings.h | 8 + .../Eigen/Eigen/src/Core/util/Macros.h | 2 +- .../Eigen/Eigen/src/Core/util/Memory.h | 133 +- .../src/Core/util/ReenableStupidWarnings.h | 3 + .../src/Eigenvalues/GeneralizedEigenSolver.h | 32 +- .../src/Eigenvalues/Tridiagonalization.h | 4 +- .../Eigen/Eigen/src/Geometry/Homogeneous.h | 2 +- .../Eigen/Eigen/src/Geometry/Quaternion.h | 2 +- .../Eigen/Eigen/src/Geometry/Transform.h | 8 +- .../Eigen/Eigen/src/Geometry/Translation.h | 6 +- .../Eigen/Eigen/src/Householder/Householder.h | 3 +- .../src/Householder/HouseholderSequence.h | 3 +- gtsam/3rdparty/Eigen/Eigen/src/LU/Inverse.h | 2 +- .../Eigen/Eigen/src/LU/arch/Inverse_SSE.h | 28 +- .../Eigen/src/PaStiXSupport/PaStiXSupport.h | 12 +- .../Eigen/src/PardisoSupport/PardisoSupport.h | 21 +- .../Eigen/Eigen/src/QR/ColPivHouseholderQR.h | 3 - .../Eigen/Eigen/src/QR/FullPivHouseholderQR.h | 3 - .../Eigen/Eigen/src/QR/HouseholderQR.h | 3 - .../3rdparty/Eigen/Eigen/src/SVD/JacobiSVD.h | 90 +- .../Eigen/src/SparseCore/CompressedStorage.h | 11 +- .../Eigen/Eigen/src/SparseCore/SparseBlock.h | 1162 +++++++++-------- .../Eigen/Eigen/src/SparseCore/SparseMatrix.h | 12 +- .../Eigen/src/SparseCore/SparseMatrixBase.h | 1 + .../Eigen/Eigen/src/SparseCore/SparseRedux.h | 7 +- .../SparseSparseProductWithPruning.h | 2 +- .../Eigen/Eigen/src/SparseCore/SparseVector.h | 10 +- .../Eigen/src/UmfPackSupport/UmfPackSupport.h | 3 +- .../Eigen/bench/btl/generic_bench/btl.hh | 13 +- .../Eigen/cmake/EigenConfigureTesting.cmake | 11 +- .../Eigen/cmake/EigenDetermineOSVersion.cmake | 2 +- .../Eigen/doc/SparseLinearSystems.dox | 3 + gtsam/3rdparty/Eigen/doc/TopicAssertions.dox | 2 +- gtsam/3rdparty/Eigen/test/CMakeLists.txt | 1 + .../3rdparty/Eigen/test/commainitializer.cpp | 62 +- gtsam/3rdparty/Eigen/test/cwiseop.cpp | 3 + .../Eigen/test/eigen2/eigen2_cwiseop.cpp | 3 + gtsam/3rdparty/Eigen/test/geo_homogeneous.cpp | 2 + .../Eigen/test/geo_transformations.cpp | 79 ++ gtsam/3rdparty/Eigen/test/packetmath.cpp | 1 + .../3rdparty/Eigen/test/prec_inverse_4x4.cpp | 15 + gtsam/3rdparty/Eigen/test/product.h | 8 + gtsam/3rdparty/Eigen/test/sparse_basic.cpp | 8 + .../src/MatrixFunctions/MatrixExponential.h | 8 +- .../unsupported/Eigen/src/Splines/SplineFwd.h | 8 +- .../Eigen/unsupported/test/autodiff.cpp | 11 + 65 files changed, 1201 insertions(+), 830 deletions(-) diff --git a/gtsam/3rdparty/Eigen/.hg_archival.txt b/gtsam/3rdparty/Eigen/.hg_archival.txt index c4115d7f6..4dd5bd180 100644 --- a/gtsam/3rdparty/Eigen/.hg_archival.txt +++ b/gtsam/3rdparty/Eigen/.hg_archival.txt @@ -1,4 +1,4 @@ repo: 8a21fd850624c931e448cbcfb38168cb2717c790 -node: 07105f7124f9aef00a68c85e0fc606e65d3d6c15 +node: b9cd8366d4e8f49471c7afafc4c2a1b00e54a54d branch: 
3.2 -tag: 3.2.8 +tag: 3.2.10 diff --git a/gtsam/3rdparty/Eigen/.hgtags b/gtsam/3rdparty/Eigen/.hgtags index 8f0097f20..c83128570 100644 --- a/gtsam/3rdparty/Eigen/.hgtags +++ b/gtsam/3rdparty/Eigen/.hgtags @@ -31,3 +31,5 @@ ffa86ffb557094721ca71dcea6aed2651b9fd610 3.2.0 bdd17ee3b1b3a166cd5ec36dcad4fc1f3faf774a 3.2.5 c58038c56923e0fd86de3ded18e03df442e66dfb 3.2.6 b30b87236a1b1552af32ac34075ee5696a9b5a33 3.2.7 +07105f7124f9aef00a68c85e0fc606e65d3d6c15 3.2.8 +dc6cfdf9bcec5efc7b6593bddbbb3d675de53524 3.2.9 diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Block.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Block.h index 827894443..87bedfa46 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Block.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Block.h @@ -66,9 +66,8 @@ struct traits > : traits::MaxColsAtCompileTime), XprTypeIsRowMajor = (int(traits::Flags)&RowMajorBit) != 0, - IsDense = is_same::value, - IsRowMajor = (IsDense&&MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1 - : (IsDense&&MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0 + IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1 + : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0 : XprTypeIsRowMajor, HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor), InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime), diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/CommaInitializer.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/CommaInitializer.h index a036d8c3b..5dd3adeae 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/CommaInitializer.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/CommaInitializer.h @@ -76,9 +76,7 @@ struct CommaInitializer template CommaInitializer& operator,(const DenseBase& other) { - if(other.cols()==0 || other.rows()==0) - return *this; - if (m_col==m_xpr.cols()) + if (m_col==m_xpr.cols() && (other.cols()!=0 || other.rows()!=m_currentBlockRows)) { m_row+=m_currentBlockRows; m_col = 0; @@ -86,24 +84,18 @@ struct CommaInitializer eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows() && "Too many rows passed to comma initializer (operator<<)"); } - eigen_assert(m_col - (m_row, m_col) = other; - else - m_xpr.block(m_row, m_col, other.rows(), other.cols()) = other; + m_xpr.template block + (m_row, m_col, other.rows(), other.cols()) = other; m_col += other.cols(); return *this; } inline ~CommaInitializer() { - eigen_assert((m_row+m_currentBlockRows) == m_xpr.rows() - && m_col == m_xpr.cols() - && "Too few coefficients passed to comma initializer (operator<<)"); + finished(); } /** \returns the built matrix once all its coefficients have been set. 
@@ -113,7 +105,12 @@ struct CommaInitializer * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished()); * \endcode */ - inline XprType& finished() { return m_xpr; } + inline XprType& finished() { + eigen_assert(((m_row+m_currentBlockRows) == m_xpr.rows() || m_xpr.cols() == 0) + && m_col == m_xpr.cols() + && "Too few coefficients passed to comma initializer (operator<<)"); + return m_xpr; + } XprType& m_xpr; // target expression Index m_row; // current row id diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/DiagonalMatrix.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/DiagonalMatrix.h index e6c220f41..53c757bef 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/DiagonalMatrix.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/DiagonalMatrix.h @@ -44,10 +44,10 @@ class DiagonalBase : public EigenBase template void evalTo(MatrixBase &other) const; template - void addTo(MatrixBase &other) const + inline void addTo(MatrixBase &other) const { other.diagonal() += diagonal(); } template - void subTo(MatrixBase &other) const + inline void subTo(MatrixBase &other) const { other.diagonal() -= diagonal(); } inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); } @@ -98,7 +98,7 @@ class DiagonalBase : public EigenBase template template -void DiagonalBase::evalTo(MatrixBase &other) const +inline void DiagonalBase::evalTo(MatrixBase &other) const { other.setZero(); other.diagonal() = diagonal(); diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Dot.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Dot.h index 9d7651f1f..23aab831b 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Dot.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Dot.h @@ -59,7 +59,7 @@ struct dot_nocheck */ template template -typename internal::scalar_product_traits::Scalar,typename internal::traits::Scalar>::ReturnType +inline typename internal::scalar_product_traits::Scalar,typename internal::traits::Scalar>::ReturnType MatrixBase::dot(const MatrixBase& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Functors.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Functors.h index 5f14c6587..5a1b2f28a 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Functors.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Functors.h @@ -969,6 +969,8 @@ template struct functor_traits > { enum { Cost = 1, PacketAccess = false }; }; +#if(__cplusplus < 201103L) +// std::binder* are deprecated since c++11 and will be removed in c++17 template struct functor_traits > { enum { Cost = functor_traits::Cost, PacketAccess = false }; }; @@ -976,6 +978,7 @@ struct functor_traits > template struct functor_traits > { enum { Cost = functor_traits::Cost, PacketAccess = false }; }; +#endif template struct functor_traits > diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/GeneralProduct.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/GeneralProduct.h index 29ac522d2..5744eb71e 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/GeneralProduct.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/GeneralProduct.h @@ -205,9 +205,6 @@ class GeneralProduct public: GeneralProduct(const Lhs& lhs, const Rhs& rhs) { - EIGEN_STATIC_ASSERT((internal::is_same::value), - YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) - Base::coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum(); } @@ -264,8 +261,6 @@ class GeneralProduct GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) { - EIGEN_STATIC_ASSERT((internal::is_same::value), - 
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) } struct set { template void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() = src; } }; diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/GenericPacketMath.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/GenericPacketMath.h index 5f783ebee..c6e93bbb0 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/GenericPacketMath.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/GenericPacketMath.h @@ -183,8 +183,8 @@ template inline void pstoreu(Scalar* to, const /** \internal tries to do cache prefetching of \a addr */ template inline void prefetch(const Scalar* addr) { -#if !defined(_MSC_VER) -__builtin_prefetch(addr); +#if (!EIGEN_COMP_MSVC) && (EIGEN_COMP_GNUC || EIGEN_COMP_CLANG || EIGEN_COMP_ICC) + __builtin_prefetch(addr); #endif } diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/MathFunctions.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/MathFunctions.h index 4e17ecd4b..f707aa41e 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/MathFunctions.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/MathFunctions.h @@ -218,8 +218,8 @@ struct conj_retval * Implementation of abs2 * ****************************************************************************/ -template -struct abs2_impl +template +struct abs2_impl_default { typedef typename NumTraits::Real RealScalar; static inline RealScalar run(const Scalar& x) @@ -228,15 +228,26 @@ struct abs2_impl } }; -template -struct abs2_impl > +template +struct abs2_impl_default // IsComplex { - static inline RealScalar run(const std::complex& x) + typedef typename NumTraits::Real RealScalar; + static inline RealScalar run(const Scalar& x) { return real(x)*real(x) + imag(x)*imag(x); } }; +template +struct abs2_impl +{ + typedef typename NumTraits::Real RealScalar; + static inline RealScalar run(const Scalar& x) + { + return abs2_impl_default::IsComplex>::run(x); + } +}; + template struct abs2_retval { @@ -496,11 +507,24 @@ struct floor_log2 template struct random_default_impl { - typedef typename NumTraits::NonInteger NonInteger; - static inline Scalar run(const Scalar& x, const Scalar& y) { - return x + Scalar((NonInteger(y)-x+1) * std::rand() / (RAND_MAX + NonInteger(1))); + typedef typename conditional::IsSigned,std::ptrdiff_t,std::size_t>::type ScalarX; + if(y=x the result converted to an unsigned long is still correct. + std::size_t range = ScalarX(y)-ScalarX(x); + std::size_t offset = 0; + // rejection sampling + std::size_t divisor = 1; + std::size_t multiplier = 1; + if(range range); + return Scalar(ScalarX(x) + offset); } static inline Scalar run() diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/PermutationMatrix.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/PermutationMatrix.h index 85ffae265..bda79fa04 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/PermutationMatrix.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/PermutationMatrix.h @@ -584,10 +584,11 @@ struct permut_matrix_product_retval const Index n = Side==OnTheLeft ? rows() : cols(); // FIXME we need an is_same for expression that is not sensitive to constness. For instance // is_same_xpr, Block >::value should be true. 
+ const typename Dest::Scalar *dst_data = internal::extract_data(dst); if( is_same::value && blas_traits::HasUsableDirectAccess && blas_traits::HasUsableDirectAccess - && extract_data(dst) == extract_data(m_matrix)) + && dst_data!=0 && dst_data == extract_data(m_matrix)) { // apply the permutation inplace Matrix mask(m_permutation.size()); diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/PlainObjectBase.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/PlainObjectBase.h index a4e4af4a7..9f71956ff 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/PlainObjectBase.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/PlainObjectBase.h @@ -315,8 +315,8 @@ class PlainObjectBase : public internal::dense_xpr_base::type EIGEN_STRONG_INLINE void resizeLike(const EigenBase& _other) { const OtherDerived& other = _other.derived(); - internal::check_rows_cols_for_overflow::run(other.rows(), other.cols()); - const Index othersize = other.rows()*other.cols(); + internal::check_rows_cols_for_overflow::run(Index(other.rows()), Index(other.cols())); + const Index othersize = Index(other.rows())*Index(other.cols()); if(RowsAtCompileTime == 1) { eigen_assert(other.rows() == 1 || other.cols() == 1); @@ -487,7 +487,7 @@ class PlainObjectBase : public internal::dense_xpr_base::type /** \sa MatrixBase::operator=(const EigenBase&) */ template EIGEN_STRONG_INLINE PlainObjectBase(const EigenBase &other) - : m_storage(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols()) + : m_storage(Index(other.derived().rows()) * Index(other.derived().cols()), other.derived().rows(), other.derived().cols()) { _check_template_params(); internal::check_rows_cols_for_overflow::run(other.derived().rows(), other.derived().cols()); diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Reverse.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Reverse.h index e30ae3d28..041f8222a 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Reverse.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Reverse.h @@ -76,9 +76,23 @@ template class Reverse EIGEN_DENSE_PUBLIC_INTERFACE(Reverse) using Base::IsRowMajor; - // next line is necessary because otherwise const version of operator() - // is hidden by non-const version defined in this file - using Base::operator(); + // The following two operators are provided to worarkound + // a MSVC 2013 issue. In theory, we could simply do: + // using Base::operator(); + // to make const version of operator() visible. 
+ // Otheriwse, they would be hidden by the non-const versions defined in this file + + inline CoeffReturnType operator()(Index row, Index col) const + { + eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); + return coeff(row, col); + } + + inline CoeffReturnType operator()(Index index) const + { + eigen_assert(index >= 0 && index < m_matrix.size()); + return coeff(index); + } protected: enum { diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/SolveTriangular.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/SolveTriangular.h index 83565ddd8..30c9c38ec 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/SolveTriangular.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/SolveTriangular.h @@ -243,7 +243,8 @@ template struct triangular_solv template inline void evalTo(Dest& dst) const { - if(!(is_same::value && extract_data(dst) == extract_data(m_rhs))) + const typename Dest::Scalar *dst_data = internal::extract_data(dst); + if(!(is_same::value && dst_data!=0 && extract_data(dst) == extract_data(m_rhs))) dst = m_rhs; m_triangularMatrix.template solveInPlace(dst); } diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpose.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpose.h index 22096ea2f..2abce3c66 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpose.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpose.h @@ -331,11 +331,11 @@ inline void MatrixBase::adjointInPlace() namespace internal { -template -struct blas_traits > - : blas_traits +template +struct blas_traits > + : blas_traits::type> { - typedef SelfCwiseBinaryOp XprType; + typedef SelfCwiseBinaryOp XprType; static inline const XprType extract(const XprType& x) { return x; } }; @@ -392,7 +392,6 @@ struct checkTransposeAliasing_impl ::run(extract_data(dst), other)) && "aliasing detected during transposition, use transposeInPlace() " "or evaluate the rhs into a temporary using .eval()"); - } }; diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpositions.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpositions.h index e4ba0756f..16bc1ce15 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpositions.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/Transpositions.h @@ -376,7 +376,8 @@ struct transposition_matrix_product_retval const int size = m_transpositions.size(); Index j = 0; - if(!(is_same::value && extract_data(dst) == extract_data(m_matrix))) + const typename Dest::Scalar *dst_data = internal::extract_data(dst); + if(!(is_same::value && dst_data!=0 && dst_data == extract_data(m_matrix))) dst = m_matrix; for(int k=(Transposed?size-1:0) ; Transposed?k>=0:k0 ? l2/(4 * sizeof(Scalar) * otherStride) : 0; + Index subcols = cols>0 ? l2/(4 * sizeof(Scalar) * std::max(otherStride,size)) : 0; subcols = std::max((subcols/Traits::nr)*Traits::nr, Traits::nr); for(Index k2=IsLower ? 
0 : size; diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/BlasUtil.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/BlasUtil.h index a28f16fa0..e74381432 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/BlasUtil.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/BlasUtil.h @@ -42,16 +42,29 @@ template struct conj_if; template<> struct conj_if { template - inline T operator()(const T& x) { return numext::conj(x); } + inline T operator()(const T& x) const { return numext::conj(x); } template - inline T pconj(const T& x) { return internal::pconj(x); } + inline T pconj(const T& x) const { return internal::pconj(x); } }; template<> struct conj_if { template - inline const T& operator()(const T& x) { return x; } + inline const T& operator()(const T& x) const { return x; } template - inline const T& pconj(const T& x) { return x; } + inline const T& pconj(const T& x) const { return x; } +}; + +// Generic implementation for custom complex types. +template +struct conj_helper +{ + typedef typename scalar_product_traits::ReturnType Scalar; + + EIGEN_STRONG_INLINE Scalar pmadd(const LhsScalar& x, const RhsScalar& y, const Scalar& c) const + { return padd(c, pmul(x,y)); } + + EIGEN_STRONG_INLINE Scalar pmul(const LhsScalar& x, const RhsScalar& y) const + { return conj_if()(x) * conj_if()(y); } }; template struct conj_helper @@ -171,12 +184,13 @@ template struct blas_traits }; // pop conjugate -template -struct blas_traits, NestedXpr> > - : blas_traits +template +struct blas_traits, Xpr> > + : blas_traits::type> { + typedef typename internal::remove_all::type NestedXpr; typedef blas_traits Base; - typedef CwiseUnaryOp, NestedXpr> XprType; + typedef CwiseUnaryOp, Xpr> XprType; typedef typename Base::ExtractType ExtractType; enum { @@ -188,12 +202,13 @@ struct blas_traits, NestedXpr> > }; // pop scalar multiple -template -struct blas_traits, NestedXpr> > - : blas_traits +template +struct blas_traits, Xpr> > + : blas_traits::type> { + typedef typename internal::remove_all::type NestedXpr; typedef blas_traits Base; - typedef CwiseUnaryOp, NestedXpr> XprType; + typedef CwiseUnaryOp, Xpr> XprType; typedef typename Base::ExtractType ExtractType; static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); } static inline Scalar extractScalarFactor(const XprType& x) @@ -201,12 +216,13 @@ struct blas_traits, NestedXpr> > }; // pop opposite -template -struct blas_traits, NestedXpr> > - : blas_traits +template +struct blas_traits, Xpr> > + : blas_traits::type> { + typedef typename internal::remove_all::type NestedXpr; typedef blas_traits Base; - typedef CwiseUnaryOp, NestedXpr> XprType; + typedef CwiseUnaryOp, Xpr> XprType; typedef typename Base::ExtractType ExtractType; static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); } static inline Scalar extractScalarFactor(const XprType& x) @@ -214,13 +230,14 @@ struct blas_traits, NestedXpr> > }; // pop/push transpose -template -struct blas_traits > - : blas_traits +template +struct blas_traits > + : blas_traits::type> { + typedef typename internal::remove_all::type NestedXpr; typedef typename NestedXpr::Scalar Scalar; typedef blas_traits Base; - typedef Transpose XprType; + typedef Transpose XprType; typedef Transpose ExtractType; // const to get rid of a compile error; anyway blas traits are only used on the RHS typedef Transpose _ExtractType; typedef typename conditional=6 + + #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS + #pragma GCC diagnostic push + #endif + #pragma GCC 
diagnostic ignored "-Wignored-attributes" + #endif #endif // not EIGEN_WARNINGS_DISABLED diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Macros.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Macros.h index e0d90eb82..7d9c89fa1 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Macros.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Macros.h @@ -13,7 +13,7 @@ #define EIGEN_WORLD_VERSION 3 #define EIGEN_MAJOR_VERSION 2 -#define EIGEN_MINOR_VERSION 8 +#define EIGEN_MINOR_VERSION 10 #define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \ (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \ diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Memory.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Memory.h index bc1ea69ed..ffa7e34f3 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Memory.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/Memory.h @@ -507,7 +507,12 @@ template void smart_copy(const T* start, const T* end, T* target) template struct smart_copy_helper { static inline void run(const T* start, const T* end, T* target) - { memcpy(target, start, std::ptrdiff_t(end)-std::ptrdiff_t(start)); } + { + std::ptrdiff_t size = std::ptrdiff_t(end)-std::ptrdiff_t(start); + if(size==0) return; + eigen_internal_assert(start!=0 && end!=0 && target!=0); + memcpy(target, start, size); + } }; template struct smart_copy_helper { @@ -515,7 +520,6 @@ template struct smart_copy_helper { { std::copy(start, end, target); } }; - /***************************************************************************** *** Implementation of runtime stack allocation (falling back to malloc) *** *****************************************************************************/ @@ -655,99 +659,60 @@ template class aligned_stack_memory_handler /****************************************************************************/ + /** \class aligned_allocator -* \ingroup Core_Module -* -* \brief STL compatible allocator to use with with 16 byte aligned types -* -* Example: -* \code -* // Matrix4f requires 16 bytes alignment: -* std::map< int, Matrix4f, std::less, -* aligned_allocator > > my_map_mat4; -* // Vector3f does not require 16 bytes alignment, no need to use Eigen's allocator: -* std::map< int, Vector3f > my_map_vec3; -* \endcode -* -* \sa \ref TopicStlContainers. -*/ + * \ingroup Core_Module + * + * \brief STL compatible allocator to use with with 16 byte aligned types + * + * Example: + * \code + * // Matrix4f requires 16 bytes alignment: + * std::map< int, Matrix4f, std::less, + * aligned_allocator > > my_map_mat4; + * // Vector3f does not require 16 bytes alignment, no need to use Eigen's allocator: + * std::map< int, Vector3f > my_map_vec3; + * \endcode + * + * \sa \blank \ref TopicStlContainers. 
+ */ template -class aligned_allocator +class aligned_allocator : public std::allocator { public: - typedef size_t size_type; - typedef std::ptrdiff_t difference_type; - typedef T* pointer; - typedef const T* const_pointer; - typedef T& reference; - typedef const T& const_reference; - typedef T value_type; + typedef size_t size_type; + typedef std::ptrdiff_t difference_type; + typedef T* pointer; + typedef const T* const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef T value_type; - template - struct rebind - { - typedef aligned_allocator other; - }; + template + struct rebind + { + typedef aligned_allocator other; + }; - pointer address( reference value ) const - { - return &value; - } + aligned_allocator() : std::allocator() {} - const_pointer address( const_reference value ) const - { - return &value; - } + aligned_allocator(const aligned_allocator& other) : std::allocator(other) {} - aligned_allocator() - { - } + template + aligned_allocator(const aligned_allocator& other) : std::allocator(other) {} - aligned_allocator( const aligned_allocator& ) - { - } + ~aligned_allocator() {} - template - aligned_allocator( const aligned_allocator& ) - { - } + pointer allocate(size_type num, const void* /*hint*/ = 0) + { + internal::check_size_for_overflow(num); + return static_cast( internal::aligned_malloc(num * sizeof(T)) ); + } - ~aligned_allocator() - { - } - - size_type max_size() const - { - return (std::numeric_limits::max)(); - } - - pointer allocate( size_type num, const void* hint = 0 ) - { - EIGEN_UNUSED_VARIABLE(hint); - internal::check_size_for_overflow(num); - return static_cast( internal::aligned_malloc( num * sizeof(T) ) ); - } - - void construct( pointer p, const T& value ) - { - ::new( p ) T( value ); - } - - void destroy( pointer p ) - { - p->~T(); - } - - void deallocate( pointer p, size_type /*num*/ ) - { - internal::aligned_free( p ); - } - - bool operator!=(const aligned_allocator& ) const - { return false; } - - bool operator==(const aligned_allocator& ) const - { return true; } + void deallocate(pointer p, size_type /*num*/) + { + internal::aligned_free(p); + } }; //---------- Cache sizes ---------- diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/ReenableStupidWarnings.h b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/ReenableStupidWarnings.h index 5ddfbd4aa..d573bbd4a 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Core/util/ReenableStupidWarnings.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Core/util/ReenableStupidWarnings.h @@ -8,7 +8,10 @@ #pragma warning pop #elif defined __clang__ #pragma clang diagnostic pop + #elif defined __GNUC__ && __GNUC__>=6 + #pragma GCC diagnostic pop #endif + #endif #endif // EIGEN_WARNINGS_DISABLED diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h index 956e80d9e..e5131d20b 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h @@ -327,13 +327,33 @@ GeneralizedEigenSolver::compute(const MatrixType& A, const MatrixTyp } else { - Scalar p = Scalar(0.5) * (m_matS.coeff(i, i) - m_matS.coeff(i+1, i+1)); - Scalar z = sqrt(abs(p * p + m_matS.coeff(i+1, i) * m_matS.coeff(i, i+1))); - m_alphas.coeffRef(i) = ComplexScalar(m_matS.coeff(i+1, i+1) + p, z); - m_alphas.coeffRef(i+1) = ComplexScalar(m_matS.coeff(i+1, i+1) + p, -z); + // We need to extract the generalized eigenvalues of the pair of a general 2x2 block S and a triangular 
2x2 block T + // From the eigen decomposition of T = U * E * U^-1, + // we can extract the eigenvalues of (U^-1 * S * U) / E + // Here, we can take advantage that E = diag(T), and U = [ 1 T_01 ; 0 T_11-T_00], and U^-1 = [1 -T_11/(T_11-T_00) ; 0 1/(T_11-T_00)]. + // Then taking beta=T_00*T_11*(T_11-T_00), we can avoid any division, and alpha is the eigenvalues of A = (U^-1 * S * U) * diag(T_11,T_00) * (T_11-T_00): + + // T = [a b ; 0 c] + // S = [e f ; g h] + RealScalar a = m_realQZ.matrixT().coeff(i, i), b = m_realQZ.matrixT().coeff(i, i+1), c = m_realQZ.matrixT().coeff(i+1, i+1); + RealScalar e = m_matS.coeff(i, i), f = m_matS.coeff(i, i+1), g = m_matS.coeff(i+1, i), h = m_matS.coeff(i+1, i+1); + RealScalar d = c-a; + RealScalar gb = g*b; + Matrix A; + A << (e*d-gb)*c, ((e*b+f*d-h*b)*d-gb*b)*a, + g*c , (gb+h*d)*a; + + // NOTE, we could also compute the SVD of T's block during the QZ factorization so that the respective T block is guaranteed to be diagonal, + // and then we could directly apply the formula below (while taking care of scaling S columns by T11,T00): + + Scalar p = Scalar(0.5) * (A.coeff(i, i) - A.coeff(i+1, i+1)); + Scalar z = sqrt(abs(p * p + A.coeff(i+1, i) * A.coeff(i, i+1))); + m_alphas.coeffRef(i) = ComplexScalar(A.coeff(i+1, i+1) + p, z); + m_alphas.coeffRef(i+1) = ComplexScalar(A.coeff(i+1, i+1) + p, -z); + + m_betas.coeffRef(i) = + m_betas.coeffRef(i+1) = a*c*d; - m_betas.coeffRef(i) = m_realQZ.matrixT().coeff(i,i); - m_betas.coeffRef(i+1) = m_realQZ.matrixT().coeff(i,i); i += 2; } } diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/Tridiagonalization.h b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/Tridiagonalization.h index 192278d68..a63c08aee 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Eigenvalues/Tridiagonalization.h @@ -367,10 +367,10 @@ void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs) hCoeffs.tail(n-i-1).noalias() = (matA.bottomRightCorner(remainingSize,remainingSize).template selfadjointView() * (conj(h) * matA.col(i).tail(remainingSize))); - hCoeffs.tail(n-i-1) += (conj(h)*Scalar(-0.5)*(hCoeffs.tail(remainingSize).dot(matA.col(i).tail(remainingSize)))) * matA.col(i).tail(n-i-1); + hCoeffs.tail(n-i-1) += (conj(h)*RealScalar(-0.5)*(hCoeffs.tail(remainingSize).dot(matA.col(i).tail(remainingSize)))) * matA.col(i).tail(n-i-1); matA.bottomRightCorner(remainingSize, remainingSize).template selfadjointView() - .rankUpdate(matA.col(i).tail(remainingSize), hCoeffs.tail(remainingSize), -1); + .rankUpdate(matA.col(i).tail(remainingSize), hCoeffs.tail(remainingSize), Scalar(-1)); matA.col(i).coeffRef(i+1) = beta; hCoeffs.coeffRef(i) = h; diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Homogeneous.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Homogeneous.h index 372e422b9..820ac96fe 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Homogeneous.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Homogeneous.h @@ -75,7 +75,7 @@ template class Homogeneous inline Index rows() const { return m_matrix.rows() + (int(Direction)==Vertical ? 1 : 0); } inline Index cols() const { return m_matrix.cols() + (int(Direction)==Horizontal ? 
1 : 0); } - inline Scalar coeff(Index row, Index col) const + inline Scalar coeff(Index row, Index col=0) const { if( (int(Direction)==Vertical && row==m_matrix.rows()) || (int(Direction)==Horizontal && col==m_matrix.cols())) diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Quaternion.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Quaternion.h index 25ed17bb6..e89ba80b2 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Quaternion.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Quaternion.h @@ -276,7 +276,7 @@ public: inline Coefficients& coeffs() { return m_coeffs;} inline const Coefficients& coeffs() const { return m_coeffs;} - EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(IsAligned) + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(IsAligned)) protected: Coefficients m_coeffs; diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Transform.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Transform.h index 0186f3b82..e87eec201 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Transform.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Transform.h @@ -443,7 +443,7 @@ public: operator * (const DiagonalBase &b) const { TransformTimeDiagonalReturnType res(*this); - res.linear() *= b; + res.linearExt() *= b; return res; } @@ -557,7 +557,7 @@ public: return res; } - inline Transform& operator*=(const DiagonalMatrix& s) { linear() *= s; return *this; } + inline Transform& operator*=(const DiagonalMatrix& s) { linearExt() *= s; return *this; } template inline Transform& operator=(const RotationBase& r); @@ -828,7 +828,7 @@ Transform::prescale(const MatrixBase &oth { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) - m_matrix.template block(0,0).noalias() = (other.asDiagonal() * m_matrix.template block(0,0)); + affine().noalias() = (other.asDiagonal() * affine()); return *this; } @@ -1048,7 +1048,7 @@ void Transform::computeRotationScaling(RotationMatrixTy } } -/** decomposes the linear part of the transformation as a product rotation x scaling, the scaling being +/** decomposes the linear part of the transformation as a product scaling x rotation, the scaling being * not necessarily positive. * * If either pointer is zero, the corresponding computation is skipped. diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Translation.h b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Translation.h index 2e7798686..84f6a0d12 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Translation.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Geometry/Translation.h @@ -130,8 +130,10 @@ public: } /** Applies translation to vector */ - inline VectorType operator* (const VectorType& other) const - { return m_coeffs + other; } + template + inline typename internal::enable_if::type + operator* (const MatrixBase& vec) const + { return m_coeffs + vec.derived(); } /** \returns the inverse translation (opposite) */ Translation inverse() const { return Translation(-m_coeffs); } diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Householder/Householder.h b/gtsam/3rdparty/Eigen/Eigen/src/Householder/Householder.h index 32112af9b..4c1f499a1 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Householder/Householder.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Householder/Householder.h @@ -75,8 +75,9 @@ void MatrixBase::makeHouseholder( RealScalar tailSqNorm = size()==1 ? 
RealScalar(0) : tail.squaredNorm(); Scalar c0 = coeff(0); + const RealScalar tol = (std::numeric_limits::min)(); - if(tailSqNorm == RealScalar(0) && numext::imag(c0)==RealScalar(0)) + if(tailSqNorm <= tol && numext::abs2(numext::imag(c0))<=tol) { tau = RealScalar(0); beta = numext::real(c0); diff --git a/gtsam/3rdparty/Eigen/Eigen/src/Householder/HouseholderSequence.h b/gtsam/3rdparty/Eigen/Eigen/src/Householder/HouseholderSequence.h index d800ca1fa..aea2439a9 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/Householder/HouseholderSequence.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/Householder/HouseholderSequence.h @@ -237,8 +237,9 @@ template class HouseholderS { workspace.resize(rows()); Index vecs = m_length; + const typename Dest::Scalar *dst_data = internal::extract_data(dst); if( internal::is_same::type,Dest>::value - && internal::extract_data(dst) == internal::extract_data(m_vectors)) + && dst_data!=0 && dst_data == internal::extract_data(m_vectors)) { // in-place dst.diagonal().setOnes(); diff --git a/gtsam/3rdparty/Eigen/Eigen/src/LU/Inverse.h b/gtsam/3rdparty/Eigen/Eigen/src/LU/Inverse.h index 3cf887193..e836fd696 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/LU/Inverse.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/LU/Inverse.h @@ -290,7 +290,7 @@ struct inverse_impl : public ReturnByValue > { const int Size = EIGEN_PLAIN_ENUM_MIN(MatrixType::ColsAtCompileTime,Dest::ColsAtCompileTime); EIGEN_ONLY_USED_FOR_DEBUG(Size); - eigen_assert(( (Size<=1) || (Size>4) || (extract_data(m_matrix)!=extract_data(dst))) + eigen_assert(( (Size<=1) || (Size>4) || (extract_data(m_matrix)!=0 && extract_data(m_matrix)!=extract_data(dst))) && "Aliasing problem detected in inverse(), you need to do inverse().eval() here."); compute_inverse::run(m_matrix, dst); diff --git a/gtsam/3rdparty/Eigen/Eigen/src/LU/arch/Inverse_SSE.h b/gtsam/3rdparty/Eigen/Eigen/src/LU/arch/Inverse_SSE.h index 60b7a2376..80ec674d6 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/LU/arch/Inverse_SSE.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/LU/arch/Inverse_SSE.h @@ -151,10 +151,12 @@ struct compute_inverse_size4 iC = _mm_mul_ps(rd,iC); iD = _mm_mul_ps(rd,iD); - result.template writePacket( 0, _mm_shuffle_ps(iA,iB,0x77)); - result.template writePacket( 4, _mm_shuffle_ps(iA,iB,0x22)); - result.template writePacket( 8, _mm_shuffle_ps(iC,iD,0x77)); - result.template writePacket(12, _mm_shuffle_ps(iC,iD,0x22)); + DenseIndex res_stride = result.outerStride(); + float* res = result.data(); + pstoret(res+0, _mm_shuffle_ps(iA,iB,0x77)); + pstoret(res+res_stride, _mm_shuffle_ps(iA,iB,0x22)); + pstoret(res+2*res_stride, _mm_shuffle_ps(iC,iD,0x77)); + pstoret(res+3*res_stride, _mm_shuffle_ps(iC,iD,0x22)); } }; @@ -311,14 +313,16 @@ struct compute_inverse_size4 iC1 = _mm_sub_pd(_mm_mul_pd(B1, dC), iC1); iC2 = _mm_sub_pd(_mm_mul_pd(B2, dC), iC2); - result.template writePacket( 0, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 3), d1)); // iA# / det - result.template writePacket( 4, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 0), d2)); - result.template writePacket( 2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 3), d1)); // iB# / det - result.template writePacket( 6, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 0), d2)); - result.template writePacket( 8, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 3), d1)); // iC# / det - result.template writePacket(12, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 0), d2)); - result.template writePacket(10, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 3), d1)); // iD# / det - result.template writePacket(14, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 0), d2)); + DenseIndex res_stride = 
result.outerStride(); + double* res = result.data(); + pstoret(res+0, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 3), d1)); + pstoret(res+res_stride, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 0), d2)); + pstoret(res+2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 3), d1)); + pstoret(res+res_stride+2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 0), d2)); + pstoret(res+2*res_stride, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 3), d1)); + pstoret(res+3*res_stride, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 0), d2)); + pstoret(res+2*res_stride+2,_mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 3), d1)); + pstoret(res+3*res_stride+2,_mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 0), d2)); } }; diff --git a/gtsam/3rdparty/Eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h b/gtsam/3rdparty/Eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h index a955287d1..20acc0226 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h @@ -10,6 +10,14 @@ #ifndef EIGEN_PASTIXSUPPORT_H #define EIGEN_PASTIXSUPPORT_H +#if defined(DCOMPLEX) + #define PASTIX_COMPLEX COMPLEX + #define PASTIX_DCOMPLEX DCOMPLEX +#else + #define PASTIX_COMPLEX std::complex + #define PASTIX_DCOMPLEX std::complex +#endif + namespace Eigen { /** \ingroup PaStiXSupport_Module @@ -74,14 +82,14 @@ namespace internal { if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } if (nbrhs == 0) {x = NULL; nbrhs=1;} - c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast(vals), perm, invp, reinterpret_cast(x), nbrhs, iparm, dparm); + c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast(vals), perm, invp, reinterpret_cast(x), nbrhs, iparm, dparm); } void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex *vals, int *perm, int * invp, std::complex *x, int nbrhs, int *iparm, double *dparm) { if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; } if (nbrhs == 0) {x = NULL; nbrhs=1;} - z_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast(vals), perm, invp, reinterpret_cast(x), nbrhs, iparm, dparm); + z_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast(vals), perm, invp, reinterpret_cast(x), nbrhs, iparm, dparm); } // Convert the matrix to Fortran-style Numbering diff --git a/gtsam/3rdparty/Eigen/Eigen/src/PardisoSupport/PardisoSupport.h b/gtsam/3rdparty/Eigen/Eigen/src/PardisoSupport/PardisoSupport.h index 18cd7d88a..0faacc5f5 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/PardisoSupport/PardisoSupport.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/PardisoSupport/PardisoSupport.h @@ -221,11 +221,11 @@ class PardisoImpl m_type = type; bool symmetric = std::abs(m_type) < 10; m_iparm[0] = 1; // No solver default - m_iparm[1] = 3; // use Metis for the ordering - m_iparm[2] = 1; // Numbers of processors, value of OMP_NUM_THREADS + m_iparm[1] = 2; // use Metis for the ordering + m_iparm[2] = 0; // Reserved. Set to zero. (??Numbers of processors, value of OMP_NUM_THREADS??) m_iparm[3] = 0; // No iterative-direct algorithm m_iparm[4] = 0; // No user fill-in reducing permutation - m_iparm[5] = 0; // Write solution into x + m_iparm[5] = 0; // Write solution into x, b is left unchanged m_iparm[6] = 0; // Not in use m_iparm[7] = 2; // Max numbers of iterative refinement steps m_iparm[8] = 0; // Not in use @@ -246,7 +246,10 @@ class PardisoImpl m_iparm[26] = 0; // No matrix checker m_iparm[27] = (sizeof(RealScalar) == 4) ? 
1 : 0; m_iparm[34] = 1; // C indexing - m_iparm[59] = 1; // Automatic switch between In-Core and Out-of-Core modes + m_iparm[36] = 0; // CSR + m_iparm[59] = 0; // 0 - In-Core ; 1 - Automatic switch between In-Core and Out-of-Core modes ; 2 - Out-of-Core + + memset(m_pt, 0, sizeof(m_pt)); } protected: @@ -384,7 +387,6 @@ bool PardisoImpl::_solve(const MatrixBase &b, MatrixBase::_solve(const MatrixBase &b, MatrixBase * * \sa \ref TutorialSparseDirectSolvers @@ -447,6 +452,9 @@ class PardisoLU : public PardisoImpl< PardisoLU > * using the Intel MKL PARDISO library. The sparse matrix A must be selfajoint and positive definite. * The vectors or matrices X and B can be either dense or sparse. * + * By default, it runs in in-core mode. To enable PARDISO's out-of-core feature, set: + * \code solver.pardisoParameterArray()[59] = 1; \endcode + * * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam UpLo can be any bitwise combination of Upper, Lower. The default is Upper, meaning only the upper triangular part has to be used. * Upper|Lower can be used to tell both triangular parts can be used as input. @@ -507,6 +515,9 @@ class PardisoLLT : public PardisoImpl< PardisoLLT > * For complex matrices, A can also be symmetric only, see the \a Options template parameter. * The vectors or matrices X and B can be either dense or sparse. * + * By default, it runs in in-core mode. To enable PARDISO's out-of-core feature, set: + * \code solver.pardisoParameterArray()[59] = 1; \endcode + * * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam Options can be any bitwise combination of Upper, Lower, and Symmetric. The default is Upper, meaning only the upper triangular part has to be used. * Symmetric can be used for symmetric, non-selfadjoint complex matrices, the default being to assume a selfadjoint matrix. diff --git a/gtsam/3rdparty/Eigen/Eigen/src/QR/ColPivHouseholderQR.h b/gtsam/3rdparty/Eigen/Eigen/src/QR/ColPivHouseholderQR.h index 567eab7cd..1ef77ae32 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/QR/ColPivHouseholderQR.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/QR/ColPivHouseholderQR.h @@ -127,9 +127,6 @@ template class ColPivHouseholderQR * * \returns a solution. * - * \note The case where b is a matrix is not yet implemented. Also, this - * code is space inefficient. - * * \note_about_checking_solutions * * \note_about_arbitrary_choice_of_solution diff --git a/gtsam/3rdparty/Eigen/Eigen/src/QR/FullPivHouseholderQR.h b/gtsam/3rdparty/Eigen/Eigen/src/QR/FullPivHouseholderQR.h index 0b39966e1..bdd994795 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/QR/FullPivHouseholderQR.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/QR/FullPivHouseholderQR.h @@ -134,9 +134,6 @@ template class FullPivHouseholderQR * \returns the exact or least-square solution if the rank is greater or equal to the number of columns of A, * and an arbitrary solution otherwise. * - * \note The case where b is a matrix is not yet implemented. Also, this - * code is space inefficient. - * * \note_about_checking_solutions * * \note_about_arbitrary_choice_of_solution diff --git a/gtsam/3rdparty/Eigen/Eigen/src/QR/HouseholderQR.h b/gtsam/3rdparty/Eigen/Eigen/src/QR/HouseholderQR.h index 343a66499..e4a3a639f 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/QR/HouseholderQR.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/QR/HouseholderQR.h @@ -107,9 +107,6 @@ template class HouseholderQR * * \returns a solution. * - * \note The case where b is a matrix is not yet implemented. 
Also, this - * code is space inefficient. - * * \note_about_checking_solutions * * \note_about_arbitrary_choice_of_solution diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SVD/JacobiSVD.h b/gtsam/3rdparty/Eigen/Eigen/src/SVD/JacobiSVD.h index 89ace381e..7a5821d4f 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/SVD/JacobiSVD.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SVD/JacobiSVD.h @@ -359,29 +359,42 @@ struct svd_precondition_2x2_block_to_be_real SVD; typedef typename SVD::Index Index; - static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {} + typedef typename MatrixType::RealScalar RealScalar; + static bool run(typename SVD::WorkMatrixType&, SVD&, Index, Index, RealScalar&) { return true; } }; template struct svd_precondition_2x2_block_to_be_real { typedef JacobiSVD SVD; + typedef typename SVD::Index Index; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename SVD::Index Index; - static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q) + static bool run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q, RealScalar& maxDiagEntry) { using std::sqrt; + using std::abs; + using std::max; Scalar z; JacobiRotation rot; RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p))); - + + const RealScalar considerAsZero = (std::numeric_limits::min)(); + const RealScalar precision = NumTraits::epsilon(); + if(n==0) { - z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); - work_matrix.row(p) *= z; - if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z); - if(work_matrix.coeff(q,q)!=Scalar(0)) + // make sure first column is zero + work_matrix.coeffRef(p,p) = work_matrix.coeffRef(q,p) = Scalar(0); + + if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero) + { + // work_matrix.coeff(p,q) can be zero if work_matrix.coeff(q,p) is not zero but small enough to underflow when computing n + z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); + work_matrix.row(p) *= z; + if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z); + } + if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero) { z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); work_matrix.row(q) *= z; @@ -395,19 +408,25 @@ struct svd_precondition_2x2_block_to_be_real rot.s() = work_matrix.coeff(q,p) / n; work_matrix.applyOnTheLeft(p,q,rot); if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint()); - if(work_matrix.coeff(p,q) != Scalar(0)) + if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero) { - Scalar z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); + z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); work_matrix.col(q) *= z; if(svd.computeV()) svd.m_matrixV.col(q) *= z; } - if(work_matrix.coeff(q,q) != Scalar(0)) + if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero) { z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); work_matrix.row(q) *= z; if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z); } } + + // update largest diagonal entry + maxDiagEntry = max EIGEN_EMPTY (maxDiagEntry,max EIGEN_EMPTY (abs(work_matrix.coeff(p,p)), abs(work_matrix.coeff(q,q)))); + // and check whether the 2x2 block is already diagonal + RealScalar threshold = max EIGEN_EMPTY (considerAsZero, precision * maxDiagEntry); + return abs(work_matrix.coeff(p,q))>threshold || abs(work_matrix.coeff(q,p)) > threshold; } }; @@ -424,22 +443,23 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q, JacobiRotation rot1; RealScalar t = 
m.coeff(0,0) + m.coeff(1,1); RealScalar d = m.coeff(1,0) - m.coeff(0,1); - if(t == RealScalar(0)) + if(d == RealScalar(0)) { - rot1.c() = RealScalar(0); - rot1.s() = d > RealScalar(0) ? RealScalar(1) : RealScalar(-1); + rot1.s() = RealScalar(0); + rot1.c() = RealScalar(1); } else { - RealScalar t2d2 = numext::hypot(t,d); - rot1.c() = abs(t)/t2d2; - rot1.s() = d/t2d2; - if(tmakeJacobi(m,0,1); - *j_left = rot1 * j_right->transpose(); + *j_left = rot1 * j_right->transpose(); } } // end namespace internal @@ -826,6 +846,7 @@ JacobiSVD::compute(const MatrixType& matrix, unsig check_template_parameters(); using std::abs; + using std::max; allocate(matrix.rows(), matrix.cols(), computationOptions); // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations, @@ -857,6 +878,7 @@ JacobiSVD::compute(const MatrixType& matrix, unsig } /*** step 2. The main Jacobi SVD iteration. ***/ + RealScalar maxDiagEntry = m_workMatrix.cwiseAbs().diagonal().maxCoeff(); bool finished = false; while(!finished) @@ -872,25 +894,27 @@ JacobiSVD::compute(const MatrixType& matrix, unsig // if this 2x2 sub-matrix is not diagonal already... // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't // keep us iterating forever. Similarly, small denormal numbers are considered zero. - using std::max; - RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)), - abs(m_workMatrix.coeff(q,q)))); - // We compare both values to threshold instead of calling max to be robust to NaN (See bug 791) + RealScalar threshold = max EIGEN_EMPTY (considerAsZero, precision * maxDiagEntry); if(abs(m_workMatrix.coeff(p,q))>threshold || abs(m_workMatrix.coeff(q,p)) > threshold) { finished = false; - // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal - internal::svd_precondition_2x2_block_to_be_real::run(m_workMatrix, *this, p, q); - JacobiRotation j_left, j_right; - internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right); + // the complex to real operation returns true is the updated 2x2 block is not already diagonal + if(internal::svd_precondition_2x2_block_to_be_real::run(m_workMatrix, *this, p, q, maxDiagEntry)) + { + JacobiRotation j_left, j_right; + internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right); - // accumulate resulting Jacobi rotations - m_workMatrix.applyOnTheLeft(p,q,j_left); - if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose()); + // accumulate resulting Jacobi rotations + m_workMatrix.applyOnTheLeft(p,q,j_left); + if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose()); - m_workMatrix.applyOnTheRight(p,q,j_right); - if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right); + m_workMatrix.applyOnTheRight(p,q,j_right); + if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right); + + // keep track of the largest diagonal coefficient + maxDiagEntry = max EIGEN_EMPTY (maxDiagEntry,max EIGEN_EMPTY (abs(m_workMatrix.coeff(p,p)), abs(m_workMatrix.coeff(q,q)))); + } } } } diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/CompressedStorage.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/CompressedStorage.h index a667cb56e..34cad3df7 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/CompressedStorage.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/CompressedStorage.h @@ -102,6 +102,11 @@ class CompressedStorage inline size_t allocatedSize() const { return m_allocatedSize; } inline void 
clear() { m_size = 0; } + const Scalar* valuePtr() const { return m_values; } + Scalar* valuePtr() { return m_values; } + const Index* indexPtr() const { return m_indices; } + Index* indexPtr() { return m_indices; } + inline Scalar& value(size_t i) { return m_values[i]; } inline const Scalar& value(size_t i) const { return m_values[i]; } @@ -208,8 +213,10 @@ class CompressedStorage Index* newIndices = new Index[size]; size_t copySize = (std::min)(size, m_size); // copy - internal::smart_copy(m_values, m_values+copySize, newValues); - internal::smart_copy(m_indices, m_indices+copySize, newIndices); + if (copySize>0) { + internal::smart_copy(m_values, m_values+copySize, newValues); + internal::smart_copy(m_indices, m_indices+copySize, newIndices); + } // delete old stuff delete[] m_values; delete[] m_indices; diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseBlock.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseBlock.h index 4f4983508..99886079d 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseBlock.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseBlock.h @@ -1,539 +1,623 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2008-2009 Gael Guennebaud -// -// This Source Code Form is subject to the terms of the Mozilla -// Public License v. 2.0. If a copy of the MPL was not distributed -// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. - -#ifndef EIGEN_SPARSE_BLOCK_H -#define EIGEN_SPARSE_BLOCK_H - -namespace Eigen { - -template -class BlockImpl - : public SparseMatrixBase > -{ - typedef typename internal::remove_all::type _MatrixTypeNested; - typedef Block BlockType; -public: - enum { IsRowMajor = internal::traits::IsRowMajor }; -protected: - enum { OuterSize = IsRowMajor ? BlockRows : BlockCols }; -public: - EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) - - class InnerIterator: public XprType::InnerIterator - { - typedef typename BlockImpl::Index Index; - public: - inline InnerIterator(const BlockType& xpr, Index outer) - : XprType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) - {} - inline Index row() const { return IsRowMajor ? m_outer : this->index(); } - inline Index col() const { return IsRowMajor ? this->index() : m_outer; } - protected: - Index m_outer; - }; - class ReverseInnerIterator: public XprType::ReverseInnerIterator - { - typedef typename BlockImpl::Index Index; - public: - inline ReverseInnerIterator(const BlockType& xpr, Index outer) - : XprType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) - {} - inline Index row() const { return IsRowMajor ? m_outer : this->index(); } - inline Index col() const { return IsRowMajor ? this->index() : m_outer; } - protected: - Index m_outer; - }; - - inline BlockImpl(const XprType& xpr, int i) - : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) - {} - - inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols) - : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols) - {} - - inline const Scalar coeff(int row, int col) const - { - return m_matrix.coeff(row + IsRowMajor ? m_outerStart : 0, col +IsRowMajor ? 0 : m_outerStart); - } - - inline const Scalar coeff(int index) const - { - return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart); - } - - EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? 
m_outerSize.value() : m_matrix.rows(); } - EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } - - protected: - - typename XprType::Nested m_matrix; - Index m_outerStart; - const internal::variable_if_dynamic m_outerSize; - - EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) - private: - Index nonZeros() const; -}; - - -/*************************************************************************** -* specialisation for SparseMatrix -***************************************************************************/ - -template -class BlockImpl,BlockRows,BlockCols,true,Sparse> - : public SparseMatrixBase,BlockRows,BlockCols,true> > -{ - typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType; - typedef typename internal::remove_all::type _MatrixTypeNested; - typedef Block BlockType; - typedef Block ConstBlockType; -public: - enum { IsRowMajor = internal::traits::IsRowMajor }; - EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) -protected: - enum { OuterSize = IsRowMajor ? BlockRows : BlockCols }; -public: - - class InnerIterator: public SparseMatrixType::InnerIterator - { - public: - inline InnerIterator(const BlockType& xpr, Index outer) - : SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) - {} - inline Index row() const { return IsRowMajor ? m_outer : this->index(); } - inline Index col() const { return IsRowMajor ? this->index() : m_outer; } - protected: - Index m_outer; - }; - class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator - { - public: - inline ReverseInnerIterator(const BlockType& xpr, Index outer) - : SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) - {} - inline Index row() const { return IsRowMajor ? m_outer : this->index(); } - inline Index col() const { return IsRowMajor ? this->index() : m_outer; } - protected: - Index m_outer; - }; - - inline BlockImpl(const SparseMatrixType& xpr, int i) - : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) - {} - - inline BlockImpl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols) - : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols) - {} - - template - inline BlockType& operator=(const SparseMatrixBase& other) - { - typedef typename internal::remove_all::type _NestedMatrixType; - _NestedMatrixType& matrix = const_cast<_NestedMatrixType&>(m_matrix);; - // This assignement is slow if this vector set is not empty - // and/or it is not at the end of the nonzeros of the underlying matrix. - - // 1 - eval to a temporary to avoid transposition and/or aliasing issues - SparseMatrix tmp(other); - - // 2 - let's check whether there is enough allocated memory - Index nnz = tmp.nonZeros(); - Index start = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block - Index end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending posiiton of the current block - Index block_size = end - start; // available room in the current block - Index tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end; - - Index free_size = m_matrix.isCompressed() - ? 
Index(matrix.data().allocatedSize()) + block_size - : block_size; - - if(nnz>free_size) - { - // realloc manually to reduce copies - typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz); - - std::memcpy(&newdata.value(0), &m_matrix.data().value(0), start*sizeof(Scalar)); - std::memcpy(&newdata.index(0), &m_matrix.data().index(0), start*sizeof(Index)); - - std::memcpy(&newdata.value(start), &tmp.data().value(0), nnz*sizeof(Scalar)); - std::memcpy(&newdata.index(start), &tmp.data().index(0), nnz*sizeof(Index)); - - std::memcpy(&newdata.value(start+nnz), &matrix.data().value(end), tail_size*sizeof(Scalar)); - std::memcpy(&newdata.index(start+nnz), &matrix.data().index(end), tail_size*sizeof(Index)); - - newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz); - - matrix.data().swap(newdata); - } - else - { - // no need to realloc, simply copy the tail at its respective position and insert tmp - matrix.data().resize(start + nnz + tail_size); - - std::memmove(&matrix.data().value(start+nnz), &matrix.data().value(end), tail_size*sizeof(Scalar)); - std::memmove(&matrix.data().index(start+nnz), &matrix.data().index(end), tail_size*sizeof(Index)); - - std::memcpy(&matrix.data().value(start), &tmp.data().value(0), nnz*sizeof(Scalar)); - std::memcpy(&matrix.data().index(start), &tmp.data().index(0), nnz*sizeof(Index)); - } - - // update innerNonZeros - if(!m_matrix.isCompressed()) - for(Index j=0; j(other); - } - - inline const Scalar* valuePtr() const - { return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; } - inline Scalar* valuePtr() - { return m_matrix.const_cast_derived().valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; } - - inline const Index* innerIndexPtr() const - { return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; } - inline Index* innerIndexPtr() - { return m_matrix.const_cast_derived().innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; } - - inline const Index* outerIndexPtr() const - { return m_matrix.outerIndexPtr() + m_outerStart; } - inline Index* outerIndexPtr() - { return m_matrix.const_cast_derived().outerIndexPtr() + m_outerStart; } - - Index nonZeros() const - { - if(m_matrix.isCompressed()) - return std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]) - - std::size_t(m_matrix.outerIndexPtr()[m_outerStart]); - else if(m_outerSize.value()==0) - return 0; - else - return Map >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum(); - } - - inline Scalar& coeffRef(int row, int col) - { - return m_matrix.const_cast_derived().coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); - } - - inline const Scalar coeff(int row, int col) const - { - return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); - } - - inline const Scalar coeff(int index) const - { - return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart); - } - - const Scalar& lastCoeff() const - { - EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockImpl); - eigen_assert(nonZeros()>0); - if(m_matrix.isCompressed()) - return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1]; - else - return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1]; - } - - EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } - EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? 
m_matrix.cols() : m_outerSize.value(); } - - protected: - - typename SparseMatrixType::Nested m_matrix; - Index m_outerStart; - const internal::variable_if_dynamic m_outerSize; - -}; - - -template -class BlockImpl,BlockRows,BlockCols,true,Sparse> - : public SparseMatrixBase,BlockRows,BlockCols,true> > -{ - typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType; - typedef typename internal::remove_all::type _MatrixTypeNested; - typedef Block BlockType; -public: - enum { IsRowMajor = internal::traits::IsRowMajor }; - EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) -protected: - enum { OuterSize = IsRowMajor ? BlockRows : BlockCols }; -public: - - class InnerIterator: public SparseMatrixType::InnerIterator - { - public: - inline InnerIterator(const BlockType& xpr, Index outer) - : SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) - {} - inline Index row() const { return IsRowMajor ? m_outer : this->index(); } - inline Index col() const { return IsRowMajor ? this->index() : m_outer; } - protected: - Index m_outer; - }; - class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator - { - public: - inline ReverseInnerIterator(const BlockType& xpr, Index outer) - : SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) - {} - inline Index row() const { return IsRowMajor ? m_outer : this->index(); } - inline Index col() const { return IsRowMajor ? this->index() : m_outer; } - protected: - Index m_outer; - }; - - inline BlockImpl(const SparseMatrixType& xpr, int i) - : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) - {} - - inline BlockImpl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols) - : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols) - {} - - inline const Scalar* valuePtr() const - { return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; } - - inline const Index* innerIndexPtr() const - { return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; } - - inline const Index* outerIndexPtr() const - { return m_matrix.outerIndexPtr() + m_outerStart; } - - Index nonZeros() const - { - if(m_matrix.isCompressed()) - return std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]) - - std::size_t(m_matrix.outerIndexPtr()[m_outerStart]); - else if(m_outerSize.value()==0) - return 0; - else - return Map >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum(); - } - - inline const Scalar coeff(int row, int col) const - { - return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); - } - - inline const Scalar coeff(int index) const - { - return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart); - } - - const Scalar& lastCoeff() const - { - EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockImpl); - eigen_assert(nonZeros()>0); - if(m_matrix.isCompressed()) - return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1]; - else - return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1]; - } - - EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } - EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? 
m_matrix.cols() : m_outerSize.value(); } - - protected: - - EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) - - typename SparseMatrixType::Nested m_matrix; - Index m_outerStart; - const internal::variable_if_dynamic m_outerSize; -}; - -//---------- - -/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this - * is col-major (resp. row-major). - */ -template -typename SparseMatrixBase::InnerVectorReturnType SparseMatrixBase::innerVector(Index outer) -{ return InnerVectorReturnType(derived(), outer); } - -/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this - * is col-major (resp. row-major). Read-only. - */ -template -const typename SparseMatrixBase::ConstInnerVectorReturnType SparseMatrixBase::innerVector(Index outer) const -{ return ConstInnerVectorReturnType(derived(), outer); } - -/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this - * is col-major (resp. row-major). - */ -template -typename SparseMatrixBase::InnerVectorsReturnType -SparseMatrixBase::innerVectors(Index outerStart, Index outerSize) -{ - return Block(derived(), - IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart, - IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize); - -} - -/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this - * is col-major (resp. row-major). Read-only. - */ -template -const typename SparseMatrixBase::ConstInnerVectorsReturnType -SparseMatrixBase::innerVectors(Index outerStart, Index outerSize) const -{ - return Block(derived(), - IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart, - IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize); - -} - -/** Generic implementation of sparse Block expression. - * Real-only. - */ -template -class BlockImpl - : public SparseMatrixBase >, internal::no_assignment_operator -{ - typedef typename internal::remove_all::type _MatrixTypeNested; - typedef Block BlockType; -public: - enum { IsRowMajor = internal::traits::IsRowMajor }; - EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) - - /** Column or Row constructor - */ - inline BlockImpl(const XprType& xpr, int i) - : m_matrix(xpr), - m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0), - m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0), - m_blockRows(BlockRows==1 ? 1 : xpr.rows()), - m_blockCols(BlockCols==1 ? 1 : xpr.cols()) - {} - - /** Dynamic-size constructor - */ - inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols) - : m_matrix(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols) - {} - - inline int rows() const { return m_blockRows.value(); } - inline int cols() const { return m_blockCols.value(); } - - inline Scalar& coeffRef(int row, int col) - { - return m_matrix.const_cast_derived() - .coeffRef(row + m_startRow.value(), col + m_startCol.value()); - } - - inline const Scalar coeff(int row, int col) const - { - return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value()); - } - - inline Scalar& coeffRef(int index) - { - return m_matrix.const_cast_derived() - .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), - m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); - } - - inline const Scalar coeff(int index) const - { - return m_matrix - .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), - m_startCol.value() + (RowsAtCompileTime == 1 ? 
index : 0)); - } - - inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; } - - class InnerIterator : public _MatrixTypeNested::InnerIterator - { - typedef typename _MatrixTypeNested::InnerIterator Base; - const BlockType& m_block; - Index m_end; - public: - - EIGEN_STRONG_INLINE InnerIterator(const BlockType& block, Index outer) - : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())), - m_block(block), - m_end(IsRowMajor ? block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value()) - { - while( (Base::operator bool()) && (Base::index() < (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value())) ) - Base::operator++(); - } - - inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } - inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); } - inline Index row() const { return Base::row() - m_block.m_startRow.value(); } - inline Index col() const { return Base::col() - m_block.m_startCol.value(); } - - inline operator bool() const { return Base::operator bool() && Base::index() < m_end; } - }; - class ReverseInnerIterator : public _MatrixTypeNested::ReverseInnerIterator - { - typedef typename _MatrixTypeNested::ReverseInnerIterator Base; - const BlockType& m_block; - Index m_begin; - public: - - EIGEN_STRONG_INLINE ReverseInnerIterator(const BlockType& block, Index outer) - : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())), - m_block(block), - m_begin(IsRowMajor ? block.m_startCol.value() : block.m_startRow.value()) - { - while( (Base::operator bool()) && (Base::index() >= (IsRowMajor ? m_block.m_startCol.value()+block.m_blockCols.value() : m_block.m_startRow.value()+block.m_blockRows.value())) ) - Base::operator--(); - } - - inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } - inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); } - inline Index row() const { return Base::row() - m_block.m_startRow.value(); } - inline Index col() const { return Base::col() - m_block.m_startCol.value(); } - - inline operator bool() const { return Base::operator bool() && Base::index() >= m_begin; } - }; - protected: - friend class InnerIterator; - friend class ReverseInnerIterator; - - EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) - - typename XprType::Nested m_matrix; - const internal::variable_if_dynamic m_startRow; - const internal::variable_if_dynamic m_startCol; - const internal::variable_if_dynamic m_blockRows; - const internal::variable_if_dynamic m_blockCols; - private: - Index nonZeros() const; -}; - -} // end namespace Eigen - -#endif // EIGEN_SPARSE_BLOCK_H - +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_SPARSE_BLOCK_H +#define EIGEN_SPARSE_BLOCK_H + +namespace Eigen { + +template +class BlockImpl + : public SparseMatrixBase > +{ +public: + typedef Block BlockType; + enum { IsRowMajor = internal::traits::IsRowMajor }; +protected: + typedef typename internal::remove_all::type _MatrixTypeNested; + enum { OuterSize = IsRowMajor ? BlockRows : BlockCols }; +public: + EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) + + class InnerIterator: public XprType::InnerIterator + { + typedef typename BlockImpl::Index Index; + public: + inline InnerIterator(const Block& xpr, Index outer) + : XprType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) + {} + inline Index row() const { return IsRowMajor ? m_outer : this->index(); } + inline Index col() const { return IsRowMajor ? this->index() : m_outer; } + protected: + Index m_outer; + }; + class ReverseInnerIterator: public XprType::ReverseInnerIterator + { + typedef typename BlockImpl::Index Index; + public: + inline ReverseInnerIterator(const BlockType& xpr, Index outer) + : XprType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) + {} + inline Index row() const { return IsRowMajor ? m_outer : this->index(); } + inline Index col() const { return IsRowMajor ? this->index() : m_outer; } + protected: + Index m_outer; + }; + + inline BlockImpl(const XprType& xpr, int i) + : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) + {} + + inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols) + : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols) + {} + + inline const Scalar coeff(int row, int col) const + { + return m_matrix.coeff(row + IsRowMajor ? m_outerStart : 0, col +IsRowMajor ? 0 : m_outerStart); + } + + inline const Scalar coeff(int index) const + { + return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart); + } + + EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + + protected: + + typename XprType::Nested m_matrix; + Index m_outerStart; + const internal::variable_if_dynamic m_outerSize; + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) + private: + Index nonZeros() const; +}; + + +/*************************************************************************** +* specialisation for SparseMatrix +***************************************************************************/ + +template +class BlockImpl,BlockRows,BlockCols,true,Sparse> + : public SparseMatrixBase,BlockRows,BlockCols,true> > +{ + typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType; + typedef typename internal::remove_all::type _MatrixTypeNested; + typedef Block ConstBlockType; +public: + typedef Block BlockType; + enum { IsRowMajor = internal::traits::IsRowMajor }; + EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) +protected: + enum { OuterSize = IsRowMajor ? BlockRows : BlockCols }; +public: + + class InnerIterator: public SparseMatrixType::InnerIterator + { + public: + inline InnerIterator(const BlockType& xpr, Index outer) + : SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) + {} + inline Index row() const { return IsRowMajor ? m_outer : this->index(); } + inline Index col() const { return IsRowMajor ? 
this->index() : m_outer; } + protected: + Index m_outer; + }; + class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator + { + public: + inline ReverseInnerIterator(const BlockType& xpr, Index outer) + : SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) + {} + inline Index row() const { return IsRowMajor ? m_outer : this->index(); } + inline Index col() const { return IsRowMajor ? this->index() : m_outer; } + protected: + Index m_outer; + }; + + inline BlockImpl(const SparseMatrixType& xpr, int i) + : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) + {} + + inline BlockImpl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols) + : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols) + {} + + template + inline BlockType& operator=(const SparseMatrixBase& other) + { + typedef typename internal::remove_all::type _NestedMatrixType; + _NestedMatrixType& matrix = const_cast<_NestedMatrixType&>(m_matrix);; + // This assignement is slow if this vector set is not empty + // and/or it is not at the end of the nonzeros of the underlying matrix. + + // 1 - eval to a temporary to avoid transposition and/or aliasing issues + SparseMatrix tmp(other); + + // 2 - let's check whether there is enough allocated memory + Index nnz = tmp.nonZeros(); + Index start = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block + Index end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending posiiton of the current block + Index block_size = end - start; // available room in the current block + Index tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end; + + Index free_size = m_matrix.isCompressed() + ? 
Index(matrix.data().allocatedSize()) + block_size + : block_size; + + if(nnz>free_size) + { + // realloc manually to reduce copies + typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz); + + std::memcpy(newdata.valuePtr(), m_matrix.data().valuePtr(), start*sizeof(Scalar)); + std::memcpy(newdata.indexPtr(), m_matrix.data().indexPtr(), start*sizeof(Index)); + + std::memcpy(newdata.valuePtr() + start, tmp.data().valuePtr(), nnz*sizeof(Scalar)); + std::memcpy(newdata.indexPtr() + start, tmp.data().indexPtr(), nnz*sizeof(Index)); + + std::memcpy(newdata.valuePtr()+start+nnz, matrix.data().valuePtr()+end, tail_size*sizeof(Scalar)); + std::memcpy(newdata.indexPtr()+start+nnz, matrix.data().indexPtr()+end, tail_size*sizeof(Index)); + + newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz); + + matrix.data().swap(newdata); + } + else + { + // no need to realloc, simply copy the tail at its respective position and insert tmp + matrix.data().resize(start + nnz + tail_size); + + std::memmove(matrix.data().valuePtr()+start+nnz, matrix.data().valuePtr()+end, tail_size*sizeof(Scalar)); + std::memmove(matrix.data().indexPtr()+start+nnz, matrix.data().indexPtr()+end, tail_size*sizeof(Index)); + + std::memcpy(matrix.data().valuePtr()+start, tmp.data().valuePtr(), nnz*sizeof(Scalar)); + std::memcpy(matrix.data().indexPtr()+start, tmp.data().indexPtr(), nnz*sizeof(Index)); + } + + // update innerNonZeros + if(!m_matrix.isCompressed()) + for(Index j=0; j(other); + } + + inline const Scalar* valuePtr() const + { return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; } + inline Scalar* valuePtr() + { return m_matrix.const_cast_derived().valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; } + + inline const Index* innerIndexPtr() const + { return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; } + inline Index* innerIndexPtr() + { return m_matrix.const_cast_derived().innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; } + + inline const Index* outerIndexPtr() const + { return m_matrix.outerIndexPtr() + m_outerStart; } + inline Index* outerIndexPtr() + { return m_matrix.const_cast_derived().outerIndexPtr() + m_outerStart; } + + Index nonZeros() const + { + if(m_matrix.isCompressed()) + return std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]) + - std::size_t(m_matrix.outerIndexPtr()[m_outerStart]); + else if(m_outerSize.value()==0) + return 0; + else + return Map >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum(); + } + + inline Scalar& coeffRef(int row, int col) + { + return m_matrix.const_cast_derived().coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); + } + + inline const Scalar coeff(int row, int col) const + { + return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); + } + + inline const Scalar coeff(int index) const + { + return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart); + } + + const Scalar& lastCoeff() const + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockImpl); + eigen_assert(nonZeros()>0); + if(m_matrix.isCompressed()) + return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1]; + else + return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1]; + } + + EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? 
m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + + protected: + + typename SparseMatrixType::Nested m_matrix; + Index m_outerStart; + const internal::variable_if_dynamic m_outerSize; + +}; + + +template +class BlockImpl,BlockRows,BlockCols,true,Sparse> + : public SparseMatrixBase,BlockRows,BlockCols,true> > +{ + typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType; + typedef typename internal::remove_all::type _MatrixTypeNested; +public: + typedef Block BlockType; + enum { IsRowMajor = internal::traits::IsRowMajor }; + EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) +protected: + enum { OuterSize = IsRowMajor ? BlockRows : BlockCols }; +public: + + class InnerIterator: public SparseMatrixType::InnerIterator + { + public: + inline InnerIterator(const BlockType& xpr, Index outer) + : SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) + {} + inline Index row() const { return IsRowMajor ? m_outer : this->index(); } + inline Index col() const { return IsRowMajor ? this->index() : m_outer; } + protected: + Index m_outer; + }; + class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator + { + public: + inline ReverseInnerIterator(const BlockType& xpr, Index outer) + : SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) + {} + inline Index row() const { return IsRowMajor ? m_outer : this->index(); } + inline Index col() const { return IsRowMajor ? this->index() : m_outer; } + protected: + Index m_outer; + }; + + inline BlockImpl(const SparseMatrixType& xpr, int i) + : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) + {} + + inline BlockImpl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols) + : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols) + {} + + inline const Scalar* valuePtr() const + { return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; } + + inline const Index* innerIndexPtr() const + { return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; } + + inline const Index* outerIndexPtr() const + { return m_matrix.outerIndexPtr() + m_outerStart; } + + Index nonZeros() const + { + if(m_matrix.isCompressed()) + return std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]) + - std::size_t(m_matrix.outerIndexPtr()[m_outerStart]); + else if(m_outerSize.value()==0) + return 0; + else + return Map >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum(); + } + + inline const Scalar coeff(int row, int col) const + { + return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); + } + + inline const Scalar coeff(int index) const + { + return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart); + } + + const Scalar& lastCoeff() const + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockImpl); + eigen_assert(nonZeros()>0); + if(m_matrix.isCompressed()) + return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1]; + else + return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1]; + } + + EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? 
m_matrix.cols() : m_outerSize.value(); } + + protected: + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) + + typename SparseMatrixType::Nested m_matrix; + Index m_outerStart; + const internal::variable_if_dynamic m_outerSize; +}; + +//---------- + +/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this + * is col-major (resp. row-major). + */ +template +typename SparseMatrixBase::InnerVectorReturnType SparseMatrixBase::innerVector(Index outer) +{ return InnerVectorReturnType(derived(), outer); } + +/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this + * is col-major (resp. row-major). Read-only. + */ +template +const typename SparseMatrixBase::ConstInnerVectorReturnType SparseMatrixBase::innerVector(Index outer) const +{ return ConstInnerVectorReturnType(derived(), outer); } + +/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this + * is col-major (resp. row-major). + */ +template +typename SparseMatrixBase::InnerVectorsReturnType +SparseMatrixBase::innerVectors(Index outerStart, Index outerSize) +{ + return Block(derived(), + IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart, + IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize); + +} + +/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this + * is col-major (resp. row-major). Read-only. + */ +template +const typename SparseMatrixBase::ConstInnerVectorsReturnType +SparseMatrixBase::innerVectors(Index outerStart, Index outerSize) const +{ + return Block(derived(), + IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart, + IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize); + +} + +namespace internal { + +template< typename XprType, int BlockRows, int BlockCols, bool InnerPanel, + bool OuterVector = (BlockCols==1 && XprType::IsRowMajor) || (BlockRows==1 && !XprType::IsRowMajor)> +class GenericSparseBlockInnerIteratorImpl; + +} + +/** Generic implementation of sparse Block expression. + * Real-only. + */ +template +class BlockImpl + : public SparseMatrixBase >, internal::no_assignment_operator +{ + typedef typename internal::remove_all::type _MatrixTypeNested; + public: + typedef Block BlockType; + enum { IsRowMajor = internal::traits::IsRowMajor }; + EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) + + /** Column or Row constructor + */ + inline BlockImpl(const XprType& xpr, int i) + : m_matrix(xpr), + m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0), + m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0), + m_blockRows(BlockRows==1 ? 1 : xpr.rows()), + m_blockCols(BlockCols==1 ? 1 : xpr.cols()) + {} + + /** Dynamic-size constructor + */ + inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols) + : m_matrix(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols) + {} + + inline int rows() const { return m_blockRows.value(); } + inline int cols() const { return m_blockCols.value(); } + + inline Scalar& coeffRef(int row, int col) + { + return m_matrix.const_cast_derived() + .coeffRef(row + m_startRow.value(), col + m_startCol.value()); + } + + inline const Scalar coeff(int row, int col) const + { + return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value()); + } + + inline Scalar& coeffRef(int index) + { + return m_matrix.const_cast_derived() + .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 
0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); + } + + inline const Scalar coeff(int index) const + { + return m_matrix + .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); + } + + inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; } + + typedef internal::GenericSparseBlockInnerIteratorImpl InnerIterator; + + class ReverseInnerIterator : public _MatrixTypeNested::ReverseInnerIterator + { + typedef typename _MatrixTypeNested::ReverseInnerIterator Base; + const BlockType& m_block; + Index m_begin; + public: + + EIGEN_STRONG_INLINE ReverseInnerIterator(const BlockType& block, Index outer) + : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())), + m_block(block), + m_begin(IsRowMajor ? block.m_startCol.value() : block.m_startRow.value()) + { + while( (Base::operator bool()) && (Base::index() >= (IsRowMajor ? m_block.m_startCol.value()+block.m_blockCols.value() : m_block.m_startRow.value()+block.m_blockRows.value())) ) + Base::operator--(); + } + + inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } + inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); } + inline Index row() const { return Base::row() - m_block.m_startRow.value(); } + inline Index col() const { return Base::col() - m_block.m_startCol.value(); } + + inline operator bool() const { return Base::operator bool() && Base::index() >= m_begin; } + }; + protected: + friend class internal::GenericSparseBlockInnerIteratorImpl; + friend class ReverseInnerIterator; + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) + + typename XprType::Nested m_matrix; + const internal::variable_if_dynamic m_startRow; + const internal::variable_if_dynamic m_startCol; + const internal::variable_if_dynamic m_blockRows; + const internal::variable_if_dynamic m_blockCols; + private: + Index nonZeros() const; +}; + +namespace internal { + template + class GenericSparseBlockInnerIteratorImpl : public internal::remove_all::type::InnerIterator + { + public: + typedef Block BlockType; + enum { + IsRowMajor = BlockType::IsRowMajor + }; + typedef typename BlockType::Index Index; + protected: + typedef typename internal::remove_all::type _MatrixTypeNested; + typedef typename _MatrixTypeNested::InnerIterator Base; + const BlockType& m_block; + Index m_end; + public: + + EIGEN_STRONG_INLINE GenericSparseBlockInnerIteratorImpl(const BlockType& block, Index outer) + : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())), + m_block(block), + m_end(IsRowMajor ? block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value()) + { + while( (Base::operator bool()) && (Base::index() < (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value())) ) + Base::operator++(); + } + + inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } + inline Index outer() const { return Base::outer() - (IsRowMajor ? 
m_block.m_startRow.value() : m_block.m_startCol.value()); } + inline Index row() const { return Base::row() - m_block.m_startRow.value(); } + inline Index col() const { return Base::col() - m_block.m_startCol.value(); } + + inline operator bool() const { return Base::operator bool() && Base::index() < m_end; } + }; + + // Row vector of a column-major sparse matrix or column of a row-major one. + template + class GenericSparseBlockInnerIteratorImpl + { + public: + typedef Block BlockType; + enum { + IsRowMajor = BlockType::IsRowMajor + }; + typedef typename BlockType::Index Index; + typedef typename BlockType::Scalar Scalar; + protected: + typedef typename internal::remove_all::type _MatrixTypeNested; + const BlockType& m_block; + Index m_outerPos; + Index m_innerIndex; + Scalar m_value; + Index m_end; + public: + + EIGEN_STRONG_INLINE GenericSparseBlockInnerIteratorImpl(const BlockType& block, Index outer = 0) + : + m_block(block), + m_outerPos( (IsRowMajor ? block.m_startCol.value() : block.m_startRow.value()) - 1), // -1 so that operator++ finds the first non-zero entry + m_innerIndex(IsRowMajor ? block.m_startRow.value() : block.m_startCol.value()), + m_end(IsRowMajor ? block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value()) + { + EIGEN_UNUSED_VARIABLE(outer); + eigen_assert(outer==0); + + ++(*this); + } + + inline Index index() const { return m_outerPos - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } + inline Index outer() const { return 0; } + inline Index row() const { return IsRowMajor ? 0 : index(); } + inline Index col() const { return IsRowMajor ? index() : 0; } + + inline Scalar value() const { return m_value; } + + inline GenericSparseBlockInnerIteratorImpl& operator++() + { + // search next non-zero entry + while(m_outerPosm_data.resize(rows()); - Eigen::Map >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1); - Eigen::Map >(&this->m_data.value(0), rows()).setOnes(); + Eigen::Map >(this->m_data.indexPtr(), rows()).setLinSpaced(0, rows()-1); + Eigen::Map >(this->m_data.valuePtr(), rows()).setOnes(); Eigen::Map >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows()); std::free(m_innerNonZeros); m_innerNonZeros = 0; diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseMatrixBase.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseMatrixBase.h index 9341d9ad2..6f4a47cf5 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseMatrixBase.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseMatrixBase.h @@ -38,6 +38,7 @@ template class SparseMatrixBase typedef typename internal::packet_traits::type PacketScalar; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::Index Index; + typedef typename internal::traits::Index StorageIndex; typedef typename internal::add_const_on_value_type_if_arithmetic< typename internal::packet_traits::type >::type PacketReturnType; diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseRedux.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseRedux.h index f3da93a71..51ed9aeb1 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseRedux.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseRedux.h @@ -29,7 +29,10 @@ typename internal::traits >::Scalar SparseMatrix<_Scalar,_Options,_Index>::sum() const { eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); - return Matrix::Map(&m_data.value(0), m_data.size()).sum(); + if(this->isCompressed()) + return 
Matrix::Map(m_data.valuePtr(), m_data.size()).sum(); + else + return Base::sum(); } template @@ -37,7 +40,7 @@ typename internal::traits >::Scalar SparseVector<_Scalar,_Options,_Index>::sum() const { eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); - return Matrix::Map(&m_data.value(0), m_data.size()).sum(); + return Matrix::Map(m_data.valuePtr(), m_data.size()).sum(); } } // end namespace Eigen diff --git a/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h index fcc18f5c9..55b84a4eb 100644 --- a/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +++ b/gtsam/3rdparty/Eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h @@ -48,7 +48,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r res.resize(rows, cols); res.reserve(estimated_nnz_prod); - double ratioColRes = double(estimated_nnz_prod)/double(lhs.rows()*rhs.cols()); + double ratioColRes = double(estimated_nnz_prod)/(double(lhs.rows())*double(rhs.cols())); for (Index j=0; j + UmfPackLU(const InputMatrixType& matrix) { init(); compute(matrix); diff --git a/gtsam/3rdparty/Eigen/bench/btl/generic_bench/btl.hh b/gtsam/3rdparty/Eigen/bench/btl/generic_bench/btl.hh index f1a88ff74..86a8438cd 100644 --- a/gtsam/3rdparty/Eigen/bench/btl/generic_bench/btl.hh +++ b/gtsam/3rdparty/Eigen/bench/btl/generic_bench/btl.hh @@ -44,15 +44,10 @@ #define BTL_ASM_COMMENT(X) #endif -#if (defined __GNUC__) && (!defined __INTEL_COMPILER) && !defined(__arm__) && !defined(__powerpc__) -#define BTL_DISABLE_SSE_EXCEPTIONS() { \ - int aux; \ - asm( \ - "stmxcsr %[aux] \n\t" \ - "orl $32832, %[aux] \n\t" \ - "ldmxcsr %[aux] \n\t" \ - : : [aux] "m" (aux)); \ -} +#ifdef __SSE__ +#include "xmmintrin.h" +// This enables flush to zero (FTZ) and denormals are zero (DAZ) modes: +#define BTL_DISABLE_SSE_EXCEPTIONS() { _mm_setcsr(_mm_getcsr() | 0x8040); } #else #define BTL_DISABLE_SSE_EXCEPTIONS() #endif diff --git a/gtsam/3rdparty/Eigen/cmake/EigenConfigureTesting.cmake b/gtsam/3rdparty/Eigen/cmake/EigenConfigureTesting.cmake index 2b11d8360..b5b62aee6 100644 --- a/gtsam/3rdparty/Eigen/cmake/EigenConfigureTesting.cmake +++ b/gtsam/3rdparty/Eigen/cmake/EigenConfigureTesting.cmake @@ -1,7 +1,7 @@ include(EigenTesting) include(CheckCXXSourceCompiles) -# configure the "site" and "buildname" +# configure the "site" and "buildname" ei_set_sitename() # retrieve and store the build string @@ -14,17 +14,10 @@ add_dependencies(check buildtests) # check whether /bin/bash exists find_file(EIGEN_BIN_BASH_EXISTS "/bin/bash" PATHS "/" NO_DEFAULT_PATH) -# CMake/Ctest does not allow us to change the build command, -# so we have to workaround by directly editing the generated DartConfiguration.tcl file -# save CMAKE_MAKE_PROGRAM -set(CMAKE_MAKE_PROGRAM_SAVE ${CMAKE_MAKE_PROGRAM}) -# and set a fake one -set(CMAKE_MAKE_PROGRAM "@EIGEN_MAKECOMMAND_PLACEHOLDER@") - # This call activates testing and generates the DartConfiguration.tcl include(CTest) -set(EIGEN_TEST_BUILD_FLAGS " " CACHE STRING "Options passed to the build command of unit tests") +set(EIGEN_TEST_BUILD_FLAGS "" CACHE STRING "Options passed to the build command of unit tests") # Overwrite default DartConfiguration.tcl such that ctest can build our unit tests. # Recall that our unit tests are not in the "all" target, so we have to explicitely ask ctest to build our custom 'buildtests' target. 
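Note on the btl.hh hunk above: the rewritten BTL_DISABLE_SSE_EXCEPTIONS macro replaces the GCC-only inline assembly with the portable SSE intrinsics from xmmintrin.h. The constant 0x8040 it ORs into MXCSR is the combination of bit 15 (FTZ, flush-to-zero) and bit 6 (DAZ, denormals-are-zero). A minimal standalone sketch of the same operation, not part of the patch, only to make the bit values explicit:

    // enable FTZ and DAZ on x86 with SSE, equivalent to what the macro does
    #include <xmmintrin.h>

    int main()
    {
      const unsigned int FTZ = 0x8000;  // MXCSR bit 15: flush-to-zero
      const unsigned int DAZ = 0x0040;  // MXCSR bit 6:  denormals-are-zero
      _mm_setcsr(_mm_getcsr() | FTZ | DAZ);  // same as | 0x8040 in the macro
      return 0;
    }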
diff --git a/gtsam/3rdparty/Eigen/cmake/EigenDetermineOSVersion.cmake b/gtsam/3rdparty/Eigen/cmake/EigenDetermineOSVersion.cmake index 3c48d4c37..9246fa67c 100644 --- a/gtsam/3rdparty/Eigen/cmake/EigenDetermineOSVersion.cmake +++ b/gtsam/3rdparty/Eigen/cmake/EigenDetermineOSVersion.cmake @@ -26,7 +26,7 @@ function(DetermineShortWindowsName WIN_VERSION win_num_version) endfunction() function(DetermineOSVersion OS_VERSION) - if (WIN32) + if (WIN32 AND CMAKE_HOST_SYSTEM_NAME MATCHES Windows) file (TO_NATIVE_PATH "$ENV{COMSPEC}" SHELL) exec_program( ${SHELL} ARGS "/c" "ver" OUTPUT_VARIABLE ver_output) diff --git a/gtsam/3rdparty/Eigen/doc/SparseLinearSystems.dox b/gtsam/3rdparty/Eigen/doc/SparseLinearSystems.dox index 051a02ed9..b66e2ba2b 100644 --- a/gtsam/3rdparty/Eigen/doc/SparseLinearSystems.dox +++ b/gtsam/3rdparty/Eigen/doc/SparseLinearSystems.dox @@ -46,6 +46,9 @@ They are summarized in the following table: SPQR\link SPQRSupport_Module SPQRSupport \endlink QR factorization Any, rectangularfill-in reducing, multithreaded, fast dense algebra requires the SuiteSparse package, \b GPL recommended for linear least-squares problems, has a rank-revealing feature +PardisoLLT \n PardisoLDLT \n PardisoLU\link PardisoSupport_Module PardisoSupport \endlinkDirect LLt, LDLt, LU factorizationsSPD \n SPD \n SquareFill-in reducing, Leverage fast dense algebra, Multithreading + Requires the Intel MKL package, \b Proprietary + optimized for tough problems patterns, see also \link TopicUsingIntelMKL using MKL with Eigen \endlink Here \c SPD means symmetric positive definite. diff --git a/gtsam/3rdparty/Eigen/doc/TopicAssertions.dox b/gtsam/3rdparty/Eigen/doc/TopicAssertions.dox index 4ead40174..c8b4d84f2 100644 --- a/gtsam/3rdparty/Eigen/doc/TopicAssertions.dox +++ b/gtsam/3rdparty/Eigen/doc/TopicAssertions.dox @@ -16,7 +16,7 @@ Both eigen_assert and eigen_plain_assert are defined in Macros.h. 
Defining eigen #include #undef eigen_assert #define eigen_assert(x) \ - if (!x) { throw (std::runtime_error("Put your message here")); } + if (!(x)) { throw (std::runtime_error("Put your message here")); } \endcode \subsection DisableAssert Disabling assertions diff --git a/gtsam/3rdparty/Eigen/test/CMakeLists.txt b/gtsam/3rdparty/Eigen/test/CMakeLists.txt index c0d8a4e28..40c8f669d 100644 --- a/gtsam/3rdparty/Eigen/test/CMakeLists.txt +++ b/gtsam/3rdparty/Eigen/test/CMakeLists.txt @@ -125,6 +125,7 @@ endif(TEST_LIB) set_property(GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT "Official") add_custom_target(BuildOfficial) +ei_add_test(rand) ei_add_test(meta) ei_add_test(sizeof) ei_add_test(dynalloc) diff --git a/gtsam/3rdparty/Eigen/test/commainitializer.cpp b/gtsam/3rdparty/Eigen/test/commainitializer.cpp index 99102b966..296592340 100644 --- a/gtsam/3rdparty/Eigen/test/commainitializer.cpp +++ b/gtsam/3rdparty/Eigen/test/commainitializer.cpp @@ -9,14 +9,69 @@ #include "main.h" + +template +void test_blocks() +{ + Matrix m_fixed; + MatrixXi m_dynamic(M1+M2, N1+N2); + + Matrix mat11; mat11.setRandom(); + Matrix mat12; mat12.setRandom(); + Matrix mat21; mat21.setRandom(); + Matrix mat22; mat22.setRandom(); + + MatrixXi matx11 = mat11, matx12 = mat12, matx21 = mat21, matx22 = mat22; + + { + VERIFY_IS_EQUAL((m_fixed << mat11, mat12, mat21, matx22).finished(), (m_dynamic << mat11, matx12, mat21, matx22).finished()); + VERIFY_IS_EQUAL((m_fixed.template topLeftCorner()), mat11); + VERIFY_IS_EQUAL((m_fixed.template topRightCorner()), mat12); + VERIFY_IS_EQUAL((m_fixed.template bottomLeftCorner()), mat21); + VERIFY_IS_EQUAL((m_fixed.template bottomRightCorner()), mat22); + VERIFY_IS_EQUAL((m_fixed << mat12, mat11, matx21, mat22).finished(), (m_dynamic << mat12, matx11, matx21, mat22).finished()); + } + + if(N1 > 0) + { + VERIFY_RAISES_ASSERT((m_fixed << mat11, mat12, mat11, mat21, mat22)); + VERIFY_RAISES_ASSERT((m_fixed << mat11, mat12, mat21, mat21, mat22)); + } + else + { + // allow insertion of zero-column blocks: + VERIFY_IS_EQUAL((m_fixed << mat11, mat12, mat11, mat11, mat21, mat21, mat22).finished(), (m_dynamic << mat12, mat22).finished()); + } + if(M1 != M2) + { + VERIFY_RAISES_ASSERT((m_fixed << mat11, mat21, mat12, mat22)); + } +} + + +template +struct test_block_recursion +{ + static void run() + { + test_blocks<(N>>6)&3, (N>>4)&3, (N>>2)&3, N & 3>(); + test_block_recursion::run(); + } +}; + +template<> +struct test_block_recursion<-1> +{ + static void run() { } +}; + void test_commainitializer() { Matrix3d m3; Matrix4d m4; - VERIFY_RAISES_ASSERT( (m3 << 1, 2, 3, 4, 5, 6, 7, 8) ); - #ifndef _MSC_VER + VERIFY_RAISES_ASSERT( (m3 << 1, 2, 3, 4, 5, 6, 7, 8) ); VERIFY_RAISES_ASSERT( (m3 << 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) ); #endif @@ -43,4 +98,7 @@ void test_commainitializer() 4, 5, 6, vec[2].transpose(); VERIFY_IS_APPROX(m3, ref); + + // recursively test all block-sizes from 0 to 3: + test_block_recursion<(1<<8) - 1>(); } diff --git a/gtsam/3rdparty/Eigen/test/cwiseop.cpp b/gtsam/3rdparty/Eigen/test/cwiseop.cpp index e3361da17..d13002cae 100644 --- a/gtsam/3rdparty/Eigen/test/cwiseop.cpp +++ b/gtsam/3rdparty/Eigen/test/cwiseop.cpp @@ -164,9 +164,12 @@ template void cwiseops(const MatrixType& m) VERIFY( (m1.cwise().min(m2).cwise() < (m1+mones)).all() ); VERIFY( (m1.cwise().max(m2).cwise() > (m1-mones)).all() ); +#if(__cplusplus < 201103L) +// std::binder* are deprecated since c++11 and will be removed in c++17 VERIFY( (m1.cwise()(), Scalar(1)))).all() ); VERIFY( !(m1.cwise()(), Scalar(1)))).all() ); 
VERIFY( !(m1.cwise()>m1bis.unaryExpr(bind2nd(plus(), Scalar(1)))).any() ); +#endif cwiseops_real_only(m1, m2, m3, mones); } diff --git a/gtsam/3rdparty/Eigen/test/eigen2/eigen2_cwiseop.cpp b/gtsam/3rdparty/Eigen/test/eigen2/eigen2_cwiseop.cpp index 22e1cc342..a36edd473 100644 --- a/gtsam/3rdparty/Eigen/test/eigen2/eigen2_cwiseop.cpp +++ b/gtsam/3rdparty/Eigen/test/eigen2/eigen2_cwiseop.cpp @@ -137,9 +137,12 @@ template void cwiseops(const MatrixType& m) VERIFY( (m1.cwise().min(m2).cwise() < (m1+mones)).all() ); VERIFY( (m1.cwise().max(m2).cwise() > (m1-mones)).all() ); +#if(__cplusplus < 201103L) +// std::binder* are deprecated since c++11 and will be removed in c++17 VERIFY( (m1.cwise()(), Scalar(1)))).all() ); VERIFY( !(m1.cwise()(), Scalar(1)))).all() ); VERIFY( !(m1.cwise()>m1.unaryExpr(bind2nd(plus(), Scalar(1)))).any() ); +#endif } void test_eigen2_cwiseop() diff --git a/gtsam/3rdparty/Eigen/test/geo_homogeneous.cpp b/gtsam/3rdparty/Eigen/test/geo_homogeneous.cpp index c91bde819..01330308a 100644 --- a/gtsam/3rdparty/Eigen/test/geo_homogeneous.cpp +++ b/gtsam/3rdparty/Eigen/test/geo_homogeneous.cpp @@ -54,6 +54,8 @@ template void homogeneous(void) T2MatrixType t2 = T2MatrixType::Random(); VERIFY_IS_APPROX(t2 * (v0.homogeneous().eval()), t2 * v0.homogeneous()); VERIFY_IS_APPROX(t2 * (m0.colwise().homogeneous().eval()), t2 * m0.colwise().homogeneous()); + VERIFY_IS_APPROX(t2 * (v0.homogeneous().asDiagonal()), t2 * hv0.asDiagonal()); + VERIFY_IS_APPROX((v0.homogeneous().asDiagonal()) * t2, hv0.asDiagonal() * t2); VERIFY_IS_APPROX((v0.transpose().rowwise().homogeneous().eval()) * t2, v0.transpose().rowwise().homogeneous() * t2); diff --git a/gtsam/3rdparty/Eigen/test/geo_transformations.cpp b/gtsam/3rdparty/Eigen/test/geo_transformations.cpp index 547765714..383c42bad 100644 --- a/gtsam/3rdparty/Eigen/test/geo_transformations.cpp +++ b/gtsam/3rdparty/Eigen/test/geo_transformations.cpp @@ -320,6 +320,9 @@ template void transformations() t0.scale(v0); t1 *= AlignedScaling3(v0); VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); + t1 = AlignedScaling3(v0) * (Translation3(v0) * Transform3(q1)); + t1 = t1 * v0.asDiagonal(); + VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); // transformation * translation t0.translate(v0); t1 = t1 * Translation3(v0); @@ -437,6 +440,79 @@ template void transformations() Rotation2D r2(r1); // copy ctor VERIFY_IS_APPROX(r2.angle(),s0); } + + { + Transform3 t32(Matrix4::Random()), t33, t34; + t34 = t33 = t32; + t32.scale(v0); + t33*=AlignedScaling3(v0); + VERIFY_IS_APPROX(t32.matrix(), t33.matrix()); + t33 = t34 * AlignedScaling3(v0); + VERIFY_IS_APPROX(t32.matrix(), t33.matrix()); + } + +} + +template +void transform_associativity_left(const A1& a1, const A2& a2, const P& p, const Q& q, const V& v, const H& h) +{ + VERIFY_IS_APPROX( q*(a1*v), (q*a1)*v ); + VERIFY_IS_APPROX( q*(a2*v), (q*a2)*v ); + VERIFY_IS_APPROX( q*(p*h).hnormalized(), ((q*p)*h).hnormalized() ); +} + +template +void transform_associativity2(const A1& a1, const A2& a2, const P& p, const Q& q, const V& v, const H& h) +{ + VERIFY_IS_APPROX( a1*(q*v), (a1*q)*v ); + VERIFY_IS_APPROX( a2*(q*v), (a2*q)*v ); + VERIFY_IS_APPROX( p *(q*v).homogeneous(), (p *q)*v.homogeneous() ); + + transform_associativity_left(a1, a2,p, q, v, h); +} + +template +void transform_associativity(const RotationType& R) +{ + typedef Matrix VectorType; + typedef Matrix HVectorType; + typedef Matrix LinearType; + typedef Matrix MatrixType; + typedef Transform AffineCompactType; + typedef Transform AffineType; + typedef Transform 
ProjectiveType; + typedef DiagonalMatrix ScalingType; + typedef Translation TranslationType; + + AffineCompactType A1c; A1c.matrix().setRandom(); + AffineCompactType A2c; A2c.matrix().setRandom(); + AffineType A1(A1c); + AffineType A2(A2c); + ProjectiveType P1; P1.matrix().setRandom(); + VectorType v1 = VectorType::Random(); + VectorType v2 = VectorType::Random(); + HVectorType h1 = HVectorType::Random(); + Scalar s1 = internal::random(); + LinearType L = LinearType::Random(); + MatrixType M = MatrixType::Random(); + + CALL_SUBTEST( transform_associativity2(A1c, A1, P1, A2, v2, h1) ); + CALL_SUBTEST( transform_associativity2(A1c, A1, P1, A2c, v2, h1) ); + CALL_SUBTEST( transform_associativity2(A1c, A1, P1, v1.asDiagonal(), v2, h1) ); + CALL_SUBTEST( transform_associativity2(A1c, A1, P1, ScalingType(v1), v2, h1) ); + CALL_SUBTEST( transform_associativity2(A1c, A1, P1, Scaling(v1), v2, h1) ); + CALL_SUBTEST( transform_associativity2(A1c, A1, P1, Scaling(s1), v2, h1) ); + CALL_SUBTEST( transform_associativity2(A1c, A1, P1, TranslationType(v1), v2, h1) ); + CALL_SUBTEST( transform_associativity_left(A1c, A1, P1, L, v2, h1) ); + CALL_SUBTEST( transform_associativity2(A1c, A1, P1, R, v2, h1) ); + + VERIFY_IS_APPROX( A1*(M*h1), (A1*M)*h1 ); + VERIFY_IS_APPROX( A1c*(M*h1), (A1c*M)*h1 ); + VERIFY_IS_APPROX( P1*(M*h1), (P1*M)*h1 ); + + VERIFY_IS_APPROX( M*(A1*h1), (M*A1)*h1 ); + VERIFY_IS_APPROX( M*(A1c*h1), (M*A1c)*h1 ); + VERIFY_IS_APPROX( M*(P1*h1), ((M*P1)*h1) ); } template void transform_alignment() @@ -517,5 +593,8 @@ void test_geo_transformations() CALL_SUBTEST_7(( transform_products() )); CALL_SUBTEST_7(( transform_products() )); + + CALL_SUBTEST_8(( transform_associativity(Rotation2D(internal::random()*double(3.14))) )); + CALL_SUBTEST_8(( transform_associativity(Quaterniond(Vector4d::Random().normalized())) )); } } diff --git a/gtsam/3rdparty/Eigen/test/packetmath.cpp b/gtsam/3rdparty/Eigen/test/packetmath.cpp index 38aa256ce..bac7b02d1 100644 --- a/gtsam/3rdparty/Eigen/test/packetmath.cpp +++ b/gtsam/3rdparty/Eigen/test/packetmath.cpp @@ -327,6 +327,7 @@ template void test_conj_helper(Scalar ref[i] += cj0(data1[i]) * cj1(data2[i]); VERIFY(internal::isApprox(ref[i], cj.pmadd(data1[i],data2[i],tmp)) && "conj_helper pmadd"); } + *pval += 0; // Workaround msvc 2013 issue (bad code generation) internal::pstore(pval,pcj.pmadd(internal::pload(data1),internal::pload(data2),internal::pload(pval))); VERIFY(areApprox(ref, pval, PacketSize) && "conj_helper pmadd"); } diff --git a/gtsam/3rdparty/Eigen/test/prec_inverse_4x4.cpp b/gtsam/3rdparty/Eigen/test/prec_inverse_4x4.cpp index c4ef2d4bd..eb6ad18c9 100644 --- a/gtsam/3rdparty/Eigen/test/prec_inverse_4x4.cpp +++ b/gtsam/3rdparty/Eigen/test/prec_inverse_4x4.cpp @@ -53,14 +53,29 @@ template void inverse_general_4x4(int repeat) // FIXME that 1.25 used to be 1.2 until we tested gcc 4.1 on 30 June 2010 and got 1.21. VERIFY(error_avg < (NumTraits::IsComplex ? 8.0 : 1.25)); VERIFY(error_max < (NumTraits::IsComplex ? 
64.0 : 20.0)); + + { + int s = 5;//internal::random(4,10); + int i = 0;//internal::random(0,s-4); + int j = 0;//internal::random(0,s-4); + Matrix mat(s,s); + mat.setRandom(); + MatrixType submat = mat.template block<4,4>(i,j); + MatrixType mat_inv = mat.template block<4,4>(i,j).inverse(); + VERIFY_IS_APPROX(mat_inv, submat.inverse()); + mat.template block<4,4>(i,j) = submat.inverse(); + VERIFY_IS_APPROX(mat_inv, (mat.template block<4,4>(i,j))); + } } void test_prec_inverse_4x4() { CALL_SUBTEST_1((inverse_permutation_4x4())); CALL_SUBTEST_1(( inverse_general_4x4(200000 * g_repeat) )); + CALL_SUBTEST_1(( inverse_general_4x4 >(200000 * g_repeat) )); CALL_SUBTEST_2((inverse_permutation_4x4 >())); + CALL_SUBTEST_2(( inverse_general_4x4 >(200000 * g_repeat) )); CALL_SUBTEST_2(( inverse_general_4x4 >(200000 * g_repeat) )); CALL_SUBTEST_3((inverse_permutation_4x4())); diff --git a/gtsam/3rdparty/Eigen/test/product.h b/gtsam/3rdparty/Eigen/test/product.h index 0d054ff46..397af24ae 100644 --- a/gtsam/3rdparty/Eigen/test/product.h +++ b/gtsam/3rdparty/Eigen/test/product.h @@ -178,4 +178,12 @@ template void product(const MatrixType& m) // CwiseUnaryOp VERIFY_IS_APPROX(x = Scalar(1.)*(A*x), A*z); } + + // regression for blas_trais + { + VERIFY_IS_APPROX(square * (square*square).transpose(), square * square.transpose() * square.transpose()); + VERIFY_IS_APPROX(square * (-(square*square)), -square * square * square); + VERIFY_IS_APPROX(square * (s1*(square*square)), s1 * square * square * square); + VERIFY_IS_APPROX(square * (square*square).conjugate(), square * square.conjugate() * square.conjugate()); + } } diff --git a/gtsam/3rdparty/Eigen/test/sparse_basic.cpp b/gtsam/3rdparty/Eigen/test/sparse_basic.cpp index abe6a9d14..2df7b63a7 100644 --- a/gtsam/3rdparty/Eigen/test/sparse_basic.cpp +++ b/gtsam/3rdparty/Eigen/test/sparse_basic.cpp @@ -299,6 +299,14 @@ template void sparse_basic(const SparseMatrixType& re VERIFY_IS_APPROX(m1.innerVector(0).dot(refM2.row(0)), refM1.row(0).dot(refM2.row(0))); else VERIFY_IS_APPROX(m1.innerVector(0).dot(refM2.row(0)), refM1.col(0).dot(refM2.row(0))); + + DenseVector rv = DenseVector::Random(m1.cols()); + DenseVector cv = DenseVector::Random(m1.rows()); + Index r = internal::random(0,m1.rows()-2); + Index c = internal::random(0,m1.cols()-1); + VERIFY_IS_APPROX(( m1.template block<1,Dynamic>(r,0,1,m1.cols()).dot(rv)) , refM1.row(r).dot(rv)); + VERIFY_IS_APPROX(m1.row(r).dot(rv), refM1.row(r).dot(rv)); + VERIFY_IS_APPROX(m1.col(c).dot(cv), refM1.col(c).dot(cv)); VERIFY_IS_APPROX(m1.conjugate(), refM1.conjugate()); VERIFY_IS_APPROX(m1.real(), refM1.real()); diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h index 6825a7882..88dba54f5 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h @@ -293,7 +293,7 @@ void MatrixExponential::computeUV(float) const float maxnorm = 3.925724783138660f; frexp(m_l1norm / maxnorm, &m_squarings); if (m_squarings < 0) m_squarings = 0; - MatrixType A = m_M / pow(Scalar(2), m_squarings); + MatrixType A = m_M / Scalar(pow(2, m_squarings)); pade7(A); } } @@ -315,7 +315,7 @@ void MatrixExponential::computeUV(double) const double maxnorm = 5.371920351148152; frexp(m_l1norm / maxnorm, &m_squarings); if (m_squarings < 0) m_squarings = 0; - MatrixType A = m_M / pow(Scalar(2), m_squarings); + MatrixType A = m_M 
/ Scalar(pow(2, m_squarings)); pade13(A); } } @@ -340,7 +340,7 @@ void MatrixExponential::computeUV(long double) const long double maxnorm = 4.0246098906697353063L; frexp(m_l1norm / maxnorm, &m_squarings); if (m_squarings < 0) m_squarings = 0; - MatrixType A = m_M / pow(Scalar(2), m_squarings); + MatrixType A = m_M / Scalar(pow(2, m_squarings)); pade13(A); } #elif LDBL_MANT_DIG <= 106 // double-double @@ -376,7 +376,7 @@ void MatrixExponential::computeUV(long double) const long double maxnorm = 2.884233277829519311757165057717815L; frexp(m_l1norm / maxnorm, &m_squarings); if (m_squarings < 0) m_squarings = 0; - MatrixType A = m_M / pow(Scalar(2), m_squarings); + MatrixType A = m_M / Scalar(pow(2, m_squarings)); pade17(A); } #else diff --git a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/SplineFwd.h b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/SplineFwd.h index 49db8d35d..9ea23a9a1 100644 --- a/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/SplineFwd.h +++ b/gtsam/3rdparty/Eigen/unsupported/Eigen/src/Splines/SplineFwd.h @@ -31,6 +31,8 @@ namespace Eigen enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ }; enum { NumOfDerivativesAtCompileTime = OrderAtCompileTime /*!< The number of derivatives defined for the current spline. */ }; + + enum { DerivativeMemoryLayout = Dimension==1 ? RowMajor : ColMajor /*!< The derivative type's memory layout. */ }; /** \brief The data type used to store non-zero basis functions. */ typedef Array BasisVectorType; @@ -39,7 +41,7 @@ namespace Eigen typedef Array BasisDerivativeType; /** \brief The data type used to store the spline's derivative values. */ - typedef Array DerivativeType; + typedef Array DerivativeType; /** \brief The point type the spline is representing. */ typedef Array PointType; @@ -62,12 +64,14 @@ namespace Eigen { enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ }; enum { NumOfDerivativesAtCompileTime = _DerivativeOrder==Dynamic ? Dynamic : _DerivativeOrder+1 /*!< The number of derivatives defined for the current spline. */ }; + + enum { DerivativeMemoryLayout = _Dim==1 ? RowMajor : ColMajor /*!< The derivative type's memory layout. */ }; /** \brief The data type used to store the values of the basis function derivatives. */ typedef Array<_Scalar,Dynamic,Dynamic,RowMajor,NumOfDerivativesAtCompileTime,OrderAtCompileTime> BasisDerivativeType; /** \brief The data type used to store the spline's derivative values. */ - typedef Array<_Scalar,_Dim,Dynamic,ColMajor,_Dim,NumOfDerivativesAtCompileTime> DerivativeType; + typedef Array<_Scalar,_Dim,Dynamic,DerivativeMemoryLayout,_Dim,NumOfDerivativesAtCompileTime> DerivativeType; }; /** \brief 2D float B-spline with dynamic degree. 
*/ diff --git a/gtsam/3rdparty/Eigen/unsupported/test/autodiff.cpp b/gtsam/3rdparty/Eigen/unsupported/test/autodiff.cpp index 087e7c542..7c112a1f0 100644 --- a/gtsam/3rdparty/Eigen/unsupported/test/autodiff.cpp +++ b/gtsam/3rdparty/Eigen/unsupported/test/autodiff.cpp @@ -162,6 +162,15 @@ void test_autodiff_jacobian() CALL_SUBTEST(( forward_jacobian(TestFunc1(3,3)) )); } +double bug_1222() { + typedef Eigen::AutoDiffScalar AD; + const double _cv1_3 = 1.0; + const AD chi_3 = 1.0; + // this line did not work, because operator+ returns ADS, which then cannot be converted to ADS + const AD denom = chi_3 + _cv1_3; + return denom.value(); +} + void test_autodiff() { for(int i = 0; i < g_repeat; i++) { @@ -169,5 +178,7 @@ void test_autodiff() CALL_SUBTEST_2( test_autodiff_vector() ); CALL_SUBTEST_3( test_autodiff_jacobian() ); } + + bug_1222(); } From b0c57d34dbe03a8365b0c2446656839e203bccfc Mon Sep 17 00:00:00 2001 From: chrisbeall Date: Fri, 14 Oct 2016 16:30:03 -0700 Subject: [PATCH 07/10] fix typename --- gtsam/slam/BetweenFactor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/slam/BetweenFactor.h b/gtsam/slam/BetweenFactor.h index bfd7d4e34..e6aab45da 100644 --- a/gtsam/slam/BetweenFactor.h +++ b/gtsam/slam/BetweenFactor.h @@ -92,7 +92,7 @@ namespace gtsam { T hx = traits::Between(p1, p2, H1, H2); // h(x) // manifold equivalent of h(x)-z -> log(z,h(x)) #ifdef SLOW_BUT_CORRECT_BETWEENFACTOR - typename traits::ChartJacobian::Fixed Hlocal; + typename traits::ChartJacobian::Jacobian Hlocal; Vector rval = traits::Local(measured_, hx, boost::none, (H1 || H2) ? &Hlocal : 0); if (H1) *H1 = Hlocal * (*H1); if (H1) *H2 = Hlocal * (*H2); From 799c5d8bb90d929da369d9b1caaef0081458d8bd Mon Sep 17 00:00:00 2001 From: = Date: Tue, 18 Oct 2016 14:00:12 -0400 Subject: [PATCH 08/10] Replace iostream with iosfwd. 
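The motivation: <iosfwd> only forward-declares the standard stream classes, whereas <iostream> drags the full stream definitions and the static initialization of std::cout/std::cerr into every translation unit that includes the header. A header that merely mentions std::ostream& in declarations compiles fine against <iosfwd>; the full <ostream> is only needed where the operators are actually defined. A hedged two-file sketch of the pattern (hypothetical Point type, not GTSAM code):

    // point.h -- declaration only, so the lightweight header suffices
    #include <iosfwd>   // forward-declares std::ostream

    struct Point { double x, y; };
    std::ostream& operator<<(std::ostream& os, const Point& p);

    // point.cpp -- the definition needs the real stream class
    #include <ostream>  // full definition of std::ostream and its inserters

    std::ostream& operator<<(std::ostream& os, const Point& p) {
      return os << "(" << p.x << ", " << p.y << ")";
    }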
--- gtsam/geometry/Quaternion.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/geometry/Quaternion.h b/gtsam/geometry/Quaternion.h index af9481181..4f5495263 100644 --- a/gtsam/geometry/Quaternion.h +++ b/gtsam/geometry/Quaternion.h @@ -21,7 +21,7 @@ #include #include // Logmap/Expmap derivatives #include -#include +#include #define QUATERNION_TYPE Eigen::Quaternion<_Scalar,_Options> From 08c9493b6dd8922ae2dc753150ff38d24ab93ff9 Mon Sep 17 00:00:00 2001 From: chrisbeall Date: Thu, 20 Oct 2016 18:32:22 -0700 Subject: [PATCH 09/10] fix include --- gtsam/base/SymmetricBlockMatrix.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/base/SymmetricBlockMatrix.h b/gtsam/base/SymmetricBlockMatrix.h index da3a9a8b8..53a8912f6 100644 --- a/gtsam/base/SymmetricBlockMatrix.h +++ b/gtsam/base/SymmetricBlockMatrix.h @@ -24,7 +24,7 @@ #include #include #include -#include +#include namespace boost { namespace serialization { From efd966b45adc9bd2c9a49b436889cb0010b440ef Mon Sep 17 00:00:00 2001 From: chrisbeall Date: Tue, 1 Nov 2016 15:11:57 -0400 Subject: [PATCH 10/10] Move print methods to cpp files wherever possible --- gtsam/base/GenericValue.h | 3 +-- gtsam/geometry/Cal3_S2Stereo.cpp | 39 +++++++++++++++++++++++++++++++ gtsam/geometry/Cal3_S2Stereo.h | 12 +++------- gtsam/geometry/Quaternion.h | 2 +- gtsam/geometry/SO3.cpp | 7 ++++++ gtsam/geometry/SO3.h | 6 ++--- gtsam_unstable/geometry/Event.cpp | 14 +++++++++++ gtsam_unstable/geometry/Event.h | 11 +++------ 8 files changed, 70 insertions(+), 24 deletions(-) create mode 100644 gtsam/geometry/Cal3_S2Stereo.cpp diff --git a/gtsam/base/GenericValue.h b/gtsam/base/GenericValue.h index 7c4d0398d..5b59f4872 100644 --- a/gtsam/base/GenericValue.h +++ b/gtsam/base/GenericValue.h @@ -26,9 +26,8 @@ #include #include -#include -#include // operator typeid #include +#include // operator typeid namespace gtsam { diff --git a/gtsam/geometry/Cal3_S2Stereo.cpp b/gtsam/geometry/Cal3_S2Stereo.cpp new file mode 100644 index 000000000..22966ee37 --- /dev/null +++ b/gtsam/geometry/Cal3_S2Stereo.cpp @@ -0,0 +1,39 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file Cal3_S2Stereo.cpp + * @brief The most common 5DOF 3D->2D calibration + Stereo baseline + * @author Chris Beall + */ + +#include + +#include + +namespace gtsam { +using namespace std; + +/* ************************************************************************* */ +void Cal3_S2Stereo::print(const std::string& s) const { + K_.print(s+"K: "); + std::cout << s << "Baseline: " << b_ << std::endl; + } + +/* ************************************************************************* */ +bool Cal3_S2Stereo::equals(const Cal3_S2Stereo& other, double tol) const { + if (fabs(b_ - other.b_) > tol) return false; + return K_.equals(other.K_,tol); +} + +/* ************************************************************************* */ + +} // namespace gtsam diff --git a/gtsam/geometry/Cal3_S2Stereo.h b/gtsam/geometry/Cal3_S2Stereo.h index 29ccd194d..db49448ec 100644 --- a/gtsam/geometry/Cal3_S2Stereo.h +++ b/gtsam/geometry/Cal3_S2Stereo.h @@ -18,7 +18,7 @@ #pragma once #include -#include +#include namespace gtsam { @@ -63,16 +63,10 @@ namespace gtsam { /// @name Testable /// @{ - void print(const std::string& s = "") const { - K_.print(s+"K: "); - std::cout << s << "Baseline: " << b_ << std::endl; - } + void print(const std::string& s = "") const; /// Check if equal up to specified tolerance - bool equals(const Cal3_S2Stereo& other, double tol = 10e-9) const { - if (fabs(b_ - other.b_) > tol) return false; - return K_.equals(other.K_,tol); - } + bool equals(const Cal3_S2Stereo& other, double tol = 10e-9) const; /// @} /// @name Standard Interface diff --git a/gtsam/geometry/Quaternion.h b/gtsam/geometry/Quaternion.h index 4f5495263..af9481181 100644 --- a/gtsam/geometry/Quaternion.h +++ b/gtsam/geometry/Quaternion.h @@ -21,7 +21,7 @@ #include #include // Logmap/Expmap derivatives #include -#include +#include #define QUATERNION_TYPE Eigen::Quaternion<_Scalar,_Options> diff --git a/gtsam/geometry/SO3.cpp b/gtsam/geometry/SO3.cpp index bd111d9b1..07c03ab49 100644 --- a/gtsam/geometry/SO3.cpp +++ b/gtsam/geometry/SO3.cpp @@ -22,6 +22,7 @@ #include #include #include +#include namespace gtsam { @@ -116,6 +117,12 @@ SO3 SO3::AxisAngle(const Vector3& axis, double theta) { return so3::ExpmapFunctor(axis, theta).expmap(); } +/* ************************************************************************* */ +void SO3::print(const std::string& s) const { + std::cout << s << *this << std::endl; + } + +/* ************************************************************************* */ SO3 SO3::Expmap(const Vector3& omega, ChartJacobian H) { if (H) { so3::DexpFunctor impl(omega); diff --git a/gtsam/geometry/SO3.h b/gtsam/geometry/SO3.h index 0396fbce0..53f2c2130 100644 --- a/gtsam/geometry/SO3.h +++ b/gtsam/geometry/SO3.h @@ -24,7 +24,7 @@ #include #include -#include +#include namespace gtsam { @@ -68,9 +68,7 @@ public: /// @name Testable /// @{ - void print(const std::string& s) const { - std::cout << s << *this << std::endl; - } + void print(const std::string& s) const; bool equals(const SO3 & R, double tol) const { return equal_with_abs_tol(*this, R, tol); diff --git a/gtsam_unstable/geometry/Event.cpp b/gtsam_unstable/geometry/Event.cpp index dda2c32e6..c503514a6 100644 --- a/gtsam_unstable/geometry/Event.cpp +++ b/gtsam_unstable/geometry/Event.cpp @@ -18,7 +18,21 @@ */ #include +#include namespace gtsam { +/* 
************************************************************************* */ +void Event::print(const std::string& s) const { + std::cout << s << "time = " << time_ << "location = " << location_.transpose(); +} + +/* ************************************************************************* */ +bool Event::equals(const Event& other, double tol) const { + return std::abs(time_ - other.time_) < tol + && traits::Equals(location_, other.location_, tol); +} + +/* ************************************************************************* */ + } //\ namespace gtsam diff --git a/gtsam_unstable/geometry/Event.h b/gtsam_unstable/geometry/Event.h index 5bd37d2d2..d9bacd106 100644 --- a/gtsam_unstable/geometry/Event.h +++ b/gtsam_unstable/geometry/Event.h @@ -21,7 +21,7 @@ #include #include -#include +#include namespace gtsam { @@ -60,15 +60,10 @@ public: } /** print with optional string */ - void print(const std::string& s = "") const { - std::cout << s << "time = " << time_ << "location = " << location_.transpose(); - } + void print(const std::string& s = "") const; /** equals with an tolerance */ - bool equals(const Event& other, double tol = 1e-9) const { - return std::abs(time_ - other.time_) < tol - && traits::Equals(location_, other.location_, tol); - } + bool equals(const Event& other, double tol = 1e-9) const; /// Updates a with tangent space delta inline Event retract(const Vector4& v) const {