Merge branch 'develop'

Conflicts:
	gtsam_unstable/slam/SmartFactorBase.h
release/4.3a0
Luca 2014-05-28 18:11:39 -04:00
commit 386ed2e510
268 changed files with 5726 additions and 3484 deletions

.cproject (+2188)

File diff suppressed because it is too large.

.gitignore (+1)

@ -1,3 +1,4 @@
/build*
/doc*
*.pyc
*.DS_Store


@ -91,10 +91,10 @@ set(CPACK_GENERATOR "TGZ" CACHE STRING "CPack Default Binary Generator")
# If using Boost shared libs, disable auto linking
if(MSVC)
# Some libraries, at least Boost Program Options, rely on this to export DLL symbols
add_definitions(-DBOOST_ALL_DYN_LINK)
# Disable autolinking
if(NOT Boost_USE_STATIC_LIBS)
add_definitions(-DBOOST_ALL_NO_LIB)
add_definitions(-DBOOST_ALL_DYN_LINK)
endif()
endif()

DEVELOP (new file, +19)

@ -0,0 +1,19 @@
Information for developers
Coding Conventions:
* Classes are Uppercase, methods and functions lowerMixedCase
* We use a modified K&R Style, with 2-space tabs, inserting spaces for tabs
* Use meaningful variable names, e.g., measurement not msm
Windows:
On Windows it is necessary to explicitly export all functions from the library
which should be externally accessible. To do this, include the macro
GTSAM_EXPORT in your class or function definition.
For example:
class GTSAM_EXPORT MyClass { ... };
GTSAM_EXPORT myFunction();
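
A slightly fuller sketch of the convention (hypothetical header and names, not part of this changeset; the include that provides GTSAM_EXPORT is an assumption for this GTSAM version):

// myproject/MyClass.h -- illustration only
#include <gtsam/base/types.h>   // assumed to define GTSAM_EXPORT in this version of GTSAM
#include <string>

// Exporting the class makes all of its members callable across the DLL boundary on Windows:
class GTSAM_EXPORT MyClass {
 public:
  std::string name() const { return "MyClass"; }
};

// A free function must carry the macro on its declaration:
GTSAM_EXPORT double myFunction(double x);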


@ -24,7 +24,7 @@ Optional dependent libraries:
may be installed from the Ubuntu repositories, and for other platforms it
may be downloaded from https://www.threadingbuildingblocks.org/
Tested compilers
Tested compilers:
- GCC 4.2-4.7
- OSX Clang 2.9-5.0
@ -35,7 +35,12 @@ Tested systems:
- Ubuntu 11.04 - 13.10
- MacOS 10.6 - 10.9
- Windows 7, 8
- Windows 7, 8, 8.1
Known issues:
- MSVC 2013 is not yet supported because it cannot build the serialization module
of Boost 1.55 (or earlier).
2) GTSAM makes extensive use of debug assertions, and we highly recommend you work


@ -56,7 +56,7 @@ endif()
# Clang on Mac uses a template depth that is less than standard and is too small
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
if(NOT "${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS "5.0")
add_definitions(-ftemplate-depth=1024)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ftemplate-depth=1024")
endif()
endif()


@ -1,173 +1,43 @@
# This file should be used as a template for creating new projects using the CMake tools
# This project has the following features
# - GTSAM linking
# - Boost linking
# - Unit tests via CppUnitLite
# - Automatic detection of sources and headers in subfolders
# - Installation of library and headers
# - Matlab wrap interface with within-project building
# - Use of GTSAM cmake macros
# - Scripts
# - Automatic MATLAB wrapper generation
###################################################################################
# To create your own project, replace "myproject" with the actual name of your project
# To create your own project, replace "example" with the actual name of your project
cmake_minimum_required(VERSION 2.6)
enable_testing()
project(myproject CXX C)
project(example CXX C)
# Add the cmake subfolder to the cmake module path - necessary to use macros
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${PROJECT_SOURCE_DIR}/cmake")
# Include GTSAM CMake tools
find_package(GTSAMCMakeTools)
include(GtsamBuildTypes) # Load build type flags and default to Debug mode
include(GtsamTesting) # Easy functions for creating unit tests and scripts
include(GtsamMatlabWrap) # Automatic MATLAB wrapper generation
# Ensure that local folder is searched before library folders
include_directories(BEFORE "${PROJECT_SOURCE_DIR}")
# Load build type flags and default to Debug mode
include(GtsamBuildTypes)
###################################################################################
# Create a list of library dependencies
# These will be linked with executables
set(library_deps "")
set(linking_mode "static")
# Find GTSAM components
find_package(GTSAM REQUIRED) # Uses installed package
list(APPEND library_deps gtsam-${linking_mode} gtsam_unstable-${linking_mode})
# Include ransac
find_package(ransac REQUIRED) # Uses installed package
list(APPEND library_deps ransac-${linking_mode})
# Boost - same requirement as gtsam
find_package(Boost 1.43 COMPONENTS
serialization
system
filesystem
thread
date_time
REQUIRED)
list(APPEND library_deps
${Boost_SERIALIZATION_LIBRARY}
${Boost_SYSTEM_LIBRARY}
${Boost_FILESYSTEM_LIBRARY}
${Boost_THREAD_LIBRARY}
${Boost_DATE_TIME_LIBRARY})
include_directories(${Boost_INCLUDE_DIR} ${GTSAM_INCLUDE_DIR} ${ransac_INCLUDE_DIR})
include_directories(${GTSAM_INCLUDE_DIR})
###################################################################################
# List subdirs to process tests/sources
# Each of these will be scanned for new files
set (myproject_subdirs
"." # ensure root folder gets included
stuff
things
)
# loop through subdirs to install and build up source lists
set(myproject_lib_source "")
set(myproject_tests_source "")
set(myproject_scripts_source "")
foreach(subdir ${myproject_subdirs})
# Installing headers
message(STATUS "Installing ${subdir}")
file(GLOB sub_myproject_headers "myproject/${subdir}/*.h")
install(FILES ${sub_myproject_headers} DESTINATION include/myproject/${subdir})
# add sources to main sources list
file(GLOB subdir_srcs "myproject/${subdir}/*.cpp")
list(APPEND myproject_lib_source ${subdir_srcs})
# add tests to main tests list
file(GLOB subdir_test_srcs "myproject/${subdir}/tests/*.cpp")
list(APPEND myproject_tests_source ${subdir_test_srcs})
# add scripts to main tests list
file(GLOB subdir_scripts_srcs "myproject/${subdir}/scripts/*.cpp")
list(APPEND myproject_scripts_source ${subdir_scripts_srcs})
endforeach(subdir)
set(myproject_version ${myproject_VERSION_MAJOR}.${myproject_VERSION_MINOR}.${myproject_VERSION_PATCH})
set(myproject_soversion ${myproject_VERSION_MAJOR})
message(STATUS "GTSAM Version: ${gtsam_version}")
message(STATUS "Install prefix: ${CMAKE_INSTALL_PREFIX}")
# Build library (static and shared versions)
# Include installed versions
SET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
add_library(${PROJECT_NAME}-shared SHARED ${myproject_lib_source})
set_target_properties(${PROJECT_NAME}-shared PROPERTIES
OUTPUT_NAME ${PROJECT_NAME}
CLEAN_DIRECT_OUTPUT 1)
install(TARGETS myproject-shared EXPORT myproject-exports LIBRARY DESTINATION lib ARCHIVE DESTINATION lib RUNTIME DESTINATION bin)
list(APPEND myproject_EXPORTED_TARGETS myproject-shared)
add_library(${PROJECT_NAME}-static STATIC ${myproject_lib_source})
set_target_properties(${PROJECT_NAME}-static PROPERTIES
OUTPUT_NAME ${PROJECT_NAME}
CLEAN_DIRECT_OUTPUT 1)
install(TARGETS myproject-static EXPORT myproject-exports ARCHIVE DESTINATION lib)
list(APPEND myproject_EXPORTED_TARGETS myproject-static)
install(TARGETS ${PROJECT_NAME}-shared LIBRARY DESTINATION lib )
install(TARGETS ${PROJECT_NAME}-static ARCHIVE DESTINATION lib )
# Disabled tests - subtract these from the test files
# Note the need for a full path
set(disabled_tests
"dummy"
#"${PROJECT_SOURCE_DIR}/myproject/geometry/tests/testCovarianceEllipse.cpp"
)
list(REMOVE_ITEM myproject_tests_source ${disabled_tests})
# Build static library from common sources
set(CONVENIENCE_LIB_NAME ${PROJECT_NAME})
add_library(${CONVENIENCE_LIB_NAME} STATIC example/PrintExamples.h example/PrintExamples.cpp)
target_link_libraries(${CONVENIENCE_LIB_NAME} gtsam)
###################################################################################
# Build tests
add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND})
foreach(test_src_file ${myproject_tests_source})
get_filename_component(test_base ${test_src_file} NAME_WE)
message(STATUS "Adding test ${test_src_file} with base name ${test_base}" )
add_executable(${test_base} ${test_src_file})
target_link_libraries(${test_base} ${PROJECT_NAME}-${linking_mode} ${library_deps} CppUnitLite)
add_test(${test_base} ${EXECUTABLE_OUTPUT_PATH}/${test_base})
add_custom_target(${test_base}.run ${test_base} ${ARGN})
add_dependencies(check ${test_base})
endforeach(test_src_file)
# Build scripts
foreach(script_src_file ${myproject_scripts_source})
get_filename_component(script_base ${script_src_file} NAME_WE)
message(STATUS "Adding script ${script_src_file} with base name ${script_base}" )
add_executable(${script_base} ${script_src_file})
target_link_libraries(${script_base} ${PROJECT_NAME}-${linking_mode} ${library_deps} CppUnitLite)
add_custom_target(${script_base}.run ${script_base} ${ARGN})
endforeach(script_src_file)
# Build tests (CMake tracks the dependecy to link with GTSAM through our project's static library)
gtsamAddTestsGlob("example" "tests/test*.cpp" "" "${CONVENIENCE_LIB_NAME}")
###################################################################################
# Matlab wrapping
include(GtsamMatlabWrap)
set(MEX_COMMAND "mex" CACHE STRING "Command to use for executing mex (if on path, 'mex' will work)")
set(GTSAM_BUILD_MEX_BINARY_FLAGS "" CACHE STRING "Extra flags for running Matlab MEX compilation")
set(MYPROJECT_TOOLBOX_DIR "../matlab/myproject" CACHE PATH "Install folder for matlab toolbox - defaults to inside project")
set(WRAP_HEADER_PATH "${GTSAM_DIR}/../../../include")
set(MYPROJECT_TOOLBOX_FLAGS
${GTSAM_BUILD_MEX_BINARY_FLAGS} -I${PROJECT_SOURCE_DIR} -I${PROJECT_SOURCE_DIR}/myproject -I${Boost_INCLUDE_DIR} -I${MEX_INCLUDE_ROOT} -I${GTSAM_INCLUDE_DIR} -I${WRAP_HEADER_PATH} -Wl,-rpath,${CMAKE_BINARY_DIR}:${CMAKE_INSTALL_PREFIX}/lib)
set(MYPROJECT_LIBRARY_DEPS gtsam gtsam_unstable ransac myproject)
set(GTSAM_BUILD_MEX_BIN ON)
# Function to setup codegen, building and installation of the wrap toolbox
# This wrap setup function assumes that the toolbox will be installed directly,
# with predictable matlab.h sourcing
# params:
# moduleName : the name of the module, interface file must be called moduleName.h
# mexFlags : Compilation flags to be passed to the mex compiler
# modulePath : relative path to module markup header file (called moduleName.h)
# otherLibraries : list of library targets this should depend on
# toolboxPath : the directory in which to generate/build wrappers
# wrap_header_path : path to the installed wrap header
wrap_library_generic(myproject "${MYPROJECT_TOOLBOX_FLAGS}" "" "${MYPROJECT_LIBRARY_DEPS}" "${MYPROJECT_TOOLBOX_DIR}" "${WRAP_HEADER_PATH}")
# Build scripts (CMake tracks the dependecy to link with GTSAM through our project's static library)
gtsamAddExamplesGlob("*.cpp" "" "${CONVENIENCE_LIB_NAME}")
###################################################################################
# Create Install config and export files
# This config file takes the place of FindXXX.cmake scripts
include(GtsamMakeConfigFile)
GtsamMakeConfigFile(myproject)
export(TARGETS ${myproject_EXPORTED_TARGETS} FILE myproject-exports.cmake)
# Build MATLAB wrapper (CMake tracks the dependecy to link with GTSAM through our project's static library)
wrap_and_install_library("example.h" "${CONVENIENCE_LIB_NAME}" "" "")


@ -0,0 +1,23 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file SayGoodbye.cpp
* @brief Example script for example project
* @author Richard Roberts
*/
#include <example/PrintExamples.h>
int main(int argc, char *argv[]) {
example::PrintExamples().sayGoodbye();
return 0;
}


@ -0,0 +1,23 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file SayHello.cpp
* @brief Example script for example project
* @author Richard Roberts
*/
#include <example/PrintExamples.h>
int main(int argc, char *argv[]) {
example::PrintExamples().sayHello();
return 0;
}


@ -0,0 +1,28 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file example.h
* @brief Example wrapper interface file
* @author Richard Roberts
*/
// This is an interface file for automatic MATLAB wrapper generation. See
// gtsam.h for full documentation and more examples.
namespace example {
class PrintExamples {
void sayHello() const;
void sayGoodbye() const;
};
}


@ -0,0 +1,44 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file print_examples.cpp
* @brief Example library file
* @author Richard Roberts
*/
#include <iostream>
#include <example/PrintExamples.h>
namespace example {
void PrintExamples::sayHello() const {
std::cout << internal::getHelloString() << std::endl;
}
void PrintExamples::sayGoodbye() const {
std::cout << internal::getGoodbyeString() << std::endl;
}
namespace internal {
std::string getHelloString() {
return "Hello!";
}
std::string getGoodbyeString() {
return "See you soon!";
}
} // namespace internal
} // namespace example


@ -0,0 +1,41 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file print_examples.h
* @brief Example library file
* @author Richard Roberts
*/
#pragma once
#include <string>
namespace example {
class PrintExamples {
public:
/// Print a greeting
void sayHello() const;
/// Print a farewell
void sayGoodbye() const;
};
namespace internal {
std::string getHelloString();
std::string getGoodbyeString();
} // namespace internal
} // namespace example


@ -0,0 +1,43 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file testExample.cpp
* @brief Unit tests for example
* @author Richard Roberts
*/
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/TestableAssertions.h>
#include <example/PrintExamples.h>
using namespace gtsam;
TEST(Example, HelloString) {
const std::string expectedString = "Hello!";
EXPECT(assert_equal(expectedString, example::internal::getHelloString()));
}
TEST(Example, GoodbyeString) {
const std::string expectedString = "See you soon!";
EXPECT(assert_equal(expectedString, example::internal::getGoodbyeString()));
}
/* ************************************************************************* */
int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */


@ -1,38 +0,0 @@
# This file should be used as a template for creating new projects using the CMake tools
# This project has the following features
# - GTSAM linking
# - Unit tests via CppUnitLite
# - Scripts
###################################################################################
# To create your own project, replace "myproject" with the actual name of your project
cmake_minimum_required(VERSION 2.6)
enable_testing()
project(myproject CXX C)
# Add the cmake subfolder to the cmake module path - necessary to use macros
list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake")
# Ensure that local folder is searched before library folders
include_directories(BEFORE "${PROJECT_SOURCE_DIR}")
# Load build type flags and default to Debug mode
include(GtsamBuildTypes)
###################################################################################
# Find GTSAM components
find_package(GTSAM REQUIRED) # Uses installed package
include_directories(${GTSAM_INCLUDE_DIR})
###################################################################################
# Build static library from common sources
add_library(${PROJECT_NAME} STATIC ${PROJECT_NAME}/MySourceFiles.cpp)
target_link_libraries(${PROJECT_NAME} gtsam-shared)
###################################################################################
# Build tests (CMake tracks the dependecy to link with GTSAM through our project's static library)
gtsam_add_subdir_tests(${PROJECT_NAME} "${PROJECT_NAME}" "${PROJECT_NAME}" "")
###################################################################################
# Build scripts (CMake tracks the dependecy to link with GTSAM through our project's static library)
gtsam_add_executables("${PROJECT_NAME}/myScripts.cpp" "${PROJECT_NAME}" "${PROJECT_NAME}" "")

gtsam.h (+17)

@ -1423,6 +1423,7 @@ virtual class GaussianBayesNet {
void push_back(const gtsam::GaussianBayesNet& bayesNet);
gtsam::VectorValues optimize() const;
gtsam::VectorValues optimize(gtsam::VectorValues& solutionForMissing) const;
gtsam::VectorValues optimizeGradientSearch() const;
gtsam::VectorValues gradient(const gtsam::VectorValues& x0) const;
gtsam::VectorValues gradientAtZero() const;
@ -1550,6 +1551,10 @@ char symbolChr(size_t key);
size_t symbolIndex(size_t key);
// Default keyformatter
void printKeyList (const gtsam::KeyList& keys);
void printKeyList (const gtsam::KeyList& keys, string s);
void printKeyVector(const gtsam::KeyVector& keys);
void printKeyVector(const gtsam::KeyVector& keys, string s);
void printKeySet (const gtsam::KeySet& keys);
void printKeySet (const gtsam::KeySet& keys, string s);
@ -1725,6 +1730,7 @@ class KeySet {
// structure specific methods
void insert(size_t key);
void merge(gtsam::KeySet& other);
bool erase(size_t key); // returns true if value was removed
bool count(size_t key) const; // returns true if value exists
@ -2140,6 +2146,8 @@ template<POSE, POINT, ROTATION>
virtual class BearingRangeFactor : gtsam::NoiseModelFactor {
BearingRangeFactor(size_t poseKey, size_t pointKey, const ROTATION& measuredBearing, double measuredRange, const gtsam::noiseModel::Base* noiseModel);
pair<ROTATION, double> measured() const;
// enabling serialization functionality
void serialize() const;
};
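
For context, a minimal C++ sketch (not part of this changeset) of constructing the factor declared above; the header path and the measurement/noise values are assumptions for illustration:

#include <gtsam/slam/BearingRangeFactor.h>   // assumed location of the C++ class in this GTSAM version
#include <gtsam/geometry/Pose2.h>
#include <gtsam/geometry/Point2.h>

int main() {
  using namespace gtsam;
  // Hypothetical measurement: bearing of 45 degrees and range of 3 m from pose key 1 to landmark key 2.
  noiseModel::Diagonal::shared_ptr model =
      noiseModel::Diagonal::Sigmas((Vector(2) << 0.1, 0.2).finished());
  BearingRangeFactor<Pose2, Point2, Rot2> factor(1, 2, Rot2::fromDegrees(45.0), 3.0, model);
  factor.print("bearing-range factor: ");
  return 0;
}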
@ -2355,17 +2363,26 @@ virtual class CombinedImuFactor : gtsam::NonlinearFactor {
namespace utilities {
#include <matlab.h>
gtsam::KeyList createKeyList(Vector I);
gtsam::KeyList createKeyList(string s, Vector I);
gtsam::KeyVector createKeyVector(Vector I);
gtsam::KeyVector createKeyVector(string s, Vector I);
gtsam::KeySet createKeySet(Vector I);
gtsam::KeySet createKeySet(string s, Vector I);
Matrix extractPoint2(const gtsam::Values& values);
Matrix extractPoint3(const gtsam::Values& values);
Matrix extractPose2(const gtsam::Values& values);
gtsam::Values allPose3s(gtsam::Values& values);
Matrix extractPose3(const gtsam::Values& values);
void perturbPoint2(gtsam::Values& values, double sigma, int seed);
void perturbPose2 (gtsam::Values& values, double sigmaT, double sigmaR, int seed);
void perturbPoint3(gtsam::Values& values, double sigma, int seed);
void insertBackprojections(gtsam::Values& values, const gtsam::SimpleCamera& c, Vector J, Matrix Z, double depth);
void insertProjectionFactors(gtsam::NonlinearFactorGraph& graph, size_t i, Vector J, Matrix Z, const gtsam::noiseModel::Base* model, const gtsam::Cal3_S2* K);
void insertProjectionFactors(gtsam::NonlinearFactorGraph& graph, size_t i, Vector J, Matrix Z, const gtsam::noiseModel::Base* model, const gtsam::Cal3_S2* K, const gtsam::Pose3& body_P_sensor);
Matrix reprojectionErrors(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& values);
gtsam::Values localToWorld(const gtsam::Values& local, const gtsam::Pose2& base);
gtsam::Values localToWorld(const gtsam::Values& local, const gtsam::Pose2& base, const gtsam::KeyVector& keys);
} //\namespace utilities


@ -16,13 +16,30 @@ if(NOT GTSAM_USE_SYSTEM_EIGEN)
endif()
endforeach(eigen_dir)
# do the same for the unsupported eigen folder
file(GLOB_RECURSE unsupported_eigen_headers "${CMAKE_CURRENT_SOURCE_DIR}/Eigen/unsupported/Eigen/*.h")
file(GLOB unsupported_eigen_dir_headers_all "Eigen/unsupported/Eigen/*")
foreach(unsupported_eigen_dir ${unsupported_eigen_dir_headers_all})
get_filename_component(filename ${unsupported_eigen_dir} NAME)
if (NOT ((${filename} MATCHES "CMakeLists.txt") OR (${filename} MATCHES "src") OR (${filename} MATCHES ".svn")))
list(APPEND unsupported_eigen_headers "${CMAKE_CURRENT_SOURCE_DIR}/Eigen/unsupported/Eigen/${filename}")
install(FILES Eigen/unsupported/Eigen/${filename} DESTINATION include/gtsam/3rdparty/Eigen/unsupported/Eigen)
endif()
endforeach(unsupported_eigen_dir)
# Add to project source
set(eigen_headers ${eigen_headers} PARENT_SCOPE)
# set(unsupported_eigen_headers ${unsupported_eigen_headers} PARENT_SCOPE)
# install Eigen - only the headers in our 3rdparty directory
install(DIRECTORY Eigen/Eigen
DESTINATION include/gtsam/3rdparty/Eigen
FILES_MATCHING PATTERN "*.h")
install(DIRECTORY Eigen/unsupported/Eigen
DESTINATION include/gtsam/3rdparty/Eigen/unsupported/
FILES_MATCHING PATTERN "*.h")
endif()
option(GTSAM_BUILD_METIS "Build metis library" ON)


@ -204,7 +204,7 @@ if(NOT MSVC)
option(EIGEN_TEST_NEON "Enable/Disable Neon in tests/examples" OFF)
if(EIGEN_TEST_NEON)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=neon -mcpu=cortex-a"8)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=neon -mcpu=cortex-a8")
message(STATUS "Enabling NEON in tests/examples")
endif()


@ -14,10 +14,23 @@
#error Eigen2 support must be enabled by defining EIGEN2_SUPPORT before including any Eigen header
#endif
#ifndef EIGEN_NO_EIGEN2_DEPRECATED_WARNING
#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
#warning "Eigen2 support is deprecated in Eigen 3.2.x and it will be removed in Eigen 3.3. (Define EIGEN_NO_EIGEN2_DEPRECATED_WARNING to disable this warning)"
#else
#pragma message ("Eigen2 support is deprecated in Eigen 3.2.x and it will be removed in Eigen 3.3. (Define EIGEN_NO_EIGEN2_DEPRECATED_WARNING to disable this warning)")
#endif
#endif // EIGEN_NO_EIGEN2_DEPRECATED_WARNING
#include "src/Core/util/DisableStupidWarnings.h"
/** \ingroup Support_modules
* \defgroup Eigen2Support_Module Eigen2 support module
*
* \warning Eigen2 support is deprecated in Eigen 3.2.x and it will be removed in Eigen 3.3.
*
* This module provides a couple of deprecated functions improving the compatibility with Eigen2.
*
* To use it, define EIGEN2_SUPPORT before including any Eigen header


@ -17,6 +17,9 @@ namespace Eigen {
namespace internal {
template<typename MatrixType, int UpLo> struct LDLT_Traits;
// PositiveSemiDef means positive semi-definite and non-zero; same for NegativeSemiDef
enum SignMatrix { PositiveSemiDef, NegativeSemiDef, ZeroSign, Indefinite };
}
/** \ingroup Cholesky_Module
@ -69,7 +72,12 @@ template<typename _MatrixType, int _UpLo> class LDLT
* The default constructor is useful in cases in which the user intends to
* perform decompositions via LDLT::compute(const MatrixType&).
*/
LDLT() : m_matrix(), m_transpositions(), m_isInitialized(false) {}
LDLT()
: m_matrix(),
m_transpositions(),
m_sign(internal::ZeroSign),
m_isInitialized(false)
{}
/** \brief Default Constructor with memory preallocation
*
@ -81,6 +89,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
: m_matrix(size, size),
m_transpositions(size),
m_temporary(size),
m_sign(internal::ZeroSign),
m_isInitialized(false)
{}
@ -93,6 +102,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
: m_matrix(matrix.rows(), matrix.cols()),
m_transpositions(matrix.rows()),
m_temporary(matrix.rows()),
m_sign(internal::ZeroSign),
m_isInitialized(false)
{
compute(matrix);
@ -139,7 +149,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
inline bool isPositive() const
{
eigen_assert(m_isInitialized && "LDLT is not initialized.");
return m_sign == 1;
return m_sign == internal::PositiveSemiDef || m_sign == internal::ZeroSign;
}
#ifdef EIGEN2_SUPPORT
@ -153,7 +163,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
inline bool isNegative(void) const
{
eigen_assert(m_isInitialized && "LDLT is not initialized.");
return m_sign == -1;
return m_sign == internal::NegativeSemiDef || m_sign == internal::ZeroSign;
}
/** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A.
@ -235,7 +245,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
MatrixType m_matrix;
TranspositionType m_transpositions;
TmpMatrixType m_temporary;
int m_sign;
internal::SignMatrix m_sign;
bool m_isInitialized;
};
@ -246,7 +256,7 @@ template<int UpLo> struct ldlt_inplace;
template<> struct ldlt_inplace<Lower>
{
template<typename MatrixType, typename TranspositionType, typename Workspace>
static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign)
{
using std::abs;
typedef typename MatrixType::Scalar Scalar;
@ -258,8 +268,9 @@ template<> struct ldlt_inplace<Lower>
if (size <= 1)
{
transpositions.setIdentity();
if(sign)
*sign = numext::real(mat.coeff(0,0))>0 ? 1:-1;
if (numext::real(mat.coeff(0,0)) > 0) sign = PositiveSemiDef;
else if (numext::real(mat.coeff(0,0)) < 0) sign = NegativeSemiDef;
else sign = ZeroSign;
return true;
}
@ -284,7 +295,6 @@ template<> struct ldlt_inplace<Lower>
if(biggest_in_corner < cutoff)
{
for(Index i = k; i < size; i++) transpositions.coeffRef(i) = i;
if(sign) *sign = 0;
break;
}
@ -326,14 +336,14 @@ template<> struct ldlt_inplace<Lower>
if((rs>0) && (abs(mat.coeffRef(k,k)) > cutoff))
A21 /= mat.coeffRef(k,k);
if(sign)
{
// LDLT is not guaranteed to work for indefinite matrices, but let's try to get the sign right
int newSign = numext::real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0;
if(k == 0)
*sign = newSign;
else if(*sign != newSign)
*sign = 0;
RealScalar realAkk = numext::real(mat.coeffRef(k,k));
if (sign == PositiveSemiDef) {
if (realAkk < 0) sign = Indefinite;
} else if (sign == NegativeSemiDef) {
if (realAkk > 0) sign = Indefinite;
} else if (sign == ZeroSign) {
if (realAkk > 0) sign = PositiveSemiDef;
else if (realAkk < 0) sign = NegativeSemiDef;
}
}
@ -399,7 +409,7 @@ template<> struct ldlt_inplace<Lower>
template<> struct ldlt_inplace<Upper>
{
template<typename MatrixType, typename TranspositionType, typename Workspace>
static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign)
{
Transpose<MatrixType> matt(mat);
return ldlt_inplace<Lower>::unblocked(matt, transpositions, temp, sign);
@ -445,7 +455,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
m_isInitialized = false;
m_temporary.resize(size);
internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, &m_sign);
internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, m_sign);
m_isInitialized = true;
return *this;
@ -473,7 +483,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Deri
for (Index i = 0; i < size; i++)
m_transpositions.coeffRef(i) = i;
m_temporary.resize(size);
m_sign = sigma>=0 ? 1 : -1;
m_sign = sigma>=0 ? internal::PositiveSemiDef : internal::NegativeSemiDef;
m_isInitialized = true;
}
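
For context, a small sketch (not part of the diff) of how the tracked sign surfaces through the public LDLT API; the matrix is made up for illustration:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix2d A;
  A << 2, 1,
       1, 2;                                    // symmetric positive definite
  Eigen::LDLT<Eigen::Matrix2d> ldlt(A);
  std::cout << ldlt.isPositive() << std::endl;  // expected: 1
  std::cout << ldlt.isNegative() << std::endl;  // expected: 0
  return 0;
}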


@ -58,10 +58,12 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
res.p = mat.outerIndexPtr();
res.i = mat.innerIndexPtr();
res.x = mat.valuePtr();
res.z = 0;
res.sorted = 1;
if(mat.isCompressed())
{
res.packed = 1;
res.nz = 0;
}
else
{
@ -170,6 +172,7 @@ class CholmodBase : internal::noncopyable
CholmodBase()
: m_cholmodFactor(0), m_info(Success), m_isInitialized(false)
{
m_shiftOffset[0] = m_shiftOffset[1] = RealScalar(0.0);
cholmod_start(&m_cholmod);
}
@ -241,7 +244,7 @@ class CholmodBase : internal::noncopyable
return internal::sparse_solve_retval<CholmodBase, Rhs>(*this, b.derived());
}
/** Performs a symbolic decomposition on the sparcity of \a matrix.
/** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
*
* This function is particularly useful when solving for several problems having the same structure.
*
@ -265,7 +268,7 @@ class CholmodBase : internal::noncopyable
/** Performs a numeric decomposition of \a matrix
*
* The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed.
* The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
*
* \sa analyzePattern()
*/
@ -302,7 +305,7 @@ class CholmodBase : internal::noncopyable
{
this->m_info = NumericalIssue;
}
// TODO optimize this copy by swapping when possible (be carreful with alignment, etc.)
// TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
dest = Matrix<Scalar,Dest::RowsAtCompileTime,Dest::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x),b.rows(),b.cols());
cholmod_free_dense(&x_cd, &m_cholmod);
}
@ -323,7 +326,7 @@ class CholmodBase : internal::noncopyable
{
this->m_info = NumericalIssue;
}
// TODO optimize this copy by swapping when possible (be carreful with alignment, etc.)
// TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
dest = viewAsEigen<DestScalar,DestOptions,DestIndex>(*x_cs);
cholmod_free_sparse(&x_cs, &m_cholmod);
}
@ -365,8 +368,8 @@ class CholmodBase : internal::noncopyable
*
* This class allows to solve for A.X = B sparse linear problems via a simplicial LL^T Cholesky factorization
* using the Cholmod library.
* This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Thefore, it has little practical interest.
* The sparse matrix A must be selfajoint and positive definite. The vectors or matrices
* This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Therefore, it has little practical interest.
* The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
* X and B can be either dense or sparse.
*
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
@ -412,8 +415,8 @@ class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimpl
*
* This class allows to solve for A.X = B sparse linear problems via a simplicial LDL^T Cholesky factorization
* using the Cholmod library.
* This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Thefore, it has little practical interest.
* The sparse matrix A must be selfajoint and positive definite. The vectors or matrices
* This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Therefore, it has little practical interest.
* The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
* X and B can be either dense or sparse.
*
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
@ -458,7 +461,7 @@ class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimp
* This class allows to solve for A.X = B sparse linear problems via a supernodal LL^T Cholesky factorization
* using the Cholmod library.
* This supernodal variant performs best on dense enough problems, e.g., 3D FEM, or very high order 2D FEM.
* The sparse matrix A must be selfajoint and positive definite. The vectors or matrices
* The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
* X and B can be either dense or sparse.
*
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
@ -501,7 +504,7 @@ class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSuper
* \brief A general Cholesky factorization and solver based on Cholmod
*
* This class allows to solve for A.X = B sparse linear problems via a LL^T or LDL^T Cholesky factorization
* using the Cholmod library. The sparse matrix A must be selfajoint and positive definite. The vectors or matrices
* using the Cholmod library. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
* X and B can be either dense or sparse.
*
* This variant permits to change the underlying Cholesky method at runtime.


@ -210,7 +210,7 @@ class Array
: Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
{
Base::_check_template_params();
Base::resize(other.rows(), other.cols());
Base::_resize_to_match(other);
*this = other;
}


@ -29,9 +29,9 @@ struct all_unroller
};
template<typename Derived>
struct all_unroller<Derived, 1>
struct all_unroller<Derived, 0>
{
static inline bool run(const Derived &mat) { return mat.coeff(0, 0); }
static inline bool run(const Derived &/*mat*/) { return true; }
};
template<typename Derived>
@ -55,9 +55,9 @@ struct any_unroller
};
template<typename Derived>
struct any_unroller<Derived, 1>
struct any_unroller<Derived, 0>
{
static inline bool run(const Derived &mat) { return mat.coeff(0, 0); }
static inline bool run(const Derived & /*mat*/) { return false; }
};
template<typename Derived>
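
A minimal usage sketch (not part of the diff) of the reductions whose zero-size unrolling is fixed above; the values are made up:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Array3f a(1.0f, 2.0f, 3.0f);
  std::cout << (a > 0.0f).all() << std::endl;  // 1: every coefficient is positive
  std::cout << (a > 2.0f).any() << std::endl;  // 1: at least one coefficient exceeds 2
  return 0;
}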


@ -126,36 +126,6 @@ Derived& DenseBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)
return derived();
}
/** replaces \c *this by \c *this * \a other.
*
* \returns a reference to \c *this
*/
template<typename Derived>
template<typename OtherDerived>
inline Derived&
MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other)
{
other.derived().applyThisOnTheRight(derived());
return derived();
}
/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=().
*/
template<typename Derived>
template<typename OtherDerived>
inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other)
{
other.derived().applyThisOnTheRight(derived());
}
/** replaces \c *this by \c *this * \a other. */
template<typename Derived>
template<typename OtherDerived>
inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived> &other)
{
other.derived().applyThisOnTheLeft(derived());
}
} // end namespace Eigen
#endif // EIGEN_EIGENBASE_H


@ -185,21 +185,22 @@ std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat&
explicit_precision = fmt.precision;
}
std::streamsize old_precision = 0;
if(explicit_precision) old_precision = s.precision(explicit_precision);
bool align_cols = !(fmt.flags & DontAlignCols);
if(align_cols)
{
// compute the largest width
for(Index j = 1; j < m.cols(); ++j)
for(Index j = 0; j < m.cols(); ++j)
for(Index i = 0; i < m.rows(); ++i)
{
std::stringstream sstr;
if(explicit_precision) sstr.precision(explicit_precision);
sstr.copyfmt(s);
sstr << m.coeff(i,j);
width = std::max<Index>(width, Index(sstr.str().length()));
}
}
std::streamsize old_precision = 0;
if(explicit_precision) old_precision = s.precision(explicit_precision);
s << fmt.matPrefix;
for(Index i = 0; i < m.rows(); ++i)
{
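
A small sketch (not part of the diff) showing the user-visible effect: with column alignment enabled (the default), the width computation above now also takes the first column into account; the matrix values are arbitrary:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix2d m;
  m << 100.25, 1,
       2,      3;              // the widest entry sits in column 0
  std::cout << m << std::endl; // columns are padded to the widest entry, including column 0
  return 0;
}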


@ -304,7 +304,7 @@ class Matrix
: Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
{
Base::_check_template_params();
Base::resize(other.rows(), other.cols());
Base::_resize_to_match(other);
// FIXME/CHECK: isn't *this = other.derived() more efficient. it allows to
// go for pure _set() implementations, right?
*this = other;


@ -510,6 +510,51 @@ template<typename Derived> class MatrixBase
{EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}
};
/***************************************************************************
* Implementation of matrix base methods
***************************************************************************/
/** replaces \c *this by \c *this * \a other.
*
* \returns a reference to \c *this
*
* Example: \include MatrixBase_applyOnTheRight.cpp
* Output: \verbinclude MatrixBase_applyOnTheRight.out
*/
template<typename Derived>
template<typename OtherDerived>
inline Derived&
MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other)
{
other.derived().applyThisOnTheRight(derived());
return derived();
}
/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=().
*
* Example: \include MatrixBase_applyOnTheRight.cpp
* Output: \verbinclude MatrixBase_applyOnTheRight.out
*/
template<typename Derived>
template<typename OtherDerived>
inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other)
{
other.derived().applyThisOnTheRight(derived());
}
/** replaces \c *this by \a other * \c *this.
*
* Example: \include MatrixBase_applyOnTheLeft.cpp
* Output: \verbinclude MatrixBase_applyOnTheLeft.out
*/
template<typename Derived>
template<typename OtherDerived>
inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived> &other)
{
other.derived().applyThisOnTheLeft(derived());
}
} // end namespace Eigen
#endif // EIGEN_MATRIXBASE_H


@ -553,7 +553,8 @@ struct permut_matrix_product_retval
template<typename Dest> inline void evalTo(Dest& dst) const
{
const Index n = Side==OnTheLeft ? rows() : cols();
// FIXME we need an is_same for expression that is not sensitive to constness. For instance
// is_same_xpr<Block<const Matrix>, Block<Matrix> >::value should be true.
if(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix))
{
// apply the permutation inplace


@ -47,7 +47,10 @@ template<> struct check_rows_cols_for_overflow<Dynamic> {
}
};
template <typename Derived, typename OtherDerived = Derived, bool IsVector = bool(Derived::IsVectorAtCompileTime)> struct conservative_resize_like_impl;
template <typename Derived,
typename OtherDerived = Derived,
bool IsVector = bool(Derived::IsVectorAtCompileTime) && bool(OtherDerived::IsVectorAtCompileTime)>
struct conservative_resize_like_impl;
template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers> struct matrix_swap_impl;
@ -668,8 +671,10 @@ private:
enum { ThisConstantIsPrivateInPlainObjectBase };
};
namespace internal {
template <typename Derived, typename OtherDerived, bool IsVector>
struct internal::conservative_resize_like_impl
struct conservative_resize_like_impl
{
typedef typename Derived::Index Index;
static void run(DenseBase<Derived>& _this, Index rows, Index cols)
@ -729,11 +734,14 @@ struct internal::conservative_resize_like_impl
}
};
namespace internal {
// Here, the specialization for vectors inherits from the general matrix case
// to allow calling .conservativeResize(rows,cols) on vectors.
template <typename Derived, typename OtherDerived>
struct conservative_resize_like_impl<Derived,OtherDerived,true>
: conservative_resize_like_impl<Derived,OtherDerived,false>
{
using conservative_resize_like_impl<Derived,OtherDerived,false>::run;
typedef typename Derived::Index Index;
static void run(DenseBase<Derived>& _this, Index size)
{


@ -94,7 +94,8 @@ struct traits<Ref<_PlainObjectType, _Options, _StrideType> >
typedef _PlainObjectType PlainObjectType;
typedef _StrideType StrideType;
enum {
Options = _Options
Options = _Options,
Flags = traits<Map<_PlainObjectType, _Options, _StrideType> >::Flags | NestByRefBit
};
template<typename Derived> struct match {


@ -17,15 +17,28 @@ namespace internal {
template<typename ExpressionType, typename Scalar>
inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale)
{
Scalar max = bl.cwiseAbs().maxCoeff();
if (max>scale)
using std::max;
Scalar maxCoeff = bl.cwiseAbs().maxCoeff();
if (maxCoeff>scale)
{
ssq = ssq * numext::abs2(scale/max);
scale = max;
invScale = Scalar(1)/scale;
ssq = ssq * numext::abs2(scale/maxCoeff);
Scalar tmp = Scalar(1)/maxCoeff;
if(tmp > NumTraits<Scalar>::highest())
{
invScale = NumTraits<Scalar>::highest();
scale = Scalar(1)/invScale;
}
// TODO if the max is much much smaller than the current scale,
else
{
scale = maxCoeff;
invScale = tmp;
}
}
// TODO if the maxCoeff is much much smaller than the current scale,
// then we can neglect this sub vector
if(scale>Scalar(0)) // if scale==0, then bl is 0
ssq += (bl*invScale).squaredNorm();
}
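
A short sketch (not part of the diff) of the code path this guards; the values are chosen to sit near the double-precision limit:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Vector3d v(1e308, 1e308, 1e308);
  std::cout << v.norm() << std::endl;        // naive sum of squares overflows to inf
  std::cout << v.stableNorm() << std::endl;  // rescaled internally: roughly 1.732e308
  return 0;
}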


@ -285,6 +285,7 @@ struct inplace_transpose_selector<MatrixType,false> { // non square matrix
* If you just need the transpose of a matrix, use transpose().
*
* \note if the matrix is not square, then \c *this must be a resizable matrix.
* This excludes (non-square) fixed-size matrices, block-expressions and maps.
*
* \sa transpose(), adjoint(), adjointInPlace() */
template<typename Derived>
@ -315,6 +316,7 @@ inline void DenseBase<Derived>::transposeInPlace()
* If you just need the adjoint of a matrix, use adjoint().
*
* \note if the matrix is not square, then \c *this must be a resizable matrix.
* This excludes (non-square) fixed-size matrices, block-expressions and maps.
*
* \sa transpose(), adjoint(), transposeInPlace() */
template<typename Derived>
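
A minimal sketch (not part of the diff) of the restriction documented above:

#include <Eigen/Dense>

int main() {
  Eigen::MatrixXf m(2, 3);   // dynamic size, hence resizable
  m.setRandom();
  m.transposeInPlace();      // fine: m is now 3x2
  // Eigen::Matrix<float, 2, 3> f; f.transposeInPlace();  // not allowed: fixed-size and non-square
  return 0;
}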


@ -50,7 +50,7 @@ struct traits<PartialReduxExpr<MatrixType, MemberOp, Direction> >
MaxColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::MaxColsAtCompileTime,
Flags0 = (unsigned int)_MatrixTypeNested::Flags & HereditaryBits,
Flags = (Flags0 & ~RowMajorBit) | (RowsAtCompileTime == 1 ? RowMajorBit : 0),
TraversalSize = Direction==Vertical ? RowsAtCompileTime : ColsAtCompileTime
TraversalSize = Direction==Vertical ? MatrixType::RowsAtCompileTime : MatrixType::ColsAtCompileTime
};
#if EIGEN_GNUC_AT_LEAST(3,4)
typedef typename MemberOp::template Cost<InputScalar,int(TraversalSize)> CostOpType;
@ -58,7 +58,8 @@ struct traits<PartialReduxExpr<MatrixType, MemberOp, Direction> >
typedef typename MemberOp::template Cost<InputScalar,TraversalSize> CostOpType;
#endif
enum {
CoeffReadCost = TraversalSize * traits<_MatrixTypeNested>::CoeffReadCost + int(CostOpType::value)
CoeffReadCost = TraversalSize==Dynamic ? Dynamic
: TraversalSize * traits<_MatrixTypeNested>::CoeffReadCost + int(CostOpType::value)
};
};
}


@ -442,8 +442,11 @@ Packet4f pcos<Packet4f>(const Packet4f& _x)
return _mm_xor_ps(y, sign_bit);
}
#if EIGEN_FAST_MATH
// This is based on Quake3's fast inverse square root.
// For detail see here: http://www.beyond3d.com/content/articles/8/
// It lacks 1 (or 2 bits in some rare cases) of precision, and does not handle negative, +inf, or denormalized numbers correctly.
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f psqrt<Packet4f>(const Packet4f& _x)
{
@ -457,6 +460,14 @@ Packet4f psqrt<Packet4f>(const Packet4f& _x)
return pmul(_x,x);
}
#else
template<> EIGEN_STRONG_INLINE Packet4f psqrt<Packet4f>(const Packet4f& x) { return _mm_sqrt_ps(x); }
#endif
template<> EIGEN_STRONG_INLINE Packet2d psqrt<Packet2d>(const Packet2d& x) { return _mm_sqrt_pd(x); }
} // end namespace internal
} // end namespace Eigen
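
A user-level sketch (not part of the diff) of the paths involved: with SSE and EIGEN_FAST_MATH, float sqrt over packets may use the rsqrt-based approximation documented above, and double now has a packet sqrt as well; the inputs are arbitrary:

#include <Eigen/Core>
#include <iostream>

int main() {
  Eigen::ArrayXf xf = Eigen::ArrayXf::LinSpaced(4, 1.0f, 16.0f);
  Eigen::ArrayXd xd = Eigen::ArrayXd::LinSpaced(4, 1.0, 16.0);
  std::cout << xf.sqrt().transpose() << std::endl;  // single precision, possibly via the fast path
  std::cout << xd.sqrt().transpose() << std::endl;  // double precision, via _mm_sqrt_pd
  return 0;
}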


@ -83,7 +83,8 @@ template<> struct packet_traits<double> : default_packet_traits
size=2,
HasDiv = 1,
HasExp = 1
HasExp = 1,
HasSqrt = 1
};
};
template<> struct packet_traits<int> : default_packet_traits
@ -507,8 +508,8 @@ template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
// for GCC (eg., it does not like using std::min after the pstore !!)
EIGEN_ALIGN16 int aux[4];
pstore(aux, a);
register int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
register int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
return aux0<aux2 ? aux0 : aux2;
}
@ -528,8 +529,8 @@ template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
// for GCC (eg., it does not like using std::min after the pstore !!)
EIGEN_ALIGN16 int aux[4];
pstore(aux, a);
register int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
register int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
return aux0>aux2 ? aux0 : aux2;
}


@ -1128,6 +1128,8 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, Pack1, Pack2, StorageOrder,
enum { PacketSize = packet_traits<Scalar>::size };
EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
EIGEN_UNUSED_VARIABLE(stride)
EIGEN_UNUSED_VARIABLE(offset)
eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
eigen_assert( (StorageOrder==RowMajor) || ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) );
conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
@ -1215,6 +1217,8 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, ColMajor, Conjugate, Pan
::operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride, Index offset)
{
EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR");
EIGEN_UNUSED_VARIABLE(stride)
EIGEN_UNUSED_VARIABLE(offset)
eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
Index packet_cols = (cols/nr) * nr;
@ -1266,6 +1270,8 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, RowMajor, Conjugate, Pan
::operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride, Index offset)
{
EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR");
EIGEN_UNUSED_VARIABLE(stride)
EIGEN_UNUSED_VARIABLE(offset)
eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
Index packet_cols = (cols/nr) * nr;


@ -52,11 +52,7 @@ EIGEN_DONT_INLINE static void run(
Index rows, Index cols,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsIncr,
ResScalar* res, Index
#ifdef EIGEN_INTERNAL_DEBUGGING
resIncr
#endif
, RhsScalar alpha);
ResScalar* res, Index resIncr, RhsScalar alpha);
};
template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version>
@ -64,12 +60,9 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,Co
Index rows, Index cols,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsIncr,
ResScalar* res, Index
#ifdef EIGEN_INTERNAL_DEBUGGING
resIncr
#endif
, RhsScalar alpha)
ResScalar* res, Index resIncr, RhsScalar alpha)
{
EIGEN_UNUSED_VARIABLE(resIncr)
eigen_internal_assert(resIncr==1);
#ifdef _EIGEN_ACCUMULATE_PACKETS
#error _EIGEN_ACCUMULATE_PACKETS has already been defined
@ -265,7 +258,7 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,Co
// process aligned result's coeffs
if ((size_t(lhs0+alignedStart)%sizeof(LhsPacket))==0)
for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
pstore(&res[i], pcj.pmadd(ploadu<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
pstore(&res[i], pcj.pmadd(pload<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
else
for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
pstore(&res[i], pcj.pmadd(ploadu<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));


@ -79,8 +79,8 @@ EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrd
for (Index j=FirstTriangular ? bound : 0;
j<(FirstTriangular ? size : bound);j+=2)
{
register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
register const Scalar* EIGEN_RESTRICT A1 = lhs + (j+1)*lhsStride;
const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
const Scalar* EIGEN_RESTRICT A1 = lhs + (j+1)*lhsStride;
Scalar t0 = cjAlpha * rhs[j];
Packet ptmp0 = pset1<Packet>(t0);
@ -147,7 +147,7 @@ EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrd
}
for (Index j=FirstTriangular ? 0 : bound;j<(FirstTriangular ? bound : size);j++)
{
register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
Scalar t1 = cjAlpha * rhs[j];
Scalar t2(0);


@ -13,7 +13,7 @@
#define EIGEN_WORLD_VERSION 3
#define EIGEN_MAJOR_VERSION 2
#define EIGEN_MINOR_VERSION 0
#define EIGEN_MINOR_VERSION 1
#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
(EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
@ -238,7 +238,12 @@
#endif
// Suppresses 'unused variable' warnings.
#define EIGEN_UNUSED_VARIABLE(var) (void)var;
namespace Eigen {
namespace internal {
template<typename T> void ignore_unused_variable(const T&) {}
}
}
#define EIGEN_UNUSED_VARIABLE(var) Eigen::internal::ignore_unused_variable(var);
#if !defined(EIGEN_ASM_COMMENT)
#if (defined __GNUC__) && ( defined(__i386__) || defined(__x86_64__) )


@ -578,7 +578,7 @@ template<typename T> class aligned_stack_memory_handler
*/
#ifdef EIGEN_ALLOCA
#ifdef __arm__
#if defined(__arm__) || defined(_WIN32)
#define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((reinterpret_cast<size_t>(EIGEN_ALLOCA(SIZE+16)) & ~(size_t(15))) + 16)
#else
#define EIGEN_ALIGNED_ALLOCA EIGEN_ALLOCA
@ -634,7 +634,9 @@ template<typename T> class aligned_stack_memory_handler
/* memory allocated we can safely let the default implementation handle */ \
/* this particular case. */ \
static void *operator new(size_t size, void *ptr) { return ::operator new(size,ptr); } \
static void *operator new[](size_t size, void* ptr) { return ::operator new[](size,ptr); } \
void operator delete(void * memory, void *ptr) throw() { return ::operator delete(memory,ptr); } \
void operator delete[](void * memory, void *ptr) throw() { return ::operator delete[](memory,ptr); } \
/* nothrow-new (returns zero instead of std::bad_alloc) */ \
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
void operator delete(void *ptr, const std::nothrow_t&) throw() { \
@ -729,15 +731,6 @@ public:
::new( p ) T( value );
}
// Support for c++11
#if (__cplusplus >= 201103L)
template<typename... Args>
void construct(pointer p, Args&&... args)
{
::new(p) T(std::forward<Args>(args)...);
}
#endif
void destroy( pointer p )
{
p->~T();
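
For context, a common usage sketch (not part of the diff) of the macro extended above; the struct is hypothetical:

#include <Eigen/Dense>

// A type with a fixed-size vectorizable Eigen member needs aligned heap allocations;
// the macro injects the aligned new/delete overloads, including the array and
// placement forms touched by this change.
struct Frame {
  Eigen::Vector4d state;
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};

int main() {
  Frame* f  = new Frame();
  Frame* fs = new Frame[3];
  delete f;
  delete[] fs;
  return 0;
}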


@ -512,8 +512,7 @@ template<typename MatrixType>
template<typename OtherDerived, typename ResultType>
bool SVD<MatrixType>::solve(const MatrixBase<OtherDerived> &b, ResultType* result) const
{
const int rows = m_matU.rows();
ei_assert(b.rows() == rows);
ei_assert(b.rows() == m_matU.rows());
Scalar maxVal = m_sigma.cwise().abs().maxCoeff();
for (int j=0; j<b.cols(); ++j)


@ -28,7 +28,7 @@ namespace Eigen {
* * AngleAxisf(ea[2], Vector3f::UnitZ()); \endcode
* This corresponds to the right-multiply conventions (with right hand side frames).
*
* The returned angles are in the ranges [0:pi]x[0:pi]x[-pi:pi].
* The returned angles are in the ranges [0:pi]x[-pi:pi]x[-pi:pi].
*
* \sa class AngleAxis
*/
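
A minimal sketch (not part of the diff) of the call the corrected ranges refer to; the rotation is arbitrary:

#include <Eigen/Geometry>
#include <iostream>

int main() {
  Eigen::Matrix3d R = Eigen::AngleAxisd(0.3, Eigen::Vector3d::UnitX()).toRotationMatrix();
  // 2,1,0 selects the Z-Y-X convention; the angles fall in [0:pi] x [-pi:pi] x [-pi:pi].
  Eigen::Vector3d ea = R.eulerAngles(2, 1, 0);
  std::cout << ea.transpose() << std::endl;
  return 0;
}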


@ -150,10 +150,6 @@ public:
/** \returns the conjugated quaternion */
Quaternion<Scalar> conjugate() const;
/** \returns an interpolation for a constant motion between \a other and \c *this
* \a t in [0;1]
* see http://en.wikipedia.org/wiki/Slerp
*/
template<class OtherDerived> Quaternion<Scalar> slerp(const Scalar& t, const QuaternionBase<OtherDerived>& other) const;
/** \returns \c true if \c *this is approximately equal to \a other, within the precision
@ -194,11 +190,11 @@ public:
* \brief The quaternion class used to represent 3D orientations and rotations
*
* \tparam _Scalar the scalar type, i.e., the type of the coefficients
* \tparam _Options controls the memory alignement of the coeffecients. Can be \# AutoAlign or \# DontAlign. Default is AutoAlign.
* \tparam _Options controls the memory alignment of the coefficients. Can be \# AutoAlign or \# DontAlign. Default is AutoAlign.
*
* This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of
* orientations and rotations of objects in three dimensions. Compared to other representations
* like Euler angles or 3x3 matrices, quatertions offer the following advantages:
* like Euler angles or 3x3 matrices, quaternions offer the following advantages:
* \li \b compact storage (4 scalars)
* \li \b efficient to compose (28 flops),
* \li \b stable spherical interpolation
@ -385,7 +381,7 @@ class Map<Quaternion<_Scalar>, _Options >
/** Constructs a Mapped Quaternion object from the pointer \a coeffs
*
* The pointer \a coeffs must reference the four coeffecients of Quaternion in the following order:
* The pointer \a coeffs must reference the four coefficients of Quaternion in the following order:
* \code *coeffs == {x, y, z, w} \endcode
*
* If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */
@ -399,16 +395,16 @@ class Map<Quaternion<_Scalar>, _Options >
};
/** \ingroup Geometry_Module
* Map an unaligned array of single precision scalar as a quaternion */
* Map an unaligned array of single precision scalars as a quaternion */
typedef Map<Quaternion<float>, 0> QuaternionMapf;
/** \ingroup Geometry_Module
* Map an unaligned array of double precision scalar as a quaternion */
* Map an unaligned array of double precision scalars as a quaternion */
typedef Map<Quaternion<double>, 0> QuaternionMapd;
/** \ingroup Geometry_Module
* Map a 16-bits aligned array of double precision scalars as a quaternion */
* Map a 16-byte aligned array of single precision scalars as a quaternion */
typedef Map<Quaternion<float>, Aligned> QuaternionMapAlignedf;
/** \ingroup Geometry_Module
* Map a 16-bits aligned array of double precision scalars as a quaternion */
* Map a 16-byte aligned array of double precision scalars as a quaternion */
typedef Map<Quaternion<double>, Aligned> QuaternionMapAlignedd;
/***************************************************************************
@ -579,7 +575,7 @@ inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Deri
Scalar c = v1.dot(v0);
// if dot == -1, vectors are nearly opposites
// => accuraletly compute the rotation axis by computing the
// => accurately compute the rotation axis by computing the
// intersection of the two planes. This is done by solving:
// x^T v0 = 0
// x^T v1 = 0
@ -677,8 +673,13 @@ QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& oth
return static_cast<Scalar>(2 * acos(d));
}
/** \returns the spherical linear interpolation between the two quaternions
* \c *this and \a other at the parameter \a t
* \c *this and \a other at the parameter \a t in [0;1].
*
* This represents an interpolation for a constant motion between \c *this and \a other,
* see also http://en.wikipedia.org/wiki/Slerp.
*/
template <class Derived>
template <class OtherDerived>
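
A short sketch (not part of the diff) of the slerp call this documentation describes; the rotations are arbitrary:

#include <Eigen/Geometry>
#include <iostream>

int main() {
  Eigen::Quaterniond qa = Eigen::Quaterniond::Identity();
  Eigen::Quaterniond qb(Eigen::AngleAxisd(1.5707963267948966, Eigen::Vector3d::UnitZ()));  // 90 degrees about Z
  Eigen::Quaterniond qm = qa.slerp(0.5, qb);           // constant-speed interpolation at t = 0.5
  std::cout << qm.coeffs().transpose() << std::endl;   // stored as x y z w
  return 0;
}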


@ -530,9 +530,9 @@ public:
inline Transform& operator=(const UniformScaling<Scalar>& t);
inline Transform& operator*=(const UniformScaling<Scalar>& s) { return scale(s.factor()); }
inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Isometry)> operator*(const UniformScaling<Scalar>& s) const
inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?int(Affine):int(Mode))> operator*(const UniformScaling<Scalar>& s) const
{
Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Isometry),Options> res = *this;
Transform<Scalar,Dim,(int(Mode)==int(Isometry)?int(Affine):int(Mode)),Options> res = *this;
res.scale(s.factor());
return res;
}
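
A minimal sketch (not part of the diff) of the composition whose return mode is fixed above:

#include <Eigen/Geometry>

int main() {
  Eigen::Isometry3d iso = Eigen::Isometry3d::Identity();
  // Scaling breaks the isometry, so the product is an affine transform;
  // an affine input now keeps its own mode instead of degrading to Isometry.
  Eigen::Affine3d aff  = iso * Eigen::Scaling(2.0);
  Eigen::Affine3d aff2 = aff * Eigen::Scaling(0.5);
  (void)aff2;
  return 0;
}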


@ -349,7 +349,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
return m_usePrescribedThreshold ? m_prescribedThreshold
// this formula comes from experimenting (see "LU precision tuning" thread on the list)
// and turns out to be identical to Higham's formula used already in LDLt.
: NumTraits<Scalar>::epsilon() * m_qr.diagonalSize();
: NumTraits<Scalar>::epsilon() * RealScalar(m_qr.diagonalSize());
}
/** \returns the number of nonzero pivots in the QR decomposition.
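
A small sketch (not part of the diff) of where this threshold is consumed; the matrix is constructed to be rank deficient:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d A;
  A << 1, 2, 3,
       2, 4, 6,   // second row is twice the first
       1, 0, 1;
  Eigen::ColPivHouseholderQR<Eigen::Matrix3d> qr(A);
  std::cout << "rank: " << qr.rank() << std::endl;  // expected: 2
  return 0;
}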


@ -63,9 +63,10 @@ template<typename _MatrixType> class FullPivHouseholderQR
typedef typename MatrixType::Index Index;
typedef internal::FullPivHouseholderQRMatrixQReturnType<MatrixType> MatrixQReturnType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef Matrix<Index, 1, ColsAtCompileTime, RowMajor, 1, MaxColsAtCompileTime> IntRowVectorType;
typedef Matrix<Index, 1,
EIGEN_SIZE_MIN_PREFER_DYNAMIC(ColsAtCompileTime,RowsAtCompileTime), RowMajor, 1,
EIGEN_SIZE_MIN_PREFER_FIXED(MaxColsAtCompileTime,MaxRowsAtCompileTime)> IntDiagSizeVectorType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;
typedef typename internal::plain_col_type<MatrixType, Index>::type IntColVectorType;
typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
typedef typename internal::plain_col_type<MatrixType>::type ColVectorType;
@ -93,10 +94,10 @@ template<typename _MatrixType> class FullPivHouseholderQR
FullPivHouseholderQR(Index rows, Index cols)
: m_qr(rows, cols),
m_hCoeffs((std::min)(rows,cols)),
m_rows_transpositions(rows),
m_cols_transpositions(cols),
m_rows_transpositions((std::min)(rows,cols)),
m_cols_transpositions((std::min)(rows,cols)),
m_cols_permutation(cols),
m_temp((std::min)(rows,cols)),
m_temp(cols),
m_isInitialized(false),
m_usePrescribedThreshold(false) {}
@ -115,10 +116,10 @@ template<typename _MatrixType> class FullPivHouseholderQR
FullPivHouseholderQR(const MatrixType& matrix)
: m_qr(matrix.rows(), matrix.cols()),
m_hCoeffs((std::min)(matrix.rows(), matrix.cols())),
m_rows_transpositions(matrix.rows()),
m_cols_transpositions(matrix.cols()),
m_rows_transpositions((std::min)(matrix.rows(), matrix.cols())),
m_cols_transpositions((std::min)(matrix.rows(), matrix.cols())),
m_cols_permutation(matrix.cols()),
m_temp((std::min)(matrix.rows(), matrix.cols())),
m_temp(matrix.cols()),
m_isInitialized(false),
m_usePrescribedThreshold(false)
{
@ -126,11 +127,12 @@ template<typename _MatrixType> class FullPivHouseholderQR
}
/** This method finds a solution x to the equation Ax=b, where A is the matrix of which
* *this is the QR decomposition, if any exists.
* \c *this is the QR decomposition.
*
* \param b the right-hand-side of the equation to solve.
*
* \returns a solution.
* \returns the exact or least-square solution if the rank is greater or equal to the number of columns of A,
* and an arbitrary solution otherwise.
*
* \note The case where b is a matrix is not yet implemented. Also, this
* code is space inefficient.
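For reference, a minimal usage sketch of the solve() behaviour described above (my own example, assuming a full-column-rank A so the least-squares solution is returned):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 3);   // full column rank with probability 1
      Eigen::VectorXd b = Eigen::VectorXd::Random(6);
      Eigen::FullPivHouseholderQR<Eigen::MatrixXd> qr(A);
      Eigen::VectorXd x = qr.solve(b);                     // least-squares solution of Ax = b
      std::cout << "residual norm: " << (A * x - b).norm() << std::endl;
      return 0;
    }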
@ -172,7 +174,7 @@ template<typename _MatrixType> class FullPivHouseholderQR
}
/** \returns a const reference to the vector of indices representing the rows transpositions */
const IntColVectorType& rowsTranspositions() const
const IntDiagSizeVectorType& rowsTranspositions() const
{
eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
return m_rows_transpositions;
@ -344,7 +346,7 @@ template<typename _MatrixType> class FullPivHouseholderQR
return m_usePrescribedThreshold ? m_prescribedThreshold
// this formula comes from experimenting (see "LU precision tuning" thread on the list)
// and turns out to be identical to Higham's formula used already in LDLt.
: NumTraits<Scalar>::epsilon() * m_qr.diagonalSize();
: NumTraits<Scalar>::epsilon() * RealScalar(m_qr.diagonalSize());
}
/** \returns the number of nonzero pivots in the QR decomposition.
@ -368,8 +370,8 @@ template<typename _MatrixType> class FullPivHouseholderQR
protected:
MatrixType m_qr;
HCoeffsType m_hCoeffs;
IntColVectorType m_rows_transpositions;
IntRowVectorType m_cols_transpositions;
IntDiagSizeVectorType m_rows_transpositions;
IntDiagSizeVectorType m_cols_transpositions;
PermutationType m_cols_permutation;
RowVectorType m_temp;
bool m_isInitialized, m_usePrescribedThreshold;
@ -415,10 +417,10 @@ FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(cons
m_temp.resize(cols);
m_precision = NumTraits<Scalar>::epsilon() * size;
m_precision = NumTraits<Scalar>::epsilon() * RealScalar(size);
m_rows_transpositions.resize(matrix.rows());
m_cols_transpositions.resize(matrix.cols());
m_rows_transpositions.resize(size);
m_cols_transpositions.resize(size);
Index number_of_transpositions = 0;
RealScalar biggest(0);
@ -516,17 +518,6 @@ struct solve_retval<FullPivHouseholderQR<_MatrixType>, Rhs>
dec().hCoeffs().coeff(k), &temp.coeffRef(0));
}
if(!dec().isSurjective())
{
// is c is in the image of R ?
RealScalar biggest_in_upper_part_of_c = c.topRows( dec().rank() ).cwiseAbs().maxCoeff();
RealScalar biggest_in_lower_part_of_c = c.bottomRows(rows-dec().rank()).cwiseAbs().maxCoeff();
// FIXME brain dead
const RealScalar m_precision = NumTraits<Scalar>::epsilon() * (std::min)(rows,cols);
// this internal:: prefix is needed by at least gcc 3.4 and ICC
if(!internal::isMuchSmallerThan(biggest_in_lower_part_of_c, biggest_in_upper_part_of_c, m_precision))
return;
}
dec().matrixQR()
.topLeftCorner(dec().rank(), dec().rank())
.template triangularView<Upper>()
@ -548,14 +539,14 @@ template<typename MatrixType> struct FullPivHouseholderQRMatrixQReturnType
{
public:
typedef typename MatrixType::Index Index;
typedef typename internal::plain_col_type<MatrixType, Index>::type IntColVectorType;
typedef typename FullPivHouseholderQR<MatrixType>::IntDiagSizeVectorType IntDiagSizeVectorType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef Matrix<typename MatrixType::Scalar, 1, MatrixType::RowsAtCompileTime, RowMajor, 1,
MatrixType::MaxRowsAtCompileTime> WorkVectorType;
FullPivHouseholderQRMatrixQReturnType(const MatrixType& qr,
const HCoeffsType& hCoeffs,
const IntColVectorType& rowsTranspositions)
const IntDiagSizeVectorType& rowsTranspositions)
: m_qr(qr),
m_hCoeffs(hCoeffs),
m_rowsTranspositions(rowsTranspositions)
@ -595,7 +586,7 @@ public:
protected:
typename MatrixType::Nested m_qr;
typename HCoeffsType::Nested m_hCoeffs;
typename IntColVectorType::Nested m_rowsTranspositions;
typename IntDiagSizeVectorType::Nested m_rowsTranspositions;
};
} // end namespace internal

View File

@ -64,7 +64,8 @@ class SPQR
typedef PermutationMatrix<Dynamic, Dynamic> PermutationType;
public:
SPQR()
: m_ordering(SPQR_ORDERING_DEFAULT),
: m_isInitialized(false),
m_ordering(SPQR_ORDERING_DEFAULT),
m_allow_tol(SPQR_DEFAULT_TOL),
m_tolerance (NumTraits<Scalar>::epsilon())
{
@ -72,7 +73,8 @@ class SPQR
}
SPQR(const _MatrixType& matrix)
: m_ordering(SPQR_ORDERING_DEFAULT),
: m_isInitialized(false),
m_ordering(SPQR_ORDERING_DEFAULT),
m_allow_tol(SPQR_DEFAULT_TOL),
m_tolerance (NumTraits<Scalar>::epsilon())
{
@ -82,16 +84,22 @@ class SPQR
~SPQR()
{
// Calls SuiteSparseQR_free()
SPQR_free();
cholmod_l_finish(&m_cc);
}
void SPQR_free()
{
cholmod_l_free_sparse(&m_H, &m_cc);
cholmod_l_free_sparse(&m_cR, &m_cc);
cholmod_l_free_dense(&m_HTau, &m_cc);
std::free(m_E);
std::free(m_HPinv);
cholmod_l_finish(&m_cc);
}
void compute(const _MatrixType& matrix)
{
if(m_isInitialized) SPQR_free();
MatrixType mat(matrix);
cholmod_sparse A;
A = viewAsCholmod(mat);
@ -139,7 +147,7 @@ class SPQR
eigen_assert(b.cols()==1 && "This method is for vectors only");
//Compute Q^T * b
Dest y;
typename Dest::PlainObject y;
y = matrixQ().transpose() * b;
// Solves with the triangular matrix R
Index rk = this->rank();
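The hunks above make repeated calls to compute() release the previous factorization before building a new one. A hedged usage sketch (mine, not from the patch; it assumes SuiteSparse/SPQR is installed and linked):

    #include <Eigen/Sparse>
    #include <Eigen/SPQRSupport>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      SpMat A(5, 3);
      A.insert(0, 0) = 2.0;  A.insert(1, 1) = 3.0;  A.insert(2, 2) = 4.0;
      A.insert(3, 0) = 1.0;  A.insert(4, 2) = 1.0;
      A.makeCompressed();
      Eigen::VectorXd b = Eigen::VectorXd::Ones(5);
      Eigen::SPQR<SpMat> solver;
      solver.compute(A);                   // may now be called repeatedly without leaking
      Eigen::VectorXd x = solver.solve(b); // sparse least-squares solution
      return 0;
    }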

View File

@ -380,7 +380,10 @@ struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
work_matrix.row(p) *= z;
if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
if(work_matrix.coeff(q,q)!=Scalar(0))
z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
else
z = Scalar(0);
work_matrix.row(q) *= z;
if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
}

View File

@ -66,9 +66,9 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
}
// unordered insertion
for(int k=0; k<nnz; ++k)
for(Index k=0; k<nnz; ++k)
{
int i = indices[k];
Index i = indices[k];
res.insertBackByOuterInnerUnordered(j,i) = values[i];
mask[i] = false;
}
@ -76,8 +76,8 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
#if 0
// alternative ordered insertion code:
int t200 = rows/(log2(200)*1.39);
int t = (rows*100)/139;
Index t200 = rows/(log2(200)*1.39);
Index t = (rows*100)/139;
// FIXME reserve nnz non zeros
// FIXME implement fast sort algorithms for very small nnz
@ -90,9 +90,9 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
if(true)
{
if(nnz>1) std::sort(indices.data(),indices.data()+nnz);
for(int k=0; k<nnz; ++k)
for(Index k=0; k<nnz; ++k)
{
int i = indices[k];
Index i = indices[k];
res.insertBackByOuterInner(j,i) = values[i];
mask[i] = false;
}
@ -100,7 +100,7 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
else
{
// dense path
for(int i=0; i<rows; ++i)
for(Index i=0; i<rows; ++i)
{
if(mask[i])
{
@ -134,8 +134,8 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
ColMajorMatrix resCol(lhs.rows(),rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
// sort the non zeros:
@ -149,7 +149,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
RowMajorMatrix rhsRow = rhs;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<RowMajorMatrix,Lhs,RowMajorMatrix>(rhsRow, lhs, resRow);
@ -162,7 +162,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
RowMajorMatrix lhsRow = lhs;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorMatrix,RowMajorMatrix>(rhs, lhsRow, resRow);
@ -175,7 +175,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
res = resRow;
@ -190,7 +190,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
res = resCol;
@ -202,7 +202,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
ColMajorMatrix lhsCol = lhs;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<ColMajorMatrix,Rhs,ColMajorMatrix>(lhsCol, rhs, resCol);
@ -215,7 +215,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
ColMajorMatrix rhsCol = rhs;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorMatrix,ColMajorMatrix>(lhs, rhsCol, resCol);
@ -228,8 +228,8 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
RowMajorMatrix resRow(lhs.rows(),rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
// sort the non zeros:
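These selector changes thread the result's Index type through the internal temporaries, so products of sparse matrices with a non-default index type keep that type end to end. An illustrative sketch of my own:

    #include <Eigen/Sparse>

    int main()
    {
      typedef Eigen::SparseMatrix<double, Eigen::ColMajor, long> SpMatL;   // non-default index type
      SpMatL A(3, 3), B(3, 3);
      A.insert(0, 0) = 1.0;  A.insert(1, 2) = 2.0;
      B.insert(0, 0) = 4.0;  B.insert(2, 1) = 3.0;
      SpMatL C = A * B;   // conservative sparse-sparse product, long indices throughout
      return 0;
    }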

View File

@ -51,6 +51,8 @@ class MappedSparseMatrix
inline Index innerSize() const { return m_innerSize; }
inline Index outerSize() const { return m_outerSize; }
bool isCompressed() const { return true; }
//----------------------------------------
// direct access interface
inline const Scalar* valuePtr() const { return m_values; }

View File

@ -66,6 +66,8 @@ public:
typename XprType::Nested m_matrix;
Index m_outerStart;
const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
};
@ -392,6 +394,8 @@ public:
friend class InnerIterator;
friend class ReverseInnerIterator;
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
typename XprType::Nested m_matrix;
const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;

View File

@ -63,6 +63,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
typedef typename MatrixType::Index Index;
Index nc = mat.cols(); // Number of columns
Index m = mat.rows();
Index diagSize = (std::min)(nc,m);
IndexVector root(nc); // root of subtree of etree
root.setZero();
IndexVector pp(nc); // disjoint sets
@ -72,7 +73,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
Index row,col;
firstRowElt.resize(m);
firstRowElt.setConstant(nc);
firstRowElt.segment(0, nc).setLinSpaced(nc, 0, nc-1);
firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1);
bool found_diag;
for (col = 0; col < nc; col++)
{
@ -91,7 +92,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
Index rset, cset, rroot;
for (col = 0; col < nc; col++)
{
found_diag = false;
found_diag = col>=m;
pp(col) = col;
cset = col;
root(cset) = col;
@ -105,6 +106,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
Index i = col;
if(it) i = it.index();
if (i == col) found_diag = true;
row = firstRowElt(i);
if (row >= col) continue;
rset = internal::etree_find(row, pp); // Find the name of the set containing row

View File

@ -125,7 +125,7 @@ class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNes
inline Scalar value() const { return Base::value() * m_factor; }
protected:
int m_outer;
Index m_outer;
Scalar m_factor;
};
@ -155,7 +155,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, R
{
for(Index c=0; c<rhs.cols(); ++c)
{
int n = lhs.outerSize();
Index n = lhs.outerSize();
for(Index j=0; j<n; ++j)
{
typename Res::Scalar tmp(0);

View File

@ -223,7 +223,7 @@ class SparseMatrix
if(isCompressed())
{
reserve(VectorXi::Constant(outerSize(), 2));
reserve(Matrix<Index,Dynamic,1>::Constant(outerSize(), 2));
}
return insertUncompressed(row,col);
}
@ -402,7 +402,7 @@ class SparseMatrix
* \sa insertBack, insertBackByOuterInner */
inline void startVec(Index outer)
{
eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
m_outerIndex[outer+1] = m_outerIndex[outer];
}
@ -480,7 +480,7 @@ class SparseMatrix
if(m_innerNonZeros != 0)
return;
m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
for (int i = 0; i < m_outerSize; i++)
for (Index i = 0; i < m_outerSize; i++)
{
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
}
@ -752,8 +752,8 @@ class SparseMatrix
else
for (Index i=0; i<m.outerSize(); ++i)
{
int p = m.m_outerIndex[i];
int pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
Index p = m.m_outerIndex[i];
Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
Index k=p;
for (; k<pe; ++k)
s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
@ -939,12 +939,13 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
EIGEN_UNUSED_VARIABLE(Options);
enum { IsRowMajor = SparseMatrixType::IsRowMajor };
typedef typename SparseMatrixType::Scalar Scalar;
typedef typename SparseMatrixType::Index Index;
SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols());
if(begin<end)
if(begin!=end)
{
// pass 1: count the nnz per inner-vector
VectorXi wi(trMat.outerSize());
Matrix<Index,Dynamic,1> wi(trMat.outerSize());
wi.setZero();
for(InputIterator it(begin); it!=end; ++it)
{
@ -1018,11 +1019,11 @@ void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
{
eigen_assert(!isCompressed());
// TODO, in practice we should be able to use m_innerNonZeros for that task
VectorXi wi(innerSize());
Matrix<Index,Dynamic,1> wi(innerSize());
wi.fill(-1);
Index count = 0;
// for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
for(int j=0; j<outerSize(); ++j)
for(Index j=0; j<outerSize(); ++j)
{
Index start = count;
Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
@ -1081,7 +1082,7 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Opt
// prefix sum
Index count = 0;
VectorXi positions(dest.outerSize());
Matrix<Index,Dynamic,1> positions(dest.outerSize());
for (Index j=0; j<dest.outerSize(); ++j)
{
Index tmp = dest.m_outerIndex[j];
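The set_from_triplets() changes above relax the iterator requirement from begin<end to begin!=end and use the matrix's own Index type for the work vectors. The public entry point is SparseMatrix::setFromTriplets(); a small sketch of my own:

    #include <Eigen/Sparse>
    #include <vector>

    int main()
    {
      typedef Eigen::Triplet<double> T;   // (row, col, value)
      std::vector<T> triplets;
      triplets.push_back(T(0, 0, 3.0));
      triplets.push_back(T(1, 2, 1.5));
      triplets.push_back(T(1, 2, 0.5));   // duplicate entries are summed
      Eigen::SparseMatrix<double> A(2, 3);
      A.setFromTriplets(triplets.begin(), triplets.end());
      return 0;
    }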

View File

@ -302,8 +302,8 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
}
else
{
SparseMatrix<Scalar, RowMajorBit> trans = m;
s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit> >&>(trans);
SparseMatrix<Scalar, RowMajorBit, Index> trans = m;
s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit, Index> >&>(trans);
}
}
return s;

View File

@ -57,7 +57,7 @@ struct permut_sparsematrix_product_retval
if(MoveOuter)
{
SparseMatrix<Scalar,SrcStorageOrder,Index> tmp(m_matrix.rows(), m_matrix.cols());
VectorXi sizes(m_matrix.outerSize());
Matrix<Index,Dynamic,1> sizes(m_matrix.outerSize());
for(Index j=0; j<m_matrix.outerSize(); ++j)
{
Index jp = m_permutation.indices().coeff(j);
@ -77,7 +77,7 @@ struct permut_sparsematrix_product_retval
else
{
SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,Index> tmp(m_matrix.rows(), m_matrix.cols());
VectorXi sizes(tmp.outerSize());
Matrix<Index,Dynamic,1> sizes(tmp.outerSize());
sizes.setZero();
PermutationMatrix<Dynamic,Dynamic,Index> perm;
if((Side==OnTheLeft) ^ Transposed)

View File

@ -16,6 +16,7 @@ template<typename Lhs, typename Rhs>
struct SparseSparseProductReturnType
{
typedef typename internal::traits<Lhs>::Scalar Scalar;
typedef typename internal::traits<Lhs>::Index Index;
enum {
LhsRowMajor = internal::traits<Lhs>::Flags & RowMajorBit,
RhsRowMajor = internal::traits<Rhs>::Flags & RowMajorBit,
@ -24,11 +25,11 @@ struct SparseSparseProductReturnType
};
typedef typename internal::conditional<TransposeLhs,
SparseMatrix<Scalar,0>,
SparseMatrix<Scalar,0,Index>,
typename internal::nested<Lhs,Rhs::RowsAtCompileTime>::type>::type LhsNested;
typedef typename internal::conditional<TransposeRhs,
SparseMatrix<Scalar,0>,
SparseMatrix<Scalar,0,Index>,
typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type>::type RhsNested;
typedef SparseSparseProduct<LhsNested, RhsNested> Type;

View File

@ -27,7 +27,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
// make sure to call innerSize/outerSize since we fake the storage order.
Index rows = lhs.innerSize();
Index cols = rhs.outerSize();
//int size = lhs.outerSize();
//Index size = lhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
// allocate a temporary buffer
@ -100,7 +100,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,C
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
// we need a col-major matrix to hold the result
typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> SparseTemporaryType;
SparseTemporaryType _res(res.rows(), res.cols());
internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res, tolerance);
res = _res;
@ -126,10 +126,11 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,R
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
ColMajorMatrix colLhs(lhs);
ColMajorMatrix colRhs(rhs);
internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrix,ColMajorMatrix,ResultType>(colLhs, colRhs, res, tolerance);
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixLhs;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixRhs;
ColMajorMatrixLhs colLhs(lhs);
ColMajorMatrixRhs colRhs(rhs);
internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,ColMajorMatrixRhs,ResultType>(colLhs, colRhs, res, tolerance);
// let's transpose the product to get a column x column product
// typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;

View File

@ -143,7 +143,7 @@ template<typename T> struct plain_matrix_type<T,Sparse>
*
* \sa SparseMatrix::setFromTriplets()
*/
template<typename Scalar, typename Index=unsigned int>
template<typename Scalar, typename Index=typename SparseMatrix<Scalar>::Index >
class Triplet
{
public:

View File

@ -219,9 +219,9 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
}
template<typename Rhs, typename Dest>
bool _solve(const MatrixBase<Rhs> &B, MatrixBase<Dest> &_X) const
bool _solve(const MatrixBase<Rhs> &B, MatrixBase<Dest> &X_base) const
{
Dest& X(_X.derived());
Dest& X(X_base.derived());
eigen_assert(m_factorizationIsOk && "The matrix should be factorized first");
EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
@ -229,8 +229,10 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
// Permute the right hand side to form X = Pr*B
// on return, X is overwritten by the computed solution
X.resize(B.rows(),B.cols());
// this ugly const_cast_derived() helps to detect aliasing when applying the permutations
for(Index j = 0; j < B.cols(); ++j)
X.col(j) = rowsPermutation() * B.col(j);
X.col(j) = rowsPermutation() * B.const_cast_derived().col(j);
//Forward substitution with L
this->matrixL().solveInPlace(X);
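The _solve() path above is what an ordinary SparseLU solve goes through; a minimal usage sketch (my own, with a column-major right-hand side as the static assert requires):

    #include <Eigen/Sparse>
    #include <Eigen/SparseLU>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      SpMat A(3, 3);
      A.insert(0, 0) = 4.0;  A.insert(1, 1) = 5.0;  A.insert(2, 2) = 6.0;
      A.insert(0, 2) = 1.0;
      A.makeCompressed();
      Eigen::VectorXd b(3);
      b << 1.0, 2.0, 3.0;
      Eigen::SparseLU<SpMat> lu;
      lu.compute(A);                     // analyzePattern + factorize
      Eigen::VectorXd x = lu.solve(b);   // row permutation, then L and U triangular solves
      return 0;
    }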

View File

@ -70,23 +70,30 @@ Index SparseLUImpl<Scalar,Index>::expand(VectorType& vec, Index& length, Index
if(num_expansions == 0 || keep_prev)
new_len = length ; // First time allocate requested
else
new_len = Index(alpha * length);
new_len = (std::max)(length+1,Index(alpha * length));
VectorType old_vec; // Temporary vector to hold the previous values
if (nbElts > 0 )
old_vec = vec.segment(0,nbElts);
//Allocate or expand the current vector
#ifdef EIGEN_EXCEPTIONS
try
#endif
{
vec.resize(new_len);
}
#ifdef EIGEN_EXCEPTIONS
catch(std::bad_alloc& )
#else
if(!vec.size())
#endif
{
if (!num_expansions)
{
// First time to allocate from LUMemInit()
throw; // Pass the exception to LUMemInit() which has a try... catch block
// Let LUMemInit() deals with it.
return -1;
}
if (keep_prev)
{
@ -100,12 +107,18 @@ Index SparseLUImpl<Scalar,Index>::expand(VectorType& vec, Index& length, Index
do
{
alpha = (alpha + 1)/2;
new_len = Index(alpha * length);
new_len = (std::max)(length+1,Index(alpha * length));
#ifdef EIGEN_EXCEPTIONS
try
#endif
{
vec.resize(new_len);
}
#ifdef EIGEN_EXCEPTIONS
catch(std::bad_alloc& )
#else
if (!vec.size())
#endif
{
tries += 1;
if ( tries > 10) return new_len;
@ -140,9 +153,8 @@ Index SparseLUImpl<Scalar,Index>::memInit(Index m, Index n, Index annz, Index lw
{
Index& num_expansions = glu.num_expansions; //No memory expansions so far
num_expansions = 0;
glu.nzumax = glu.nzlumax = (std::max)(fillratio * annz, m*n); // estimated number of nonzeros in U
glu.nzumax = glu.nzlumax = (std::min)(fillratio * annz / n, m) * n; // estimated number of nonzeros in U
glu.nzlmax = (std::max)(Index(4), fillratio) * annz / 4; // estimated nnz in L factor
// Return the estimated size to the user if necessary
Index tempSpace;
tempSpace = (2*panel_size + 4 + LUNoMarker) * m * sizeof(Index) + (panel_size + 1) * m * sizeof(Scalar);
@ -166,14 +178,10 @@ Index SparseLUImpl<Scalar,Index>::memInit(Index m, Index n, Index annz, Index lw
// Reserve memory for L/U factors
do
{
try
{
expand<ScalarVector>(glu.lusup, glu.nzlumax, 0, 0, num_expansions);
expand<ScalarVector>(glu.ucol,glu.nzumax, 0, 0, num_expansions);
expand<IndexVector>(glu.lsub,glu.nzlmax, 0, 0, num_expansions);
expand<IndexVector>(glu.usub,glu.nzumax, 0, 1, num_expansions);
}
catch(std::bad_alloc& )
if( (expand<ScalarVector>(glu.lusup, glu.nzlumax, 0, 0, num_expansions)<0)
|| (expand<ScalarVector>(glu.ucol, glu.nzumax, 0, 0, num_expansions)<0)
|| (expand<IndexVector> (glu.lsub, glu.nzlmax, 0, 0, num_expansions)<0)
|| (expand<IndexVector> (glu.usub, glu.nzumax, 0, 1, num_expansions)<0) )
{
//Reduce the estimated size and retry
glu.nzlumax /= 2;
@ -181,11 +189,8 @@ Index SparseLUImpl<Scalar,Index>::memInit(Index m, Index n, Index annz, Index lw
glu.nzlmax /= 2;
if (glu.nzlumax < annz ) return glu.nzlumax;
}
} while (!glu.lusup.size() || !glu.ucol.size() || !glu.lsub.size() || !glu.usub.size());
++num_expansions;
return 0;

View File

@ -161,8 +161,9 @@ class SparseQR
b = y;
// Solve with the triangular matrix R
y.resize((std::max)(cols(),Index(y.rows())),y.cols());
y.topRows(rank) = this->matrixR().topLeftCorner(rank, rank).template triangularView<Upper>().solve(b.topRows(rank));
y.bottomRows(y.size()-rank).setZero();
y.bottomRows(y.rows()-rank).setZero();
// Apply the column permutation
if (m_perm_c.size()) dest.topRows(cols()) = colsPermutation() * y.topRows(cols());
@ -338,7 +339,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
Index nonzeroCol = 0; // Record the number of valid pivots
// Left looking rank-revealing QR factorization: compute a column of R and Q at a time
for (Index col = 0; col < n; ++col)
for (Index col = 0; col < (std::min)(n,m); ++col)
{
mark.setConstant(-1);
m_R.startVec(col);
@ -346,7 +347,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
mark(nonzeroCol) = col;
Qidx(0) = nonzeroCol;
nzcolR = 0; nzcolQ = 1;
found_diag = false;
found_diag = col>=m;
tval.setZero();
// Symbolic factorization: find the nonzero locations of the column k of the factors R and Q, i.e.,
@ -571,6 +572,7 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
{
Scalar tau = Scalar(0);
tau = m_qr.m_Q.col(k).dot(res.col(j));
if(tau==Scalar(0)) continue;
tau = tau * m_qr.m_hcoeffs(k);
res.col(j) -= tau * m_qr.m_Q.col(k);
}
@ -578,7 +580,7 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
}
else
{
eigen_assert(m_qr.m_Q.cols() == m_other.rows() && "Non conforming object sizes");
eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes");
// Compute res = Q' * other column by column
for(Index j = 0; j < res.cols(); j++)
{
@ -586,6 +588,7 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
{
Scalar tau = Scalar(0);
tau = m_qr.m_Q.col(k).dot(res.col(j));
if(tau==Scalar(0)) continue;
tau = tau * m_qr.m_hcoeffs(k);
res.col(j) -= tau * m_qr.m_Q.col(k);
}
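The fixes above (rectangular loop bound, skipping zero tau, corrected size assert) all sit behind the ordinary SparseQR interface; an illustrative least-squares sketch of my own:

    #include <Eigen/Sparse>
    #include <Eigen/SparseQR>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      SpMat A(4, 2);                       // more rows than columns
      A.insert(0, 0) = 1.0;  A.insert(1, 1) = 2.0;
      A.insert(2, 0) = 3.0;  A.insert(3, 1) = 4.0;
      A.makeCompressed();
      Eigen::VectorXd b = Eigen::VectorXd::Ones(4);
      Eigen::SparseQR<SpMat, Eigen::COLAMDOrdering<int> > qr(A);
      Eigen::VectorXd x = qr.solve(b);     // least-squares solution
      return 0;
    }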

View File

@ -113,13 +113,13 @@ inline const Block<const Derived, CRows, CCols> topRightCorner() const
/** \returns an expression of a top-right corner of *this.
*
* \tparam CRows number of rows in corner as specified at compile time
* \tparam CCols number of columns in corner as specified at compile time
* \param cRows number of rows in corner as specified at run time
* \param cCols number of columns in corner as specified at run time
* \tparam CRows number of rows in corner as specified at compile-time
* \tparam CCols number of columns in corner as specified at compile-time
* \param cRows number of rows in corner as specified at run-time
* \param cCols number of columns in corner as specified at run-time
*
* This function is mainly useful for corners where the number of rows is specified at compile time
* and the number of columns is specified at run time, or vice versa. The compile-time and run-time
* This function is mainly useful for corners where the number of rows is specified at compile-time
* and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
* information should not contradict. In other words, \a cRows should equal \a CRows unless
* \a CRows is \a Dynamic, and the same for the number of columns.
*
@ -188,13 +188,13 @@ inline const Block<const Derived, CRows, CCols> topLeftCorner() const
/** \returns an expression of a top-left corner of *this.
*
* \tparam CRows number of rows in corner as specified at compile time
* \tparam CCols number of columns in corner as specified at compile time
* \param cRows number of rows in corner as specified at run time
* \param cCols number of columns in corner as specified at run time
* \tparam CRows number of rows in corner as specified at compile-time
* \tparam CCols number of columns in corner as specified at compile-time
* \param cRows number of rows in corner as specified at run-time
* \param cCols number of columns in corner as specified at run-time
*
* This function is mainly useful for corners where the number of rows is specified at compile time
* and the number of columns is specified at run time, or vice versa. The compile-time and run-time
* This function is mainly useful for corners where the number of rows is specified at compile-time
* and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
* information should not contradict. In other words, \a cRows should equal \a CRows unless
* \a CRows is \a Dynamic, and the same for the number of columns.
*
@ -263,13 +263,13 @@ inline const Block<const Derived, CRows, CCols> bottomRightCorner() const
/** \returns an expression of a bottom-right corner of *this.
*
* \tparam CRows number of rows in corner as specified at compile time
* \tparam CCols number of columns in corner as specified at compile time
* \param cRows number of rows in corner as specified at run time
* \param cCols number of columns in corner as specified at run time
* \tparam CRows number of rows in corner as specified at compile-time
* \tparam CCols number of columns in corner as specified at compile-time
* \param cRows number of rows in corner as specified at run-time
* \param cCols number of columns in corner as specified at run-time
*
* This function is mainly useful for corners where the number of rows is specified at compile time
* and the number of columns is specified at run time, or vice versa. The compile-time and run-time
* This function is mainly useful for corners where the number of rows is specified at compile-time
* and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
* information should not contradict. In other words, \a cRows should equal \a CRows unless
* \a CRows is \a Dynamic, and the same for the number of columns.
*
@ -338,13 +338,13 @@ inline const Block<const Derived, CRows, CCols> bottomLeftCorner() const
/** \returns an expression of a bottom-left corner of *this.
*
* \tparam CRows number of rows in corner as specified at compile time
* \tparam CCols number of columns in corner as specified at compile time
* \param cRows number of rows in corner as specified at run time
* \param cCols number of columns in corner as specified at run time
* \tparam CRows number of rows in corner as specified at compile-time
* \tparam CCols number of columns in corner as specified at compile-time
* \param cRows number of rows in corner as specified at run-time
* \param cCols number of columns in corner as specified at run-time
*
* This function is mainly useful for corners where the number of rows is specified at compile time
* and the number of columns is specified at run time, or vice versa. The compile-time and run-time
* This function is mainly useful for corners where the number of rows is specified at compile-time
* and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
* information should not contradict. In other words, \a cRows should equal \a CRows unless
* \a CRows is \a Dynamic, and the same for the number of columns.
*
@ -390,7 +390,11 @@ inline ConstRowsBlockXpr topRows(Index n) const
/** \returns a block consisting of the top rows of *this.
*
* \tparam N the number of rows in the block
* \tparam N the number of rows in the block as specified at compile-time
* \param n the number of rows in the block as specified at run-time
*
* The compile-time and run-time information should not contradict. In other words,
* \a n should equal \a N unless \a N is \a Dynamic.
*
* Example: \include MatrixBase_template_int_topRows.cpp
* Output: \verbinclude MatrixBase_template_int_topRows.out
@ -398,16 +402,16 @@ inline ConstRowsBlockXpr topRows(Index n) const
* \sa class Block, block(Index,Index,Index,Index)
*/
template<int N>
inline typename NRowsBlockXpr<N>::Type topRows()
inline typename NRowsBlockXpr<N>::Type topRows(Index n = N)
{
return typename NRowsBlockXpr<N>::Type(derived(), 0, 0, N, cols());
return typename NRowsBlockXpr<N>::Type(derived(), 0, 0, n, cols());
}
/** This is the const version of topRows<int>().*/
template<int N>
inline typename ConstNRowsBlockXpr<N>::Type topRows() const
inline typename ConstNRowsBlockXpr<N>::Type topRows(Index n = N) const
{
return typename ConstNRowsBlockXpr<N>::Type(derived(), 0, 0, N, cols());
return typename ConstNRowsBlockXpr<N>::Type(derived(), 0, 0, n, cols());
}
@ -434,7 +438,11 @@ inline ConstRowsBlockXpr bottomRows(Index n) const
/** \returns a block consisting of the bottom rows of *this.
*
* \tparam N the number of rows in the block
* \tparam N the number of rows in the block as specified at compile-time
* \param n the number of rows in the block as specified at run-time
*
* The compile-time and run-time information should not contradict. In other words,
* \a n should equal \a N unless \a N is \a Dynamic.
*
* Example: \include MatrixBase_template_int_bottomRows.cpp
* Output: \verbinclude MatrixBase_template_int_bottomRows.out
@ -442,16 +450,16 @@ inline ConstRowsBlockXpr bottomRows(Index n) const
* \sa class Block, block(Index,Index,Index,Index)
*/
template<int N>
inline typename NRowsBlockXpr<N>::Type bottomRows()
inline typename NRowsBlockXpr<N>::Type bottomRows(Index n = N)
{
return typename NRowsBlockXpr<N>::Type(derived(), rows() - N, 0, N, cols());
return typename NRowsBlockXpr<N>::Type(derived(), rows() - n, 0, n, cols());
}
/** This is the const version of bottomRows<int>().*/
template<int N>
inline typename ConstNRowsBlockXpr<N>::Type bottomRows() const
inline typename ConstNRowsBlockXpr<N>::Type bottomRows(Index n = N) const
{
return typename ConstNRowsBlockXpr<N>::Type(derived(), rows() - N, 0, N, cols());
return typename ConstNRowsBlockXpr<N>::Type(derived(), rows() - n, 0, n, cols());
}
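The new optional run-time argument documented above lets the fixed-size overloads take N = Dynamic; a short sketch of my own (assuming this Eigen version):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd M = Eigen::MatrixXd::Random(5, 4);
      std::cout << M.topRows<2>()                  << "\n\n";   // size fixed at compile-time
      std::cout << M.topRows(2)                    << "\n\n";   // size given at run-time
      std::cout << M.bottomRows<Eigen::Dynamic>(3) << "\n";     // N = Dynamic, n = 3 at run-time
      return 0;
    }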
@ -459,28 +467,32 @@ inline typename ConstNRowsBlockXpr<N>::Type bottomRows() const
/** \returns a block consisting of a range of rows of *this.
*
* \param startRow the index of the first row in the block
* \param numRows the number of rows in the block
* \param n the number of rows in the block
*
* Example: \include DenseBase_middleRows_int.cpp
* Output: \verbinclude DenseBase_middleRows_int.out
*
* \sa class Block, block(Index,Index,Index,Index)
*/
inline RowsBlockXpr middleRows(Index startRow, Index numRows)
inline RowsBlockXpr middleRows(Index startRow, Index n)
{
return RowsBlockXpr(derived(), startRow, 0, numRows, cols());
return RowsBlockXpr(derived(), startRow, 0, n, cols());
}
/** This is the const version of middleRows(Index,Index).*/
inline ConstRowsBlockXpr middleRows(Index startRow, Index numRows) const
inline ConstRowsBlockXpr middleRows(Index startRow, Index n) const
{
return ConstRowsBlockXpr(derived(), startRow, 0, numRows, cols());
return ConstRowsBlockXpr(derived(), startRow, 0, n, cols());
}
/** \returns a block consisting of a range of rows of *this.
*
* \tparam N the number of rows in the block
* \tparam N the number of rows in the block as specified at compile-time
* \param startRow the index of the first row in the block
* \param n the number of rows in the block as specified at run-time
*
* The compile-time and run-time information should not contradict. In other words,
* \a n should equal \a N unless \a N is \a Dynamic.
*
* Example: \include DenseBase_template_int_middleRows.cpp
* Output: \verbinclude DenseBase_template_int_middleRows.out
@ -488,16 +500,16 @@ inline ConstRowsBlockXpr middleRows(Index startRow, Index numRows) const
* \sa class Block, block(Index,Index,Index,Index)
*/
template<int N>
inline typename NRowsBlockXpr<N>::Type middleRows(Index startRow)
inline typename NRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N)
{
return typename NRowsBlockXpr<N>::Type(derived(), startRow, 0, N, cols());
return typename NRowsBlockXpr<N>::Type(derived(), startRow, 0, n, cols());
}
/** This is the const version of middleRows<int>().*/
template<int N>
inline typename ConstNRowsBlockXpr<N>::Type middleRows(Index startRow) const
inline typename ConstNRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N) const
{
return typename ConstNRowsBlockXpr<N>::Type(derived(), startRow, 0, N, cols());
return typename ConstNRowsBlockXpr<N>::Type(derived(), startRow, 0, n, cols());
}
@ -524,7 +536,11 @@ inline ConstColsBlockXpr leftCols(Index n) const
/** \returns a block consisting of the left columns of *this.
*
* \tparam N the number of columns in the block
* \tparam N the number of columns in the block as specified at compile-time
* \param n the number of columns in the block as specified at run-time
*
* The compile-time and run-time information should not contradict. In other words,
* \a n should equal \a N unless \a N is \a Dynamic.
*
* Example: \include MatrixBase_template_int_leftCols.cpp
* Output: \verbinclude MatrixBase_template_int_leftCols.out
@ -532,16 +548,16 @@ inline ConstColsBlockXpr leftCols(Index n) const
* \sa class Block, block(Index,Index,Index,Index)
*/
template<int N>
inline typename NColsBlockXpr<N>::Type leftCols()
inline typename NColsBlockXpr<N>::Type leftCols(Index n = N)
{
return typename NColsBlockXpr<N>::Type(derived(), 0, 0, rows(), N);
return typename NColsBlockXpr<N>::Type(derived(), 0, 0, rows(), n);
}
/** This is the const version of leftCols<int>().*/
template<int N>
inline typename ConstNColsBlockXpr<N>::Type leftCols() const
inline typename ConstNColsBlockXpr<N>::Type leftCols(Index n = N) const
{
return typename ConstNColsBlockXpr<N>::Type(derived(), 0, 0, rows(), N);
return typename ConstNColsBlockXpr<N>::Type(derived(), 0, 0, rows(), n);
}
@ -568,7 +584,11 @@ inline ConstColsBlockXpr rightCols(Index n) const
/** \returns a block consisting of the right columns of *this.
*
* \tparam N the number of columns in the block
* \tparam N the number of columns in the block as specified at compile-time
* \param n the number of columns in the block as specified at run-time
*
* The compile-time and run-time information should not contradict. In other words,
* \a n should equal \a N unless \a N is \a Dynamic.
*
* Example: \include MatrixBase_template_int_rightCols.cpp
* Output: \verbinclude MatrixBase_template_int_rightCols.out
@ -576,16 +596,16 @@ inline ConstColsBlockXpr rightCols(Index n) const
* \sa class Block, block(Index,Index,Index,Index)
*/
template<int N>
inline typename NColsBlockXpr<N>::Type rightCols()
inline typename NColsBlockXpr<N>::Type rightCols(Index n = N)
{
return typename NColsBlockXpr<N>::Type(derived(), 0, cols() - N, rows(), N);
return typename NColsBlockXpr<N>::Type(derived(), 0, cols() - n, rows(), n);
}
/** This is the const version of rightCols<int>().*/
template<int N>
inline typename ConstNColsBlockXpr<N>::Type rightCols() const
inline typename ConstNColsBlockXpr<N>::Type rightCols(Index n = N) const
{
return typename ConstNColsBlockXpr<N>::Type(derived(), 0, cols() - N, rows(), N);
return typename ConstNColsBlockXpr<N>::Type(derived(), 0, cols() - n, rows(), n);
}
@ -613,8 +633,12 @@ inline ConstColsBlockXpr middleCols(Index startCol, Index numCols) const
/** \returns a block consisting of a range of columns of *this.
*
* \tparam N the number of columns in the block
* \tparam N the number of columns in the block as specified at compile-time
* \param startCol the index of the first column in the block
* \param n the number of columns in the block as specified at run-time
*
* The compile-time and run-time information should not contradict. In other words,
* \a n should equal \a N unless \a N is \a Dynamic.
*
* Example: \include DenseBase_template_int_middleCols.cpp
* Output: \verbinclude DenseBase_template_int_middleCols.out
@ -622,16 +646,16 @@ inline ConstColsBlockXpr middleCols(Index startCol, Index numCols) const
* \sa class Block, block(Index,Index,Index,Index)
*/
template<int N>
inline typename NColsBlockXpr<N>::Type middleCols(Index startCol)
inline typename NColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N)
{
return typename NColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), N);
return typename NColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), n);
}
/** This is the const version of middleCols<int>().*/
template<int N>
inline typename ConstNColsBlockXpr<N>::Type middleCols(Index startCol) const
inline typename ConstNColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N) const
{
return typename ConstNColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), N);
return typename ConstNColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), n);
}
@ -667,15 +691,15 @@ inline const Block<const Derived, BlockRows, BlockCols> block(Index startRow, In
/** \returns an expression of a block in *this.
*
* \tparam BlockRows number of rows in block as specified at compile time
* \tparam BlockCols number of columns in block as specified at compile time
* \tparam BlockRows number of rows in block as specified at compile-time
* \tparam BlockCols number of columns in block as specified at compile-time
* \param startRow the first row in the block
* \param startCol the first column in the block
* \param blockRows number of rows in block as specified at run time
* \param blockCols number of columns in block as specified at run time
* \param blockRows number of rows in block as specified at run-time
* \param blockCols number of columns in block as specified at run-time
*
* This function is mainly useful for blocks where the number of rows is specified at compile time
* and the number of columns is specified at run time, or vice versa. The compile-time and run-time
* This function is mainly useful for blocks where the number of rows is specified at compile-time
* and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
* information should not contradict. In other words, \a blockRows should equal \a BlockRows unless
* \a BlockRows is \a Dynamic, and the same for the number of columns.
*
@ -738,7 +762,7 @@ inline ConstRowXpr row(Index i) const
* \only_for_vectors
*
* \param start the first coefficient in the segment
* \param vecSize the number of coefficients in the segment
* \param n the number of coefficients in the segment
*
* Example: \include MatrixBase_segment_int_int.cpp
* Output: \verbinclude MatrixBase_segment_int_int.out
@ -749,25 +773,25 @@ inline ConstRowXpr row(Index i) const
*
* \sa class Block, segment(Index)
*/
inline SegmentReturnType segment(Index start, Index vecSize)
inline SegmentReturnType segment(Index start, Index n)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return SegmentReturnType(derived(), start, vecSize);
return SegmentReturnType(derived(), start, n);
}
/** This is the const version of segment(Index,Index).*/
inline ConstSegmentReturnType segment(Index start, Index vecSize) const
inline ConstSegmentReturnType segment(Index start, Index n) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return ConstSegmentReturnType(derived(), start, vecSize);
return ConstSegmentReturnType(derived(), start, n);
}
/** \returns a dynamic-size expression of the first coefficients of *this.
*
* \only_for_vectors
*
* \param vecSize the number of coefficients in the block
* \param n the number of coefficients in the segment
*
* Example: \include MatrixBase_start_int.cpp
* Output: \verbinclude MatrixBase_start_int.out
@ -778,25 +802,24 @@ inline ConstSegmentReturnType segment(Index start, Index vecSize) const
*
* \sa class Block, block(Index,Index)
*/
inline SegmentReturnType head(Index vecSize)
inline SegmentReturnType head(Index n)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return SegmentReturnType(derived(), 0, vecSize);
return SegmentReturnType(derived(), 0, n);
}
/** This is the const version of head(Index).*/
inline ConstSegmentReturnType
head(Index vecSize) const
inline ConstSegmentReturnType head(Index n) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return ConstSegmentReturnType(derived(), 0, vecSize);
return ConstSegmentReturnType(derived(), 0, n);
}
/** \returns a dynamic-size expression of the last coefficients of *this.
*
* \only_for_vectors
*
* \param vecSize the number of coefficients in the block
* \param n the number of coefficients in the segment
*
* Example: \include MatrixBase_end_int.cpp
* Output: \verbinclude MatrixBase_end_int.out
@ -807,95 +830,106 @@ inline ConstSegmentReturnType
*
* \sa class Block, block(Index,Index)
*/
inline SegmentReturnType tail(Index vecSize)
inline SegmentReturnType tail(Index n)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return SegmentReturnType(derived(), this->size() - vecSize, vecSize);
return SegmentReturnType(derived(), this->size() - n, n);
}
/** This is the const version of tail(Index).*/
inline ConstSegmentReturnType tail(Index vecSize) const
inline ConstSegmentReturnType tail(Index n) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return ConstSegmentReturnType(derived(), this->size() - vecSize, vecSize);
return ConstSegmentReturnType(derived(), this->size() - n, n);
}
/** \returns a fixed-size expression of a segment (i.e. a vector block) in \c *this
*
* \only_for_vectors
*
* The template parameter \a Size is the number of coefficients in the block
* \tparam N the number of coefficients in the segment as specified at compile-time
* \param start the index of the first element in the segment
* \param n the number of coefficients in the segment as specified at compile-time
*
* \param start the index of the first element of the sub-vector
* The compile-time and run-time information should not contradict. In other words,
* \a n should equal \a N unless \a N is \a Dynamic.
*
* Example: \include MatrixBase_template_int_segment.cpp
* Output: \verbinclude MatrixBase_template_int_segment.out
*
* \sa class Block
*/
template<int Size>
inline typename FixedSegmentReturnType<Size>::Type segment(Index start)
template<int N>
inline typename FixedSegmentReturnType<N>::Type segment(Index start, Index n = N)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename FixedSegmentReturnType<Size>::Type(derived(), start);
return typename FixedSegmentReturnType<N>::Type(derived(), start, n);
}
/** This is the const version of segment<int>(Index).*/
template<int Size>
inline typename ConstFixedSegmentReturnType<Size>::Type segment(Index start) const
template<int N>
inline typename ConstFixedSegmentReturnType<N>::Type segment(Index start, Index n = N) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename ConstFixedSegmentReturnType<Size>::Type(derived(), start);
return typename ConstFixedSegmentReturnType<N>::Type(derived(), start, n);
}
/** \returns a fixed-size expression of the first coefficients of *this.
*
* \only_for_vectors
*
* The template parameter \a Size is the number of coefficients in the block
* \tparam N the number of coefficients in the segment as specified at compile-time
* \param n the number of coefficients in the segment as specified at run-time
*
* The compile-time and run-time information should not contradict. In other words,
* \a n should equal \a N unless \a N is \a Dynamic.
*
* Example: \include MatrixBase_template_int_start.cpp
* Output: \verbinclude MatrixBase_template_int_start.out
*
* \sa class Block
*/
template<int Size>
inline typename FixedSegmentReturnType<Size>::Type head()
template<int N>
inline typename FixedSegmentReturnType<N>::Type head(Index n = N)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename FixedSegmentReturnType<Size>::Type(derived(), 0);
return typename FixedSegmentReturnType<N>::Type(derived(), 0, n);
}
/** This is the const version of head<int>().*/
template<int Size>
inline typename ConstFixedSegmentReturnType<Size>::Type head() const
template<int N>
inline typename ConstFixedSegmentReturnType<N>::Type head(Index n = N) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename ConstFixedSegmentReturnType<Size>::Type(derived(), 0);
return typename ConstFixedSegmentReturnType<N>::Type(derived(), 0, n);
}
/** \returns a fixed-size expression of the last coefficients of *this.
*
* \only_for_vectors
*
* The template parameter \a Size is the number of coefficients in the block
* \tparam N the number of coefficients in the segment as specified at compile-time
* \param n the number of coefficients in the segment as specified at run-time
*
* The compile-time and run-time information should not contradict. In other words,
* \a n should equal \a N unless \a N is \a Dynamic.
*
* Example: \include MatrixBase_template_int_end.cpp
* Output: \verbinclude MatrixBase_template_int_end.out
*
* \sa class Block
*/
template<int Size>
inline typename FixedSegmentReturnType<Size>::Type tail()
template<int N>
inline typename FixedSegmentReturnType<N>::Type tail(Index n = N)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename FixedSegmentReturnType<Size>::Type(derived(), size() - Size);
return typename FixedSegmentReturnType<N>::Type(derived(), size() - n);
}
/** This is the const version of tail<int>.*/
template<int Size>
inline typename ConstFixedSegmentReturnType<Size>::Type tail() const
template<int N>
inline typename ConstFixedSegmentReturnType<N>::Type tail(Index n = N) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return typename ConstFixedSegmentReturnType<Size>::Type(derived(), size() - Size);
return typename ConstFixedSegmentReturnType<N>::Type(derived(), size() - n);
}
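The same pattern applies to the vector-only segment()/head()/tail() overloads above; a compact sketch of my own:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(8, 0.0, 7.0);
      std::cout << v.head<3>().transpose()                      << "\n";   // compile-time size
      std::cout << v.tail(3).transpose()                        << "\n";   // run-time size
      std::cout << v.segment<Eigen::Dynamic>(2, 4).transpose()  << "\n";   // N = Dynamic, n = 4
      return 0;
    }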

View File

@ -83,7 +83,7 @@ cwiseMin(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar>, const Derived, const ConstantReturnType>
cwiseMin(const Scalar &other) const
{
return cwiseMin(Derived::PlainObject::Constant(rows(), cols(), other));
return cwiseMin(Derived::Constant(rows(), cols(), other));
}
/** \returns an expression of the coefficient-wise max of *this and \a other
@ -107,7 +107,7 @@ cwiseMax(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar>, const Derived, const ConstantReturnType>
cwiseMax(const Scalar &other) const
{
return cwiseMax(Derived::PlainObject::Constant(rows(), cols(), other));
return cwiseMax(Derived::Constant(rows(), cols(), other));
}
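A tiny illustration (mine, not from the diff) of the scalar overloads touched above:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Vector3d v(-2.0, 0.5, 3.0);
      std::cout << v.cwiseMax(0.0).transpose() << "\n";   //  0 0.5 3  (clamp below at 0)
      std::cout << v.cwiseMin(1.0).transpose() << "\n";   // -2 0.5 1  (clamp above at 1)
      return 0;
    }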

View File

@ -7,6 +7,9 @@ workaround_9220(Fortran EIGEN_Fortran_COMPILER_WORKS)
if(EIGEN_Fortran_COMPILER_WORKS)
enable_language(Fortran OPTIONAL)
if(NOT CMAKE_Fortran_COMPILER)
set(EIGEN_Fortran_COMPILER_WORKS OFF)
endif()
endif()
add_custom_target(blas)

View File

@ -23,7 +23,7 @@ function(workaround_9220 language language_works)
#message("DEBUG: language = ${language}")
set(text
"project(test NONE)
cmake_minimum_required(VERSION 2.6.0)
cmake_minimum_required(VERSION 2.8.0)
set (CMAKE_Fortran_FLAGS \"${CMAKE_Fortran_FLAGS}\")
set (CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS}\")
enable_language(${language} OPTIONAL)

View File

@ -2,6 +2,8 @@ namespace Eigen {
/** \page Eigen2ToEigen3 Porting from Eigen2 to Eigen3
<div class="bigwarning">Eigen2 support is deprecated in Eigen 3.2.x and it will be removed in Eigen 3.3.</div>
This page lists the most important API changes between Eigen2 and Eigen3,
and gives tips to help porting your application from Eigen2 to Eigen3.

View File

@ -2,6 +2,8 @@ namespace Eigen {
/** \page Eigen2SupportModes Eigen 2 support modes
<div class="bigwarning">Eigen2 support is deprecated in Eigen 3.2.x and it will be removed in Eigen 3.3.</div>
This page documents the Eigen2 support modes, a powerful tool to help migrating your project from Eigen 2 to Eigen 3.
Don't miss our page on \ref Eigen2ToEigen3 "API changes" between Eigen 2 and Eigen 3.

View File

@ -16,8 +16,8 @@ double s;
// Basic usage
// Eigen // Matlab // comments
x.size() // length(x) // vector size
C.rows() // size(C)(1) // number of rows
C.cols() // size(C)(2) // number of columns
C.rows() // size(C,1) // number of rows
C.cols() // size(C,2) // number of columns
x(i) // x(i+1) // Matlab is 1-based
C(i,j) // C(i+1,j+1) //
@ -51,20 +51,34 @@ v.setLinSpace(size,low,high) // v = linspace(low,high,size)'
// Eigen // Matlab
x.head(n) // x(1:n)
x.head<n>() // x(1:n)
x.tail(n) // N = rows(x); x(N - n: N)
x.tail<n>() // N = rows(x); x(N - n: N)
x.tail(n) // x(end - n + 1: end)
x.tail<n>() // x(end - n + 1: end)
x.segment(i, n) // x(i+1 : i+n)
x.segment<n>(i) // x(i+1 : i+n)
P.block(i, j, rows, cols) // P(i+1 : i+rows, j+1 : j+cols)
P.block<rows, cols>(i, j) // P(i+1 : i+rows, j+1 : j+cols)
P.row(i) // P(i+1, :)
P.col(j) // P(:, j+1)
P.leftCols<cols>() // P(:, 1:cols)
P.leftCols(cols) // P(:, 1:cols)
P.middleCols<cols>(j) // P(:, j+1:j+cols)
P.middleCols(j, cols) // P(:, j+1:j+cols)
P.rightCols<cols>() // P(:, end-cols+1:end)
P.rightCols(cols) // P(:, end-cols+1:end)
P.topRows<rows>() // P(1:rows, :)
P.topRows(rows) // P(1:rows, :)
P.middleRows<rows>(i) // P(:, i+1:i+rows)
P.middleRows(i, rows) // P(:, i+1:i+rows)
P.bottomRows<rows>() // P(:, end-rows+1:end)
P.bottomRows(rows) // P(:, end-rows+1:end)
P.topLeftCorner(rows, cols) // P(1:rows, 1:cols)
P.topRightCorner(rows, cols) // [m n]=size(P); P(1:rows, n-cols+1:n)
P.bottomLeftCorner(rows, cols) // [m n]=size(P); P(m-rows+1:m, 1:cols)
P.bottomRightCorner(rows, cols) // [m n]=size(P); P(m-rows+1:m, n-cols+1:n)
P.topRightCorner(rows, cols) // P(1:rows, end-cols+1:end)
P.bottomLeftCorner(rows, cols) // P(end-rows+1:end, 1:cols)
P.bottomRightCorner(rows, cols) // P(end-rows+1:end, end-cols+1:end)
P.topLeftCorner<rows,cols>() // P(1:rows, 1:cols)
P.topRightCorner<rows,cols>() // [m n]=size(P); P(1:rows, n-cols+1:n)
P.bottomLeftCorner<rows,cols>() // [m n]=size(P); P(m-rows+1:m, 1:cols)
P.bottomRightCorner<rows,cols>() // [m n]=size(P); P(m-rows+1:m, n-cols+1:n)
P.topRightCorner<rows,cols>() // P(1:rows, end-cols+1:end)
P.bottomLeftCorner<rows,cols>() // P(end-rows+1:end, 1:cols)
P.bottomRightCorner<rows,cols>() // P(end-rows+1:end, end-cols+1:end)
// Of particular note is Eigen's swap function which is highly optimized.
// Eigen // Matlab
@ -125,10 +139,8 @@ int r, c;
// Eigen // Matlab
R.minCoeff() // min(R(:))
R.maxCoeff() // max(R(:))
s = R.minCoeff(&r, &c) // [aa, bb] = min(R); [cc, dd] = min(aa);
// r = bb(dd); c = dd; s = cc
s = R.maxCoeff(&r, &c) // [aa, bb] = max(R); [cc, dd] = max(aa);
// row = bb(dd); col = dd; s = cc
s = R.minCoeff(&r, &c) // [s, i] = min(R(:)); [r, c] = ind2sub(size(R), i);
s = R.maxCoeff(&r, &c) // [s, i] = max(R(:)); [r, c] = ind2sub(size(R), i);
R.sum() // sum(R(:))
R.colwise().sum() // sum(R)
R.rowwise().sum() // sum(R, 2) or sum(R')'
@ -150,13 +162,25 @@ x.squaredNorm() // dot(x, x) Note the equivalence is not true for co
x.dot(y) // dot(x, y)
x.cross(y) // cross(x, y) Requires #include <Eigen/Geometry>
//// Type conversion
// Eigen // Matlab
A.cast<double>(); // double(A)
A.cast<float>(); // single(A)
A.cast<int>(); // int32(A)
// if the original type equals destination type, no work is done
// Note that for most operations Eigen requires all operands to have the same type:
MatrixXf F = MatrixXf::Zero(3,3);
A += F; // illegal in Eigen. In Matlab A = A+F is allowed
A += F.cast<double>(); // F converted to double and then added (generally, conversion happens on-the-fly)
// Eigen can map existing memory into Eigen matrices.
float array[3];
Map<Vector3f>(array, 3).fill(10);
int data[4] = 1, 2, 3, 4;
Matrix2i mat2x2(data);
MatrixXi mat2x2 = Map<Matrix2i>(data);
MatrixXi mat2x2 = Map<MatrixXi>(data, 2, 2);
Vector3f::Map(array).fill(10); // create a temporary Map over array and sets entries to 10
int data[4] = {1, 2, 3, 4};
Matrix2i mat2x2(data); // copies data into mat2x2
Matrix2i::Map(data) = 2*mat2x2; // overwrite elements of data with 2*mat2x2
MatrixXi::Map(data, 2, 2) += mat2x2; // adds mat2x2 to elements of data (alternative syntax if size is not know at compile time)
// Solve Ax = b. Result stored in x. Matlab: x = A \ b.
x = A.ldlt().solve(b); // A sym. p.s.d. #include <Eigen/Cholesky>
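
A minimal, self-contained sketch of the Map usage shown a few lines above (assuming Eigen 3.2; the buffer and its values are purely illustrative):

#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

int main() {
  int data[4] = {1, 2, 3, 4};
  Matrix2i mat2x2(data);             // copies data into mat2x2 (column-major: [1 3; 2 4])
  Matrix2i::Map(data) = 2 * mat2x2;  // overwrites the elements of data with 2*mat2x2
  std::cout << mat2x2 << "\n";       // the copy is unchanged
  std::cout << data[0] << " " << data[3] << "\n";  // prints 2 and 8
  return 0;
}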


@ -64,9 +64,9 @@ run time. However, these assertions do cost time and can thus be turned off.
\c EIGEN_DONT_ALIGN is defined.
- \b EIGEN_DONT_VECTORIZE - disables explicit vectorization when defined. Not defined by default, unless
alignment is disabled by %Eigen's platform test or the user defining \c EIGEN_DONT_ALIGN.
- \b EIGEN_FAST_MATH - enables some optimizations which might affect the accuracy of the result. The only
optimization this currently includes is single precision sin() and cos() in the present of SSE
vectorization. Defined by default.
- \b EIGEN_FAST_MATH - enables some optimizations which might affect the accuracy of the result. This currently
enables the SSE vectorization of sin() and cos(), and speedups sqrt() for single precision. Defined to 1 by default.
Define it to 0 to disable.
- \b EIGEN_UNROLLING_LIMIT - defines the size of a loop to enable meta unrolling. Set it to zero to disable
unrolling. The size of a loop here is expressed in %Eigen's own notion of "number of FLOPS", it does not
correspond to the number of iterations or the number of instructions. The default is value 100.
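
As a hedged illustration of how client code would use these tokens (the macros are the ones documented above; the small program itself is not part of Eigen):

// These tokens must be defined before any Eigen header is included.
#define EIGEN_FAST_MATH 0        // trade the fast sin()/cos()/sqrt() paths for full accuracy
#define EIGEN_DONT_VECTORIZE     // disable explicit vectorization entirely
#include <Eigen/Dense>

int main() {
  Eigen::ArrayXf a = Eigen::ArrayXf::LinSpaced(8, 0.0f, 1.0f);
  Eigen::ArrayXf s = a.sin();      // now evaluated with the scalar code path
  return s.size() == 8 ? 0 : 1;
}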


@ -405,19 +405,19 @@ array1 == array2 array1 != array2 array1 == scalar array1 != scalar
array1.min(array2)
array1.max(array2)
array1.abs2()
array1.abs() std::abs(array1)
array1.sqrt() std::sqrt(array1)
array1.log() std::log(array1)
array1.exp() std::exp(array1)
array1.pow(exponent) std::pow(array1,exponent)
array1.abs() abs(array1)
array1.sqrt() sqrt(array1)
array1.log() log(array1)
array1.exp() exp(array1)
array1.pow(exponent) pow(array1,exponent)
array1.square()
array1.cube()
array1.inverse()
array1.sin() std::sin(array1)
array1.cos() std::cos(array1)
array1.tan() std::tan(array1)
array1.asin() std::asin(array1)
array1.acos() std::acos(array1)
array1.sin() sin(array1)
array1.cos() cos(array1)
array1.tan() tan(array1)
array1.asin() asin(array1)
array1.acos() acos(array1)
\endcode
</td></tr>
</table>
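
A short compilable check of the equivalence documented in this table, i.e. that the member form and the free-function form of the coefficient-wise operations agree (a sketch, assuming Eigen 3.2):

#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

int main() {
  ArrayXf array1 = ArrayXf::Random(5);
  // abs()/sqrt() as members versus the global abs()/sqrt() overloads.
  ArrayXf d = array1.abs().sqrt() - sqrt(abs(array1));
  std::cout << d.matrix().norm() << std::endl;   // prints (approximately) 0
  return 0;
}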


@ -127,7 +127,7 @@ VectorNf vec1, vec2;
vec2 = t.linear() * vec1;\endcode</td></tr>
<tr><td>
Apply a \em general transformation \n to a \b normal \b vector
(<a href="http://www.cgafaq.info/wiki/Transforming_normals">explanations</a>)</td><td>\code
(<a href="http://femto.cs.uiuc.edu/faqs/cga-faq.html#S5.27">explanations</a>)</td><td>\code
VectorNf n1, n2;
MatrixNf normalMatrix = t.linear().inverse().transpose();
n2 = (normalMatrix * n1).normalized();\endcode</td></tr>
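
A concrete 3-D sketch of the normal-transformation recipe above (hedged; the transform is made up for illustration, assuming Eigen 3.2):

#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <iostream>
using namespace Eigen;

int main() {
  Affine3f t = Affine3f::Identity();
  t.rotate(AngleAxisf(0.5f, Vector3f::UnitZ()));
  t.scale(Vector3f(1.0f, 2.0f, 4.0f));            // non-uniform scale, so the inverse-transpose matters
  Vector3f n1 = Vector3f::UnitY();
  Matrix3f normalMatrix = t.linear().inverse().transpose();
  Vector3f n2 = (normalMatrix * n1).normalized(); // correctly transformed normal
  std::cout << n2.transpose() << std::endl;
  return 0;
}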


@ -83,7 +83,7 @@ There is no notion of compressed/uncompressed mode for a SparseVector.
\section TutorialSparseExample First example
Before describing each individual class, let's start with the following typical example: solving the Lapace equation \f$ \nabla u = 0 \f$ on a regular 2D grid using a finite difference scheme and Dirichlet boundary conditions.
Before describing each individual class, let's start with the following typical example: solving the Laplace equation \f$ \nabla u = 0 \f$ on a regular 2D grid using a finite difference scheme and Dirichlet boundary conditions.
Such problem can be mathematically expressed as a linear problem of the form \f$ Ax=b \f$ where \f$ x \f$ is the vector of \c m unknowns (in our case, the values of the pixels), \f$ b \f$ is the right hand side vector resulting from the boundary conditions, and \f$ A \f$ is an \f$ m \times m \f$ matrix containing only a few non-zero elements resulting from the discretization of the Laplacian operator.
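
To make the assembly of such a sparse A concrete, here is a hedged, simplified sketch (a 1-D Laplacian with a single Dirichlet value, not the tutorial's actual 2-D code; assuming Eigen 3.2):

#include <Eigen/Sparse>
#include <vector>
using namespace Eigen;

int main() {
  const int m = 5;                              // number of unknowns (illustrative only)
  std::vector<Triplet<double> > coefficients;   // list of the non-zeros of A
  VectorXd b = VectorXd::Zero(m);
  for (int i = 0; i < m; ++i) {
    coefficients.push_back(Triplet<double>(i, i, 2.0));                 // 1-D Laplacian stencil
    if (i > 0)     coefficients.push_back(Triplet<double>(i, i - 1, -1.0));
    if (i < m - 1) coefficients.push_back(Triplet<double>(i, i + 1, -1.0));
  }
  b(0) = 1.0;                                   // Dirichlet boundary value folded into b
  SparseMatrix<double> A(m, m);
  A.setFromTriplets(coefficients.begin(), coefficients.end());
  SimplicialLDLT<SparseMatrix<double> > solver(A);  // A is symmetric positive definite
  VectorXd x = solver.solve(b);                 // solve Ax = b
  return x.size() == m ? 0 : 1;
}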
<table class="manual">


@ -199,3 +199,13 @@ h3.version {
td.width20em p.endtd {
width: 20em;
}
.bigwarning {
font-size:2em;
font-weight:bold;
margin:1em;
padding:1em;
color:red;
border:solid;
}


@ -18,7 +18,6 @@ $generatedby &#160;<a href="http://www.doxygen.org/index.html">
<!--END !GENERATE_TREEVIEW-->
<!-- Piwik -->
<!--
<script type="text/javascript">
var pkBaseURL = (("https:" == document.location.protocol) ? "https://stats.sylphide-consulting.com/piwik/" : "http://stats.sylphide-consulting.com/piwik/");
document.write(unescape("%3Cscript src='" + pkBaseURL + "piwik.js' type='text/javascript'%3E%3C/script%3E"));
@ -29,7 +28,6 @@ piwikTracker.trackPageView();
piwikTracker.enableLinkTracking();
} catch( err ) {}
</script><noscript><p><img src="http://stats.sylphide-consulting.com/piwik/piwik.php?idsite=20" style="border:0" alt="" /></p></noscript>
-->
<!-- End Piwik Tracking Code -->
</body>


@ -0,0 +1,7 @@
Matrix3f A = Matrix3f::Random(3,3), B;
B << 0,1,0,
0,0,1,
1,0,0;
cout << "At start, A = " << endl << A << endl;
A.applyOnTheLeft(B);
cout << "After applyOnTheLeft, A = " << endl << A << endl;


@ -0,0 +1,9 @@
Matrix3f A = Matrix3f::Random(3,3), B;
B << 0,1,0,
0,0,1,
1,0,0;
cout << "At start, A = " << endl << A << endl;
A *= B;
cout << "After A *= B, A = " << endl << A << endl;
A.applyOnTheRight(B); // equivalent to A *= B
cout << "After applyOnTheRight, A = " << endl << A << endl;


@ -14,8 +14,8 @@ if(QT4_FOUND)
add_custom_command(
TARGET Tutorial_sparse_example
POST_BUILD
COMMAND Tutorial_sparse_example
ARGS ${CMAKE_CURRENT_BINARY_DIR}/../html/Tutorial_sparse_example.jpeg
COMMAND Tutorial_sparse_example ARGS ${CMAKE_CURRENT_BINARY_DIR}/../html/Tutorial_sparse_example.jpeg
)
add_dependencies(all_examples Tutorial_sparse_example)
endif(QT4_FOUND)


@ -11,8 +11,8 @@ void insertCoefficient(int id, int i, int j, double w, std::vector<T>& coeffs,
int n = boundary.size();
int id1 = i+j*n;
if(i==-1 || i==n) b(id) -= w * boundary(j); // constrained coeffcieint
else if(j==-1 || j==n) b(id) -= w * boundary(i); // constrained coeffcieint
if(i==-1 || i==n) b(id) -= w * boundary(j); // constrained coefficient
else if(j==-1 || j==n) b(id) -= w * boundary(i); // constrained coefficient
else coeffs.push_back(T(id,id1,w)); // unknown coefficient
}


@ -7,6 +7,9 @@ workaround_9220(Fortran EIGEN_Fortran_COMPILER_WORKS)
if(EIGEN_Fortran_COMPILER_WORKS)
enable_language(Fortran OPTIONAL)
if(NOT CMAKE_Fortran_COMPILER)
set(EIGEN_Fortran_COMPILER_WORKS OFF)
endif()
endif()
add_custom_target(lapack)


@ -4,6 +4,7 @@
# You should call this script with USER set as you want, else some default
# will be used
USER=${USER:-'orzel'}
UPLOAD_DIR=dox
#ulimit -v 1024000
@ -14,10 +15,10 @@ mkdir build -p
#step 2 : upload
# (the '/' at the end of path is very important, see rsync documentation)
rsync -az --no-p --delete build/doc/html/ $USER@ssh.tuxfamily.org:eigen/eigen.tuxfamily.org-web/htdocs/dox-devel/ || { echo "upload failed"; exit 1; }
rsync -az --no-p --delete build/doc/html/ $USER@ssh.tuxfamily.org:eigen/eigen.tuxfamily.org-web/htdocs/$UPLOAD_DIR/ || { echo "upload failed"; exit 1; }
#step 3 : fix the perm
ssh $USER@ssh.tuxfamily.org 'chmod -R g+w /home/eigen/eigen.tuxfamily.org-web/htdocs/dox-devel' || { echo "perm failed"; exit 1; }
ssh $USER@ssh.tuxfamily.org "chmod -R g+w /home/eigen/eigen.tuxfamily.org-web/htdocs/$UPLOAD_DIR" || { echo "perm failed"; exit 1; }
echo "Uploaded successfully"


@ -167,21 +167,14 @@ template<typename ArrayType> void array_real(const ArrayType& m)
Scalar s1 = internal::random<Scalar>();
// these tests are mostly to check possible compilation issues.
// VERIFY_IS_APPROX(m1.sin(), std::sin(m1));
VERIFY_IS_APPROX(m1.sin(), sin(m1));
// VERIFY_IS_APPROX(m1.cos(), std::cos(m1));
VERIFY_IS_APPROX(m1.cos(), cos(m1));
// VERIFY_IS_APPROX(m1.asin(), std::asin(m1));
VERIFY_IS_APPROX(m1.asin(), asin(m1));
// VERIFY_IS_APPROX(m1.acos(), std::acos(m1));
VERIFY_IS_APPROX(m1.acos(), acos(m1));
// VERIFY_IS_APPROX(m1.tan(), std::tan(m1));
VERIFY_IS_APPROX(m1.tan(), tan(m1));
VERIFY_IS_APPROX(cos(m1+RealScalar(3)*m2), cos((m1+RealScalar(3)*m2).eval()));
// VERIFY_IS_APPROX(std::cos(m1+RealScalar(3)*m2), std::cos((m1+RealScalar(3)*m2).eval()));
// VERIFY_IS_APPROX(m1.abs().sqrt(), std::sqrt(std::abs(m1)));
VERIFY_IS_APPROX(m1.abs().sqrt(), sqrt(abs(m1)));
VERIFY_IS_APPROX(m1.abs(), sqrt(numext::abs2(m1)));
@ -190,9 +183,10 @@ template<typename ArrayType> void array_real(const ArrayType& m)
if(!NumTraits<Scalar>::IsComplex)
VERIFY_IS_APPROX(numext::real(m1), m1);
VERIFY_IS_APPROX(m1.abs().log() , log(abs(m1)));
// shift argument of logarithm so that it is not zero
Scalar smallNumber = NumTraits<Scalar>::dummy_precision();
VERIFY_IS_APPROX((m1.abs() + smallNumber).log() , log(abs(m1) + smallNumber));
// VERIFY_IS_APPROX(m1.exp(), std::exp(m1));
VERIFY_IS_APPROX(m1.exp() * m2.exp(), exp(m1+m2));
VERIFY_IS_APPROX(m1.exp(), exp(m1));
VERIFY_IS_APPROX(m1.exp() / m2.exp(),(m1-m2).exp());
@ -236,7 +230,6 @@ template<typename ArrayType> void array_complex(const ArrayType& m)
m2(i,j) = sqrt(m1(i,j));
VERIFY_IS_APPROX(m1.sqrt(), m2);
// VERIFY_IS_APPROX(m1.sqrt(), std::sqrt(m1));
VERIFY_IS_APPROX(m1.sqrt(), Eigen::sqrt(m1));
}


@ -164,9 +164,13 @@ template<typename MatrixType> void cwise_min_max(const MatrixType& m)
// min/max with scalar input
VERIFY_IS_APPROX(MatrixType::Constant(rows,cols, minM1), m1.cwiseMin( minM1));
VERIFY_IS_APPROX(m1, m1.cwiseMin(maxM1));
VERIFY_IS_APPROX(-m1, (-m1).cwiseMin(-minM1));
VERIFY_IS_APPROX(-m1.array(), ((-m1).array().min)( -minM1));
VERIFY_IS_APPROX(MatrixType::Constant(rows,cols, maxM1), m1.cwiseMax( maxM1));
VERIFY_IS_APPROX(m1, m1.cwiseMax(minM1));
VERIFY_IS_APPROX(-m1, (-m1).cwiseMax(-maxM1));
VERIFY_IS_APPROX(-m1.array(), ((-m1).array().max)(-maxM1));
VERIFY_IS_APPROX(MatrixType::Constant(rows,cols, minM1).array(), (m1.array().min)( minM1));
VERIFY_IS_APPROX(m1.array(), (m1.array().min)( maxM1));
@ -202,6 +206,12 @@ template<typename MatrixTraits> void resize(const MatrixTraits& t)
VERIFY(a1.size()==cols);
}
void regression_bug_654()
{
ArrayXf a = RowVectorXf(3);
VectorXf v = Array<float,1,Dynamic>(3);
}
void test_array_for_matrix()
{
for(int i = 0; i < g_repeat; i++) {
@ -239,4 +249,5 @@ void test_array_for_matrix()
CALL_SUBTEST_5( resize(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_6( resize(MatrixXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
}
CALL_SUBTEST_6( regression_bug_654() );
}


@ -263,8 +263,8 @@ template<typename MatrixType> void cholesky_bug241(const MatrixType& m)
// LDLT is not guaranteed to work for indefinite matrices, but happens to work fine if matrix is diagonal.
// This test checks that LDLT reports correctly that matrix is indefinite.
// See http://forum.kde.org/viewtopic.php?f=74&t=106942
template<typename MatrixType> void cholesky_indefinite(const MatrixType& m)
// See http://forum.kde.org/viewtopic.php?f=74&t=106942 and bug 736
template<typename MatrixType> void cholesky_definiteness(const MatrixType& m)
{
eigen_assert(m.rows() == 2 && m.cols() == 2);
MatrixType mat;
@ -280,6 +280,24 @@ template<typename MatrixType> void cholesky_indefinite(const MatrixType& m)
VERIFY(!ldlt.isNegative());
VERIFY(!ldlt.isPositive());
}
{
mat << 0, 0, 0, 0;
LDLT<MatrixType> ldlt(mat);
VERIFY(ldlt.isNegative());
VERIFY(ldlt.isPositive());
}
{
mat << 0, 0, 0, 1;
LDLT<MatrixType> ldlt(mat);
VERIFY(!ldlt.isNegative());
VERIFY(ldlt.isPositive());
}
{
mat << -1, 0, 0, 0;
LDLT<MatrixType> ldlt(mat);
VERIFY(ldlt.isNegative());
VERIFY(!ldlt.isPositive());
}
}
template<typename MatrixType> void cholesky_verify_assert()
@ -309,7 +327,7 @@ void test_cholesky()
CALL_SUBTEST_1( cholesky(Matrix<double,1,1>()) );
CALL_SUBTEST_3( cholesky(Matrix2d()) );
CALL_SUBTEST_3( cholesky_bug241(Matrix2d()) );
CALL_SUBTEST_3( cholesky_indefinite(Matrix2d()) );
CALL_SUBTEST_3( cholesky_definiteness(Matrix2d()) );
CALL_SUBTEST_4( cholesky(Matrix3f()) );
CALL_SUBTEST_5( cholesky(Matrix4d()) );
s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE);
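
Outside the test harness, the definiteness queries exercised by the new cholesky_definiteness cases boil down to the following (a hedged sketch, assuming Eigen 3.2):

#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

int main() {
  Matrix2d mat;
  mat << 1, 0, 0, -1;                // indefinite: one positive and one negative eigenvalue
  LDLT<Matrix2d> ldlt(mat);
  std::cout << ldlt.isPositive() << " " << ldlt.isNegative() << std::endl;  // expect "0 0"
  return 0;
}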


@ -60,34 +60,51 @@ void run_matrix_tests()
template <typename Scalar>
void run_vector_tests()
{
typedef Matrix<Scalar, 1, Eigen::Dynamic> MatrixType;
typedef Matrix<Scalar, 1, Eigen::Dynamic> VectorType;
MatrixType m, n;
VectorType m, n;
// boundary cases ...
m = n = MatrixType::Random(50);
m = n = VectorType::Random(50);
m.conservativeResize(1);
VERIFY_IS_APPROX(m, n.segment(0,1));
m = n = MatrixType::Random(50);
m = n = VectorType::Random(50);
m.conservativeResize(50);
VERIFY_IS_APPROX(m, n.segment(0,50));
m = n = VectorType::Random(50);
m.conservativeResize(m.rows(),1);
VERIFY_IS_APPROX(m, n.segment(0,1));
m = n = VectorType::Random(50);
m.conservativeResize(m.rows(),50);
VERIFY_IS_APPROX(m, n.segment(0,50));
// random shrinking ...
for (int i=0; i<50; ++i)
{
const int size = internal::random<int>(1,50);
m = n = MatrixType::Random(50);
m = n = VectorType::Random(50);
m.conservativeResize(size);
VERIFY_IS_APPROX(m, n.segment(0,size));
m = n = VectorType::Random(50);
m.conservativeResize(m.rows(), size);
VERIFY_IS_APPROX(m, n.segment(0,size));
}
// random growing with zeroing ...
for (int i=0; i<50; ++i)
{
const int size = internal::random<int>(50,100);
m = n = MatrixType::Random(50);
m.conservativeResizeLike(MatrixType::Zero(size));
m = n = VectorType::Random(50);
m.conservativeResizeLike(VectorType::Zero(size));
VERIFY_IS_APPROX(m.segment(0,50), n);
VERIFY( size<=50 || m.segment(50,size-50).sum() == Scalar(0) );
m = n = VectorType::Random(50);
m.conservativeResizeLike(Matrix<Scalar,Dynamic,Dynamic>::Zero(1,size));
VERIFY_IS_APPROX(m.segment(0,50), n);
VERIFY( size<=50 || m.segment(50,size-50).sum() == Scalar(0) );
}
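
For reference, the semantics these vector tests rely on, namely that conservativeResize() keeps existing coefficients while conservativeResizeLike(Zero(...)) also zero-fills the newly created ones, can be sketched as follows (assuming Eigen 3.2; values illustrative):

#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

int main() {
  VectorXf v = VectorXf::LinSpaced(4, 1.0f, 4.0f);  // 1 2 3 4
  v.conservativeResize(2);                          // keeps the leading coefficients: 1 2
  v.conservativeResizeLike(VectorXf::Zero(5));      // grows and zero-fills the tail: 1 2 0 0 0
  std::cout << v.transpose() << std::endl;
  return 0;
}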


@ -12,36 +12,48 @@
#include <Eigen/LU>
#include <Eigen/SVD>
template<typename Scalar> void check_all_var(const Matrix<Scalar,3,1>& ea)
template<typename Scalar>
void verify_euler(const Matrix<Scalar,3,1>& ea, int i, int j, int k)
{
typedef Matrix<Scalar,3,3> Matrix3;
typedef Matrix<Scalar,3,1> Vector3;
typedef AngleAxis<Scalar> AngleAxisx;
using std::abs;
Matrix3 m(AngleAxisx(ea[0], Vector3::Unit(i)) * AngleAxisx(ea[1], Vector3::Unit(j)) * AngleAxisx(ea[2], Vector3::Unit(k)));
Vector3 eabis = m.eulerAngles(i, j, k);
Matrix3 mbis(AngleAxisx(eabis[0], Vector3::Unit(i)) * AngleAxisx(eabis[1], Vector3::Unit(j)) * AngleAxisx(eabis[2], Vector3::Unit(k)));
VERIFY_IS_APPROX(m, mbis);
/* If I==K, and ea[1]==0, then there is no unique solution. */
/* The remark applies in the case where I!=K, and |ea[1]| is close to pi/2. */
if( (i!=k || ea[1]!=0) && (i==k || !internal::isApprox(abs(ea[1]),Scalar(M_PI/2),test_precision<Scalar>())) )
VERIFY((ea-eabis).norm() <= test_precision<Scalar>());
#define VERIFY_EULER(I,J,K, X,Y,Z) { \
Matrix3 m(AngleAxisx(ea[0], Vector3::Unit##X()) * AngleAxisx(ea[1], Vector3::Unit##Y()) * AngleAxisx(ea[2], Vector3::Unit##Z())); \
Vector3 eabis = m.eulerAngles(I,J,K); \
Matrix3 mbis(AngleAxisx(eabis[0], Vector3::Unit##X()) * AngleAxisx(eabis[1], Vector3::Unit##Y()) * AngleAxisx(eabis[2], Vector3::Unit##Z())); \
VERIFY_IS_APPROX(m, mbis); \
/* If I==K, and ea[1]==0, then there no unique solution. */ \
/* The remark apply in the case where I!=K, and |ea[1]| is close to pi/2. */ \
if( (I!=K || ea[1]!=0) && (I==K || !internal::isApprox(abs(ea[1]),Scalar(M_PI/2),test_precision<Scalar>())) ) VERIFY((ea-eabis).norm() <= test_precision<Scalar>()); \
// approx_or_less_than does not work for 0
VERIFY(0 < eabis[0] || test_isMuchSmallerThan(eabis[0], Scalar(1)));
VERIFY_IS_APPROX_OR_LESS_THAN(eabis[0], Scalar(M_PI));
VERIFY_IS_APPROX_OR_LESS_THAN(-Scalar(M_PI), eabis[1]);
VERIFY_IS_APPROX_OR_LESS_THAN(eabis[1], Scalar(M_PI));
VERIFY_IS_APPROX_OR_LESS_THAN(-Scalar(M_PI), eabis[2]);
VERIFY_IS_APPROX_OR_LESS_THAN(eabis[2], Scalar(M_PI));
}
VERIFY_EULER(0,1,2, X,Y,Z);
VERIFY_EULER(0,1,0, X,Y,X);
VERIFY_EULER(0,2,1, X,Z,Y);
VERIFY_EULER(0,2,0, X,Z,X);
VERIFY_EULER(1,2,0, Y,Z,X);
VERIFY_EULER(1,2,1, Y,Z,Y);
VERIFY_EULER(1,0,2, Y,X,Z);
VERIFY_EULER(1,0,1, Y,X,Y);
template<typename Scalar> void check_all_var(const Matrix<Scalar,3,1>& ea)
{
verify_euler(ea, 0,1,2);
verify_euler(ea, 0,1,0);
verify_euler(ea, 0,2,1);
verify_euler(ea, 0,2,0);
VERIFY_EULER(2,0,1, Z,X,Y);
VERIFY_EULER(2,0,2, Z,X,Z);
VERIFY_EULER(2,1,0, Z,Y,X);
VERIFY_EULER(2,1,2, Z,Y,Z);
verify_euler(ea, 1,2,0);
verify_euler(ea, 1,2,1);
verify_euler(ea, 1,0,2);
verify_euler(ea, 1,0,1);
verify_euler(ea, 2,0,1);
verify_euler(ea, 2,0,2);
verify_euler(ea, 2,1,0);
verify_euler(ea, 2,1,2);
}
template<typename Scalar> void eulerangles()
@ -63,7 +75,16 @@ template<typename Scalar> void eulerangles()
ea = m.eulerAngles(0,1,0);
check_all_var(ea);
ea = (Array3::Random() + Array3(1,1,0))*Scalar(M_PI)*Array3(0.5,0.5,1);
// Check with purely random Quaternion:
q1.coeffs() = Quaternionx::Coefficients::Random().normalized();
m = q1;
ea = m.eulerAngles(0,1,2);
check_all_var(ea);
ea = m.eulerAngles(0,1,0);
check_all_var(ea);
// Check with random angles in range [0:pi]x[-pi:pi]x[-pi:pi].
ea = (Array3::Random() + Array3(1,0,0))*Scalar(M_PI)*Array3(0.5,1,1);
check_all_var(ea);
ea[2] = ea[0] = internal::random<Scalar>(0,Scalar(M_PI));


@ -279,6 +279,13 @@ template<typename Scalar, int Mode, int Options> void transformations()
t1 = Eigen::Scaling(s0,s0,s0) * t1;
VERIFY_IS_APPROX(t0.matrix(), t1.matrix());
t0 = t3;
t0.scale(s0);
t1 = t3 * Eigen::Scaling(s0);
VERIFY_IS_APPROX(t0.matrix(), t1.matrix());
t0.prescale(s0);
t1 = Eigen::Scaling(s0) * t1;
VERIFY_IS_APPROX(t0.matrix(), t1.matrix());
t0.setIdentity();
t0.prerotate(q1).prescale(v0).pretranslate(v0);


@ -51,6 +51,7 @@ void trmm(int rows=internal::random<int>(1,EIGEN_TEST_MAX_SIZE),
ge_xs_save = ge_xs;
VERIFY_IS_APPROX( (ge_xs_save + s1*triTr.conjugate() * (s2*ge_left.adjoint())).eval(), ge_xs.noalias() += (s1*mat.adjoint()).template triangularView<Mode>() * (s2*ge_left.adjoint()) );
ge_sx.setRandom();
ge_sx_save = ge_sx;
VERIFY_IS_APPROX( ge_sx_save - (ge_right.adjoint() * (-s1 * triTr).conjugate()).eval(), ge_sx.noalias() -= (ge_right.adjoint() * (-s1 * mat).adjoint().template triangularView<Mode>()).eval());


@ -130,4 +130,8 @@ void test_qr_fullpivoting()
// Test problem size constructors
CALL_SUBTEST_7(FullPivHouseholderQR<MatrixXf>(10, 20));
CALL_SUBTEST_7((FullPivHouseholderQR<Matrix<float,10,20> >(10,20)));
CALL_SUBTEST_7((FullPivHouseholderQR<Matrix<float,10,20> >(Matrix<float,10,20>::Random())));
CALL_SUBTEST_7((FullPivHouseholderQR<Matrix<float,20,10> >(20,10)));
CALL_SUBTEST_7((FullPivHouseholderQR<Matrix<float,20,10> >(Matrix<float,20,10>::Random())));
}


@ -14,9 +14,9 @@
static int nb_temporaries;
inline void on_temporary_creation(int size) {
inline void on_temporary_creation(int) {
// here's a great place to set a breakpoint when debugging failures in this test!
if(size!=0) nb_temporaries++;
nb_temporaries++;
}


@ -154,16 +154,16 @@ initSparse(double density,
sparseMat.finalize();
}
template<typename Scalar> void
template<typename Scalar,int Options,typename Index> void
initSparse(double density,
Matrix<Scalar,Dynamic,1>& refVec,
SparseVector<Scalar>& sparseVec,
SparseVector<Scalar,Options,Index>& sparseVec,
std::vector<int>* zeroCoords = 0,
std::vector<int>* nonzeroCoords = 0)
{
sparseVec.reserve(int(refVec.size()*density));
sparseVec.setZero();
for(int i=0; i<refVec.size(); i++)
for(Index i=0; i<refVec.size(); i++)
{
Scalar v = (internal::random<double>(0,1) < density) ? internal::random<Scalar>() : Scalar(0);
if (v!=Scalar(0))
@ -178,10 +178,10 @@ initSparse(double density,
}
}
template<typename Scalar> void
template<typename Scalar,int Options,typename Index> void
initSparse(double density,
Matrix<Scalar,1,Dynamic>& refVec,
SparseVector<Scalar,RowMajor>& sparseVec,
SparseVector<Scalar,Options,Index>& sparseVec,
std::vector<int>* zeroCoords = 0,
std::vector<int>* nonzeroCoords = 0)
{


@ -13,8 +13,9 @@ template<typename SparseMatrixType, typename DenseMatrix, bool IsRowMajor=Sparse
template<typename SparseMatrixType, typename DenseMatrix> struct test_outer<SparseMatrixType,DenseMatrix,false> {
static void run(SparseMatrixType& m2, SparseMatrixType& m4, DenseMatrix& refMat2, DenseMatrix& refMat4) {
int c = internal::random(0,m2.cols()-1);
int c1 = internal::random(0,m2.cols()-1);
typedef typename SparseMatrixType::Index Index;
Index c = internal::random<Index>(0,m2.cols()-1);
Index c1 = internal::random<Index>(0,m2.cols()-1);
VERIFY_IS_APPROX(m4=m2.col(c)*refMat2.col(c1).transpose(), refMat4=refMat2.col(c)*refMat2.col(c1).transpose());
VERIFY_IS_APPROX(m4=refMat2.col(c1)*m2.col(c).transpose(), refMat4=refMat2.col(c1)*refMat2.col(c).transpose());
}
@ -22,8 +23,9 @@ template<typename SparseMatrixType, typename DenseMatrix> struct test_outer<Spar
template<typename SparseMatrixType, typename DenseMatrix> struct test_outer<SparseMatrixType,DenseMatrix,true> {
static void run(SparseMatrixType& m2, SparseMatrixType& m4, DenseMatrix& refMat2, DenseMatrix& refMat4) {
int r = internal::random(0,m2.rows()-1);
int c1 = internal::random(0,m2.cols()-1);
typedef typename SparseMatrixType::Index Index;
Index r = internal::random<Index>(0,m2.rows()-1);
Index c1 = internal::random<Index>(0,m2.cols()-1);
VERIFY_IS_APPROX(m4=m2.row(r).transpose()*refMat2.col(c1).transpose(), refMat4=refMat2.row(r).transpose()*refMat2.col(c1).transpose());
VERIFY_IS_APPROX(m4=refMat2.col(c1)*m2.row(r), refMat4=refMat2.col(c1)*refMat2.row(r));
}
@ -37,9 +39,9 @@ template<typename SparseMatrixType> void sparse_product()
{
typedef typename SparseMatrixType::Index Index;
Index n = 100;
const Index rows = internal::random<int>(1,n);
const Index cols = internal::random<int>(1,n);
const Index depth = internal::random<int>(1,n);
const Index rows = internal::random<Index>(1,n);
const Index cols = internal::random<Index>(1,n);
const Index depth = internal::random<Index>(1,n);
typedef typename SparseMatrixType::Scalar Scalar;
enum { Flags = SparseMatrixType::Flags };
@ -244,6 +246,7 @@ void test_sparse_product()
CALL_SUBTEST_1( (sparse_product<SparseMatrix<double,RowMajor> >()) );
CALL_SUBTEST_2( (sparse_product<SparseMatrix<std::complex<double>, ColMajor > >()) );
CALL_SUBTEST_2( (sparse_product<SparseMatrix<std::complex<double>, RowMajor > >()) );
CALL_SUBTEST_3( (sparse_product<SparseMatrix<float,ColMajor,long int> >()) );
CALL_SUBTEST_4( (sparse_product_regression_test<SparseMatrix<double,RowMajor>, Matrix<double, Dynamic, Dynamic, RowMajor> >()) );
}
}


@ -9,14 +9,14 @@
#include "sparse.h"
template<typename Scalar> void sparse_vector(int rows, int cols)
template<typename Scalar,typename Index> void sparse_vector(int rows, int cols)
{
double densityMat = (std::max)(8./(rows*cols), 0.01);
double densityVec = (std::max)(8./float(rows), 0.1);
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
typedef Matrix<Scalar,Dynamic,1> DenseVector;
typedef SparseVector<Scalar> SparseVectorType;
typedef SparseMatrix<Scalar> SparseMatrixType;
typedef SparseVector<Scalar,0,Index> SparseVectorType;
typedef SparseMatrix<Scalar,0,Index> SparseMatrixType;
Scalar eps = 1e-6;
SparseMatrixType m1(rows,rows);
@ -101,9 +101,10 @@ template<typename Scalar> void sparse_vector(int rows, int cols)
void test_sparse_vector()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( sparse_vector<double>(8, 8) );
CALL_SUBTEST_2( sparse_vector<std::complex<double> >(16, 16) );
CALL_SUBTEST_1( sparse_vector<double>(299, 535) );
CALL_SUBTEST_1(( sparse_vector<double,int>(8, 8) ));
CALL_SUBTEST_2(( sparse_vector<std::complex<double>, int>(16, 16) ));
CALL_SUBTEST_1(( sparse_vector<double,long int>(299, 535) ));
CALL_SUBTEST_1(( sparse_vector<double,short>(299, 535) ));
}
}


@ -55,8 +55,16 @@ template<typename MatrixType> void stable_norm(const MatrixType& m)
Index rows = m.rows();
Index cols = m.cols();
Scalar big = internal::random<Scalar>() * ((std::numeric_limits<RealScalar>::max)() * RealScalar(1e-4));
Scalar small = internal::random<Scalar>() * ((std::numeric_limits<RealScalar>::min)() * RealScalar(1e4));
// get a non-zero random factor
Scalar factor = internal::random<Scalar>();
while(numext::abs2(factor)<RealScalar(1e-4))
factor = internal::random<Scalar>();
Scalar big = factor * ((std::numeric_limits<RealScalar>::max)() * RealScalar(1e-4));
factor = internal::random<Scalar>();
while(numext::abs2(factor)<RealScalar(1e-4))
factor = internal::random<Scalar>();
Scalar small = factor * ((std::numeric_limits<RealScalar>::min)() * RealScalar(1e4));
MatrixType vzero = MatrixType::Zero(rows, cols),
vrand = MatrixType::Random(rows, cols),


@ -130,10 +130,11 @@ void run_fixed_size_test(int num_elements)
// MUST be positive because in any other case det(cR_t) may become negative for
// odd dimensions!
const Scalar c = abs(internal::random<Scalar>());
// Also if c is too small compared to t.norm(), the problem is ill-posed (cf. Bug 744)
const Scalar c = internal::random<Scalar>(0.5, 2.0);
FixedMatrix R = randMatrixSpecialUnitary<Scalar>(dim);
FixedVector t = Scalar(50)*FixedVector::Random(dim,1);
FixedVector t = Scalar(32)*FixedVector::Random(dim,1);
HomMatrix cR_t = HomMatrix::Identity(dim+1,dim+1);
cR_t.block(0,0,dim,dim) = c*R;
@ -149,9 +150,9 @@ void run_fixed_size_test(int num_elements)
HomMatrix cR_t_umeyama = umeyama(src_block, dst_block);
const Scalar error = ( cR_t_umeyama*src - dst ).array().square().sum();
const Scalar error = ( cR_t_umeyama*src - dst ).squaredNorm();
VERIFY(error < Scalar(10)*std::numeric_limits<Scalar>::epsilon());
VERIFY(error < Scalar(16)*std::numeric_limits<Scalar>::epsilon());
}
void test_umeyama()


@ -101,6 +101,16 @@ template<typename ArrayType> void vectorwiseop_array(const ArrayType& m)
VERIFY_RAISES_ASSERT(m2.rowwise() /= rowvec.transpose());
VERIFY_RAISES_ASSERT(m1.rowwise() / rowvec.transpose());
m2 = m1;
// yes, there might be an aliasing issue there but ".rowwise() /="
// is supposed to evaluate "m2.colwise().sum()" into a temporary to avoid
// evaluating the reductions multiple times
if(ArrayType::RowsAtCompileTime>2 || ArrayType::RowsAtCompileTime==Dynamic)
{
m2.rowwise() /= m2.colwise().sum();
VERIFY_IS_APPROX(m2, m1.rowwise() / m1.colwise().sum());
}
}
template<typename MatrixType> void vectorwiseop_matrix(const MatrixType& m)


@ -9,12 +9,26 @@
#include "main.h"
template<typename MatrixType> void zeroReduction(const MatrixType& m) {
// Reductions that must hold for zero sized objects
VERIFY(m.all());
VERIFY(!m.any());
VERIFY(m.prod()==1);
VERIFY(m.sum()==0);
VERIFY(m.count()==0);
VERIFY(m.allFinite());
VERIFY(!m.hasNaN());
}
template<typename MatrixType> void zeroSizedMatrix()
{
MatrixType t1;
if (MatrixType::SizeAtCompileTime == Dynamic)
if (MatrixType::SizeAtCompileTime == Dynamic || MatrixType::SizeAtCompileTime == 0)
{
zeroReduction(t1);
if (MatrixType::RowsAtCompileTime == Dynamic)
VERIFY(t1.rows() == 0);
if (MatrixType::ColsAtCompileTime == Dynamic)
@ -22,9 +36,13 @@ template<typename MatrixType> void zeroSizedMatrix()
if (MatrixType::RowsAtCompileTime == Dynamic && MatrixType::ColsAtCompileTime == Dynamic)
{
MatrixType t2(0, 0);
VERIFY(t2.rows() == 0);
VERIFY(t2.cols() == 0);
zeroReduction(t2);
VERIFY(t1==t2);
}
}
}
@ -33,11 +51,15 @@ template<typename VectorType> void zeroSizedVector()
{
VectorType t1;
if (VectorType::SizeAtCompileTime == Dynamic)
if (VectorType::SizeAtCompileTime == Dynamic || VectorType::SizeAtCompileTime==0)
{
zeroReduction(t1);
VERIFY(t1.size() == 0);
VectorType t2(DenseIndex(0)); // DenseIndex disambiguates with 0-the-null-pointer (error with gcc 4.4 and MSVC8)
VERIFY(t2.size() == 0);
zeroReduction(t2);
VERIFY(t1==t2);
}
}
@ -51,9 +73,12 @@ void test_zerosized()
zeroSizedMatrix<Matrix<float, Dynamic, 0, 0, 0, 0> >();
zeroSizedMatrix<Matrix<float, 0, Dynamic, 0, 0, 0> >();
zeroSizedMatrix<Matrix<float, Dynamic, Dynamic, 0, 0, 0> >();
zeroSizedMatrix<Matrix<float, 0, 4> >();
zeroSizedMatrix<Matrix<float, 4, 0> >();
zeroSizedVector<Vector2d>();
zeroSizedVector<Vector3i>();
zeroSizedVector<VectorXf>();
zeroSizedVector<Matrix<float, 0, 1> >();
zeroSizedVector<Matrix<float, 1, 0> >();
}
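
The conventions asserted by zeroReduction() above (the empty product is 1, the empty sum is 0, all() is vacuously true, any() is false) can be seen directly on a zero-sized dynamic matrix (a hedged sketch, assuming Eigen 3.2):

#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

int main() {
  MatrixXf m(0, 0);                         // a zero-sized dynamic matrix
  std::cout << m.prod() << " " << m.sum() << " "
            << m.all()  << " " << m.any() << std::endl;   // prints "1 0 1 0"
  return 0;
}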


@ -11,7 +11,12 @@
#define EIGEN_OPENGL_MODULE
#include <Eigen/Geometry>
#if defined(__APPLE_CC__)
#include <OpenGL/gl.h>
#else
#include <GL/gl.h>
#endif
namespace Eigen {


@ -58,7 +58,9 @@ void pseudo_inverse(const CMatrix &C, CINVMatrix &CINV)
Scalar rho, rho_1, alpha;
d.setZero();
CINV.startFill(); // FIXME estimate the number of non-zeros
typedef Triplet<double> T;
std::vector<T> tripletList;
for (Index i = 0; i < rows; ++i)
{
d[i] = 1.0;
@ -84,11 +86,12 @@ void pseudo_inverse(const CMatrix &C, CINVMatrix &CINV)
// FIXME add a generic "prune/filter" expression for both dense and sparse object to sparse
for (Index j=0; j<l.size(); ++j)
if (l[j]<1e-15)
CINV.fill(i,j) = l[j];
tripletList.push_back(T(i,j,l(j)));
d[i] = 0.0;
}
CINV.endFill();
CINV.setFromTriplets(tripletList.begin(), tripletList.end());
}
@ -103,6 +106,7 @@ template<typename TMatrix, typename CMatrix,
void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x,
const VectorB& b, const VectorF& f, IterationController &iter)
{
using std::sqrt;
typedef typename TMatrix::Scalar Scalar;
typedef typename TMatrix::Index Index;
typedef Matrix<Scalar,Dynamic,1> TmpVec;


@ -16,9 +16,6 @@ std::complex<T> RandomCpx() { return std::complex<T>( (T)(rand()/(T)RAND_MAX - .
using namespace std;
using namespace Eigen;
float norm(float x) {return x*x;}
double norm(double x) {return x*x;}
long double norm(long double x) {return x*x;}
template < typename T>
complex<long double> promote(complex<T> x) { return complex<long double>(x.real(),x.imag()); }
@ -40,11 +37,11 @@ complex<long double> promote(long double x) { return complex<long double>( x);
for (size_t k1=0;k1<(size_t)timebuf.size();++k1) {
acc += promote( timebuf[k1] ) * exp( complex<long double>(0,k1*phinc) );
}
totalpower += norm(acc);
totalpower += numext::abs2(acc);
complex<long double> x = promote(fftbuf[k0]);
complex<long double> dif = acc - x;
difpower += norm(dif);
//cerr << k0 << "\t" << acc << "\t" << x << "\t" << sqrt(norm(dif)) << endl;
difpower += numext::abs2(dif);
//cerr << k0 << "\t" << acc << "\t" << x << "\t" << sqrt(numext::abs2(dif)) << endl;
}
cerr << "rmse:" << sqrt(difpower/totalpower) << endl;
return sqrt(difpower/totalpower);
@ -57,8 +54,8 @@ complex<long double> promote(long double x) { return complex<long double>( x);
long double difpower=0;
size_t n = (min)( buf1.size(),buf2.size() );
for (size_t k=0;k<n;++k) {
totalpower += (norm( buf1[k] ) + norm(buf2[k]) )/2.;
difpower += norm(buf1[k] - buf2[k]);
totalpower += (numext::abs2( buf1[k] ) + numext::abs2(buf2[k]) )/2.;
difpower += numext::abs2(buf1[k] - buf2[k]);
}
return sqrt(difpower/totalpower);
}


@ -2,21 +2,48 @@ project (GeographicLib)
# Version information
set (PROJECT_VERSION_MAJOR 1)
set (PROJECT_VERSION_MINOR 34)
set (PROJECT_VERSION_MINOR 35)
set (PROJECT_VERSION_PATCH 0)
set (PROJECT_VERSION "${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}")
if (PROJECT_VERSION_PATCH GREATER 0)
set (PROJECT_VERSION "${PROJECT_VERSION}.${PROJECT_VERSION_PATCH}")
endif ()
if (DEFINED CPACK_PACKAGE_VERSION_COUNT)
# majic (version 0.1.9 and later) invokes cmake defining, e.g.,
# -D CPACK_PACKAGE_VERSION=1.35-SNAPSHOT
# -D CPACK_PACKAGE_VERSION_COUNT=2
# -D CPACK_PACKAGE_VERSION_MAJOR=1
# -D CPACK_PACKAGE_VERSION_MINOR=35
# -D CPACK_PACKAGE_VERSION_SUFFIX=-SNAPSHOT
# Check that the first two version numbers are consistent.
if (CPACK_PACKAGE_VERSION_COUNT EQUAL 2)
set (CPACK_PACKAGE_VERSION_PATCH ${PROJECT_VERSION_PATCH})
elseif (CPACK_PACKAGE_VERSION_COUNT LESS 2)
message (FATAL_ERROR "CPACK_PACKAGE_VERSION_COUNT must be 2 or more")
endif ()
if (NOT (
CPACK_PACKAGE_VERSION_MAJOR EQUAL PROJECT_VERSION_MAJOR AND
CPACK_PACKAGE_VERSION_MINOR EQUAL PROJECT_VERSION_MINOR))
message (FATAL_ERROR "Inconsistency in CPACK and PROJECT version numbers")
endif ()
set (PROJECT_VERSION_PATCH ${CPACK_PACKAGE_VERSION_PATCH})
set (PROJECT_VERSION ${CPACK_PACKAGE_VERSION})
else ()
set (CPACK_PACKAGE_VERSION_MAJOR ${PROJECT_VERSION_MAJOR})
set (CPACK_PACKAGE_VERSION_MINOR ${PROJECT_VERSION_MINOR})
set (CPACK_PACKAGE_VERSION_PATCH ${PROJECT_VERSION_PATCH})
set (CPACK_PACKAGE_VERSION ${PROJECT_VERSION})
endif ()
# The library version tracks the numbering given by libtool in the
# autoconf set up.
set (LIBVERSION 10)
set (LIBVERSIONFULL 10.1.1)
set (LIBVERSIONFULL 10.1.2)
string (TOLOWER ${PROJECT_NAME} PROJECT_NAME_LOWER)
string (TOUPPER ${PROJECT_NAME} PROJECT_NAME_UPPER)
@ -201,8 +228,9 @@ if (NOT MSVC)
endif ()
endif ()
if (NOT CYGWIN)
# cygwin has a long double but the support for ::cbrtl etc is missing
if (NOT (CYGWIN OR ANDROID))
# cygwin and android have a long double but the support for ::cbrtl,
# etc., is missing
include (CheckTypeSize)
check_type_size ("long double" LONG_DOUBLE BUILTIN_TYPES_ONLY)
endif ()
@ -296,7 +324,12 @@ if (MSVC)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4")
else ()
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra")
# check for C++11 support
# check for C++11 support. If available, the C++11 static_assert and
# various math functions (std::atanh, etc.) are used. This flag is
# *not* propagated to clients that use GeographicLib. However, this
# is of no consequence. When the client code is being compiled (and
# the GeographicLib headers being included), work-alike substitutions
# for static_assert and std::atanh are used.
include (CheckCXXCompilerFlag)
set (CXX11FLAG "-std=c++11")
check_cxx_compiler_flag (${CXX11FLAG} CXX11TEST1)
@ -388,7 +421,7 @@ set (CPACK_SOURCE_IGNORE_FILES
"${PROJECT_SOURCE_DIR}/BUILD"
"${PROJECT_SOURCE_DIR}/(tests|testdata|cgi-bin|.*\\\\.cache)/"
"${PROJECT_SOURCE_DIR}/(distrib|.*-distrib|.*-installer|geodesic-papers)/"
"${PROJECT_SOURCE_DIR}/[^/]*\\\\.(html|kmz|pdf|xml)\$"
"${PROJECT_SOURCE_DIR}/[^/]*\\\\.(html|kmz|pdf)\$"
"${PROJECT_SOURCE_DIR}/(autogen|biblio|js-compress)\\\\.sh\$"
"${PROJECT_SOURCE_DIR}/(geodesic-biblio.txt|makefile-admin|[^/]*\\\\.png)\$"
"${PROJECT_SOURCE_DIR}/matlab/matlab-.*blurb.txt\$" )

Some files were not shown because too many files have changed in this diff.