Merge remote-tracking branch 'origin/develop' into feature/OptimizeSimilarity

Conflicts:
	gtsam_unstable/geometry/tests/testSimilarity3.cpp
release/4.3a0
dellaert 2016-02-07 13:00:29 -08:00
commit faffcb193e
548 changed files with 29784 additions and 14398 deletions

.cproject: 3362 lines changed (diff suppressed because it is too large)

.gitignore (vendored): 2 lines changed

@ -6,3 +6,5 @@
/examples/Data/pose3example-rewritten.txt
*.txt.user
*.txt.user.6d59f0c
/python-build/
*.pydevproject


@ -64,6 +64,8 @@ option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TB
option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" ON)
option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" ON)
option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module" ON)
option(GTSAM_ALLOW_DEPRECATED_SINCE_V4 "Allow use of methods/functions deprecated in GTSAM 4" ON)
# Options relating to MATLAB wrapper
# TODO: Check for matlab mex binary before handling building of binaries
@ -82,6 +84,10 @@ if(GTSAM_INSTALL_MATLAB_TOOLBOX AND GTSAM_BUILD_STATIC_LIBRARY)
message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and GTSAM_BUILD_STATIC_LIBRARY are both enabled. The MATLAB wrapper cannot be compiled with a static GTSAM library because mex modules are themselves shared libraries. If you want a self-contained mex module, enable GTSAM_MEX_BUILD_STATIC_MODULE instead of GTSAM_BUILD_STATIC_LIBRARY.")
endif()
if(GTSAM_BUILD_PYTHON AND GTSAM_ALLOW_DEPRECATED_SINCE_V4)
message(FATAL_ERROR "GTSAM_BUILD_PYTHON and GTSAM_ALLOW_DEPRECATED_SINCE_V4 are both enabled. The python module cannot be compiled with deprecated functions turned on. Turn one of the two options off.")
endif()
# Flags for choosing default packaging tools
set(CPACK_SOURCE_GENERATOR "TGZ" CACHE STRING "CPack Default Source Generator")
set(CPACK_GENERATOR "TGZ" CACHE STRING "CPack Default Binary Generator")
@ -131,7 +137,7 @@ endif()
if(NOT (${Boost_VERSION} LESS 105600))
message("Ignoring Boost restriction on optional lvalue assignment from rvalues")
add_definitions(-DBOOST_OPTIONAL_ALLOW_BINDING_TO_RVALUES)
add_definitions(-DBOOST_OPTIONAL_ALLOW_BINDING_TO_RVALUES -DBOOST_OPTIONAL_CONFIG_ALLOW_BINDING_TO_RVALUES)
endif()
###############################################################################
@ -158,6 +164,12 @@ else()
set(GTSAM_USE_TBB 0) # This will go into config.h
endif()
###############################################################################
# Prohibit Timing build mode in combination with TBB
if(GTSAM_USE_TBB AND (CMAKE_BUILD_TYPE STREQUAL "Timing"))
message(FATAL_ERROR "Timing build mode cannot be used together with TBB. Use a sampling profiler such as Instruments or Intel VTune Amplifier instead.")
endif()
###############################################################################
# Find Google perftools
@ -173,6 +185,11 @@ if(MKL_FOUND AND GTSAM_WITH_EIGEN_MKL)
set(EIGEN_USE_MKL_ALL 1) # This will go into config.h - it makes Eigen use MKL
include_directories(${MKL_INCLUDE_DIR})
list(APPEND GTSAM_ADDITIONAL_LIBRARIES ${MKL_LIBRARIES})
# --no-as-needed is required with gcc according to the MKL link advisor
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-as-needed")
endif()
else()
set(GTSAM_USE_EIGEN_MKL 0)
set(EIGEN_USE_MKL_ALL 0)
@ -266,7 +283,8 @@ endif()
# Include boost - use 'BEFORE' so that a specific boost specified to CMake
# takes precedence over a system-installed one.
include_directories(BEFORE ${Boost_INCLUDE_DIR})
# Use 'SYSTEM' to ignore compiler warnings in Boost headers
include_directories(BEFORE SYSTEM ${Boost_INCLUDE_DIR})
# Add includes for source directories 'BEFORE' boost and any system include
# paths so that the compiler uses GTSAM headers in our source directory instead
@ -293,10 +311,21 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
endif()
endif()
# As of XCode 7, clang also complains about this
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0)
add_definitions(-Wno-unused-local-typedefs)
endif()
endif()
if(GTSAM_ENABLE_CONSISTENCY_CHECKS)
add_definitions(-DGTSAM_EXTRA_CONSISTENCY_CHECKS)
endif()
if(GTSAM_ALLOW_DEPRECATED_SINCE_V4)
add_definitions(-DGTSAM_ALLOW_DEPRECATED_SINCE_V4)
endif()
###############################################################################
# Add components
@ -325,6 +354,20 @@ if (GTSAM_INSTALL_MATLAB_TOOLBOX)
add_subdirectory(matlab)
endif()
# Python wrap
if (GTSAM_BUILD_PYTHON)
include(GtsamPythonWrap)
# NOTE: The automatic generation of the python wrapper from the gtsampy.h interface is
# not working yet, so we're using handwritten wrapper files in python/handwritten.
# Once the python wrapping from the interface file is working, you can _swap_ the
# comments on the next lines
# wrap_and_install_python(gtsampy.h "${GTSAM_ADDITIONAL_LIBRARIES}" "")
add_subdirectory(python)
endif()
# Build gtsam_unstable
if (GTSAM_BUILD_UNSTABLE)
add_subdirectory(gtsam_unstable)
@ -375,6 +418,8 @@ set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.43)") #Example: "libc6 (>=
# Print configuration variables
message(STATUS "===============================================================")
message(STATUS "================ Configuration Options ======================")
message(STATUS " CMAKE_CXX_COMPILER_ID type : ${CMAKE_CXX_COMPILER_ID}")
message(STATUS " CMAKE_CXX_COMPILER_VERSION : ${CMAKE_CXX_COMPILER_VERSION}")
message(STATUS "Build flags ")
print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ")
print_config_flag(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all' ")
@ -435,10 +480,22 @@ print_config_flag(${GTSAM_USE_QUATERNIONS} "Quaternions as default R
print_config_flag(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ")
print_config_flag(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ")
print_config_flag(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ")
print_config_flag(${GTSAM_ALLOW_DEPRECATED_SINCE_V4} "Deprecated in GTSAM 4 allowed ")
message(STATUS "MATLAB toolbox flags ")
print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install matlab toolbox ")
print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ")
message(STATUS "Python module flags ")
if(GTSAM_PYTHON_WARNINGS)
message(STATUS " Build python module : No - dependencies missing")
else()
print_config_flag(${GTSAM_BUILD_PYTHON} "Build python module ")
endif()
if(GTSAM_BUILD_PYTHON)
message(STATUS " Python version : ${GTSAM_PYTHON_VERSION}")
endif()
message(STATUS "===============================================================")
# Print warnings at the end
@ -451,6 +508,9 @@ endif()
if(GTSAM_WITH_EIGEN_MKL_OPENMP AND NOT OPENMP_FOUND AND MKL_FOUND)
message(WARNING "Your compiler does not support OpenMP - this is ok, but performance may be improved with OpenMP. Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning.")
endif()
if(GTSAM_BUILD_PYTHON AND GTSAM_PYTHON_WARNINGS)
message(WARNING "${GTSAM_PYTHON_WARNINGS}")
endif()
# Include CPack *after* all flags
include(CPack)


@ -31,6 +31,7 @@ Prerequisites:
- [Boost](http://www.boost.org/users/download/) >= 1.43 (Ubuntu: `sudo apt-get install libboost-all-dev`)
- [CMake](http://www.cmake.org/cmake/resources/software.html) >= 2.6 (Ubuntu: `sudo apt-get install cmake`)
- A modern compiler, i.e., at least gcc 4.7.3 on Linux.
Optional prerequisites - used automatically if findable by CMake:
@ -46,6 +47,6 @@ See the [`INSTALL`](INSTALL) file for more detailed installation instructions.
GTSAM is open source under the BSD license, see the [`LICENSE`](LICENSE) and [`LICENSE.BSD`](LICENSE.BSD) files.
Please see the [`examples/`](examples) directory and the [`USAGE`](USAGE.md) file for examples on how to use GTSAM.
Please see the [`examples/`](examples) directory and the [`USAGE`](USAGE.md) file for examples on how to use GTSAM.
GTSAM was developed in the lab of [Frank Dellaert](http://www.cc.gatech.edu/~dellaert) at the [Georgia Institute of Technology](http://www.gatech.edu), with the help of many contributors over the years, see [THANKS](THANKS).

THANKS: 4 lines changed

@ -38,6 +38,10 @@ at Uni Zurich:
* Christian Forster
at LAAS-CNRS
* Ellon Paiva
Many thanks for your hard work!!!!
Frank Dellaert

cmake/FindNumPy.cmake (new file): 102 lines

@ -0,0 +1,102 @@
# - Find the NumPy libraries
# This module finds if NumPy is installed, and sets the following variables
# indicating where it is.
#
# TODO: Update to provide the libraries and paths for linking npymath lib.
#
# NUMPY_FOUND - was NumPy found
# NUMPY_VERSION - the version of NumPy found as a string
# NUMPY_VERSION_MAJOR - the major version number of NumPy
# NUMPY_VERSION_MINOR - the minor version number of NumPy
# NUMPY_VERSION_PATCH - the patch version number of NumPy
# NUMPY_VERSION_DECIMAL - e.g. version 1.6.1 is 10601
# NUMPY_INCLUDE_DIRS - path to the NumPy include files
#============================================================================
# Copyright 2012 Continuum Analytics, Inc.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#============================================================================
# Finding NumPy involves calling the Python interpreter
if(NumPy_FIND_REQUIRED)
find_package(PythonInterp REQUIRED)
else()
find_package(PythonInterp)
endif()
if(NOT PYTHONINTERP_FOUND)
set(NUMPY_FOUND FALSE)
return()
endif()
execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
"import numpy as n; print(n.__version__); print(n.get_include());"
RESULT_VARIABLE _NUMPY_SEARCH_SUCCESS
OUTPUT_VARIABLE _NUMPY_VALUES_OUTPUT
ERROR_VARIABLE _NUMPY_ERROR_VALUE
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(NOT _NUMPY_SEARCH_SUCCESS MATCHES 0)
if(NumPy_FIND_REQUIRED)
message(FATAL_ERROR
"NumPy import failure:\n${_NUMPY_ERROR_VALUE}")
endif()
set(NUMPY_FOUND FALSE)
return()
endif()
# Convert the process output into a list
string(REGEX REPLACE ";" "\\\\;" _NUMPY_VALUES ${_NUMPY_VALUES_OUTPUT})
string(REGEX REPLACE "\n" ";" _NUMPY_VALUES ${_NUMPY_VALUES})
# Just in case there is unexpected output from the Python command.
list(GET _NUMPY_VALUES -2 NUMPY_VERSION)
list(GET _NUMPY_VALUES -1 NUMPY_INCLUDE_DIRS)
string(REGEX MATCH "^[0-9]+\\.[0-9]+\\.[0-9]+" _VER_CHECK "${NUMPY_VERSION}")
if("${_VER_CHECK}" STREQUAL "")
# The output from Python was unexpected. Raise an error always
# here, because we found NumPy, but it appears to be corrupted somehow.
message(FATAL_ERROR
"Requested version and include path from NumPy, got instead:\n${_NUMPY_VALUES_OUTPUT}\n")
return()
endif()
# Make sure all directory separators are '/'
string(REGEX REPLACE "\\\\" "/" NUMPY_INCLUDE_DIRS ${NUMPY_INCLUDE_DIRS})
# Get the major and minor version numbers
string(REGEX REPLACE "\\." ";" _NUMPY_VERSION_LIST ${NUMPY_VERSION})
list(GET _NUMPY_VERSION_LIST 0 NUMPY_VERSION_MAJOR)
list(GET _NUMPY_VERSION_LIST 1 NUMPY_VERSION_MINOR)
list(GET _NUMPY_VERSION_LIST 2 NUMPY_VERSION_PATCH)
string(REGEX MATCH "[0-9]*" NUMPY_VERSION_PATCH ${NUMPY_VERSION_PATCH})
math(EXPR NUMPY_VERSION_DECIMAL
"(${NUMPY_VERSION_MAJOR} * 10000) + (${NUMPY_VERSION_MINOR} * 100) + ${NUMPY_VERSION_PATCH}")
find_package_message(NUMPY
"Found NumPy: version \"${NUMPY_VERSION}\" ${NUMPY_INCLUDE_DIRS}"
"${NUMPY_INCLUDE_DIRS}${NUMPY_VERSION}")
set(NUMPY_FOUND TRUE)


@ -34,19 +34,19 @@ if(NOT FIRST_PASS_DONE)
set(CMAKE_MODULE_LINKER_FLAGS_PROFILING "${CMAKE_MODULE_LINKER_FLAGS_RELEASE}" CACHE STRING "Linker flags during profiling builds." FORCE)
mark_as_advanced(CMAKE_C_FLAGS_PROFILING CMAKE_CXX_FLAGS_PROFILING CMAKE_EXE_LINKER_FLAGS_PROFILING CMAKE_SHARED_LINKER_FLAGS_PROFILING CMAKE_MODULE_LINKER_FLAGS_PROFILING)
else()
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -fno-inline -Wall -DEIGEN_INITIALIZE_MATRICES_BY_NAN" CACHE STRING "Flags used by the compiler during debug builds." FORCE)
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-inline -Wall -DEIGEN_INITIALIZE_MATRICES_BY_NAN" CACHE STRING "Flags used by the compiler during debug builds." FORCE)
set(CMAKE_C_FLAGS_RELWITHDEBINFO "-g -O3 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during relwithdebinfo builds." FORCE)
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-g -O3 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during relwithdebinfo builds." FORCE)
set(CMAKE_C_FLAGS_RELEASE "-O3 -Wall -DNDEBUG -Wall" CACHE STRING "Flags used by the compiler during release builds." FORCE)
set(CMAKE_CXX_FLAGS_RELEASE "-O3 -Wall -DNDEBUG -Wall" CACHE STRING "Flags used by the compiler during release builds." FORCE)
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -std=c11 -fno-inline -Wall -DEIGEN_INITIALIZE_MATRICES_BY_NAN" CACHE STRING "Flags used by the compiler during debug builds." FORCE)
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -std=c++11 -fno-inline -Wall -DEIGEN_INITIALIZE_MATRICES_BY_NAN" CACHE STRING "Flags used by the compiler during debug builds." FORCE)
set(CMAKE_C_FLAGS_RELWITHDEBINFO "-std=c11 -g -O3 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during relwithdebinfo builds." FORCE)
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-std=c++11 -g -O3 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during relwithdebinfo builds." FORCE)
set(CMAKE_C_FLAGS_RELEASE "-std=c11 -O3 -Wall -DNDEBUG -Wall" CACHE STRING "Flags used by the compiler during release builds." FORCE)
set(CMAKE_CXX_FLAGS_RELEASE "-std=c++11 -O3 -Wall -DNDEBUG -Wall" CACHE STRING "Flags used by the compiler during release builds." FORCE)
set(CMAKE_C_FLAGS_TIMING "${CMAKE_C_FLAGS_RELEASE} -DENABLE_TIMING" CACHE STRING "Flags used by the compiler during timing builds." FORCE)
set(CMAKE_CXX_FLAGS_TIMING "${CMAKE_CXX_FLAGS_RELEASE} -DENABLE_TIMING" CACHE STRING "Flags used by the compiler during timing builds." FORCE)
set(CMAKE_EXE_LINKER_FLAGS_TIMING "${CMAKE_EXE_LINKER_FLAGS_RELEASE}" CACHE STRING "Linker flags during timing builds." FORCE)
set(CMAKE_SHARED_LINKER_FLAGS_TIMING "${CMAKE_EXE_LINKER_FLAGS_RELEASE}" CACHE STRING "Linker flags during timing builds." FORCE)
mark_as_advanced(CMAKE_C_FLAGS_TIMING CMAKE_CXX_FLAGS_TIMING CMAKE_EXE_LINKER_FLAGS_TIMING CMAKE_SHARED_LINKER_FLAGS_TIMING)
set(CMAKE_C_FLAGS_PROFILING "-g -O3 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during profiling builds." FORCE)
set(CMAKE_CXX_FLAGS_PROFILING "-g -O3 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during profiling builds." FORCE)
set(CMAKE_C_FLAGS_PROFILING "-std=c11 -g -O3 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during profiling builds." FORCE)
set(CMAKE_CXX_FLAGS_PROFILING "-std=c++11 -g -O3 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during profiling builds." FORCE)
set(CMAKE_EXE_LINKER_FLAGS_PROFILING "${CMAKE_EXE_LINKER_FLAGS_RELEASE}" CACHE STRING "Linker flags during profiling builds." FORCE)
set(CMAKE_SHARED_LINKER_FLAGS_PROFILING "${CMAKE__LINKER_FLAGS_RELEASE}" CACHE STRING "Linker flags during profiling builds." FORCE)
mark_as_advanced(CMAKE_C_FLAGS_PROFILING CMAKE_CXX_FLAGS_PROFILING CMAKE_EXE_LINKER_FLAGS_PROFILING CMAKE_SHARED_LINKER_FLAGS_PROFILING)


@ -1,5 +1,5 @@
#Setup cache options
option(GTSAM_BUILD_PYTHON "Build Python wrapper statically (increases build time)" OFF)
set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "Target python version for GTSAM python module. Use 'Default' to choose the default version")
set(GTSAM_BUILD_PYTHON_FLAGS "" CACHE STRING "Extra flags for running Python compilation")
set(GTSAM_PYTHON_INSTALL_PATH "" CACHE PATH "Python toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/borg/python")
if(NOT GTSAM_PYTHON_INSTALL_PATH)
@ -8,13 +8,13 @@ endif()
#Author: Paul Furgale Modified by Andrew Melim
function(wrap_python TARGET_NAME PYTHON_MODULE_DIRECTORY)
# Boost
find_package(Boost COMPONENTS python filesystem system REQUIRED)
include_directories(${Boost_INCLUDE_DIRS})
# # Boost
# find_package(Boost COMPONENTS python filesystem system REQUIRED)
# include_directories(${Boost_INCLUDE_DIRS})
# Find Python
FIND_PACKAGE(PythonLibs 2.7 REQUIRED)
INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIRS})
# # Find Python
# FIND_PACKAGE(PythonLibs 2.7 REQUIRED)
# INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIRS})
IF(APPLE)
# The apple framework headers don't include the numpy headers for some reason.
@ -36,23 +36,46 @@ function(wrap_python TARGET_NAME PYTHON_MODULE_DIRECTORY)
ENDIF()
ENDIF(APPLE)
if(MSVC)
add_library(${moduleName}_python MODULE ${ARGN})
set_target_properties(${moduleName}_python PROPERTIES
OUTPUT_NAME ${moduleName}_python
CLEAN_DIRECT_OUTPUT 1
VERSION 1
SOVERSION 0
SUFFIX ".pyd")
target_link_libraries(${moduleName}_python ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARY} ${gtsamLib}) #temp
# Create a static library version
add_library(${TARGET_NAME} SHARED ${ARGN})
set(PYLIB_OUTPUT_FILE $<TARGET_FILE:${moduleName}_python>)
message(${PYLIB_OUTPUT_FILE})
get_filename_component(PYLIB_OUTPUT_NAME ${PYLIB_OUTPUT_FILE} NAME_WE)
set(PYLIB_SO_NAME ${PYLIB_OUTPUT_NAME}.pyd)
target_link_libraries(${TARGET_NAME} ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARY} gtsam-shared)
set_target_properties(${TARGET_NAME} PROPERTIES
OUTPUT_NAME ${TARGET_NAME}
CLEAN_DIRECT_OUTPUT 1
VERSION 1
SOVERSION 0)
ELSE()
# Create a shared library
add_library(${moduleName}_python SHARED ${generated_cpp_file})
set_target_properties(${moduleName}_python PROPERTIES
OUTPUT_NAME ${moduleName}_python
CLEAN_DIRECT_OUTPUT 1)
target_link_libraries(${moduleName}_python ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARY} ${gtsamLib}) #temp
# On OSX and Linux, the python library must end in the extension .so. Build this
# filename here.
get_property(PYLIB_OUTPUT_FILE TARGET ${moduleName}_python PROPERTY LOCATION)
set(PYLIB_OUTPUT_FILE $<TARGET_FILE:${moduleName}_python>)
message(${PYLIB_OUTPUT_FILE})
get_filename_component(PYLIB_OUTPUT_NAME ${PYLIB_OUTPUT_FILE} NAME_WE)
set(PYLIB_SO_NAME lib${moduleName}_python.so)
ENDIF(MSVC)
# On OSX and Linux, the python library must end in the extension .so. Build this
# filename here.
get_property(PYLIB_OUTPUT_FILE TARGET ${TARGET_NAME} PROPERTY LOCATION)
get_filename_component(PYLIB_OUTPUT_NAME ${PYLIB_OUTPUT_FILE} NAME_WE)
set(PYLIB_SO_NAME ${PYLIB_OUTPUT_NAME}.so)
# Installs the library in the gtsam folder, which is used by setup.py to create the gtsam package
set(PYTHON_MODULE_DIRECTORY ${CMAKE_SOURCE_DIR}/python/gtsam)
# Cause the library to be output in the correct directory.
add_custom_command(TARGET ${moduleName}_python
POST_BUILD
COMMAND cp -v ${PYLIB_OUTPUT_FILE} ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME}
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
COMMENT "Copying library files to python directory" )
# Cause the library to be output in the correct directory.
add_custom_command(TARGET ${TARGET_NAME}
@ -64,4 +87,16 @@ function(wrap_python TARGET_NAME PYTHON_MODULE_DIRECTORY)
get_directory_property(AMCF ADDITIONAL_MAKE_CLEAN_FILES)
list(APPEND AMCF ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME})
set_directory_properties(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES "${AMCF}")
endfunction(wrap_python)
endfunction(wrap_python)
# Macro to get list of subdirectories
macro(SUBDIRLIST result curdir)
file(GLOB children RELATIVE ${curdir} ${curdir}/*)
set(dirlist "")
foreach(child ${children})
if(IS_DIRECTORY ${curdir}/${child})
list(APPEND dirlist ${child})
endif()
endforeach()
set(${result} ${dirlist})
endmacro()


@ -195,7 +195,9 @@ macro(gtsamAddTestsGlob_impl groupName globPatterns excludedFiles linkLibraries)
add_test(NAME ${target_name} COMMAND ${target_name})
add_dependencies(check.${groupName} ${target_name})
add_dependencies(check ${target_name})
add_dependencies(all.tests ${script_name})
if(NOT XCODE_VERSION)
add_dependencies(all.tests ${script_name})
endif()
# Add TOPSRCDIR
set_property(SOURCE ${script_srcs} APPEND PROPERTY COMPILE_DEFINITIONS "TOPSRCDIR=\"${PROJECT_SOURCE_DIR}\"")

doc/.gitignore (vendored): 2 lines changed

@ -1 +1,3 @@
/html/
*.lyx~
*.bib~

doc/ImuFactor.lyx (new file): 1399 lines (diff suppressed because it is too large)

doc/ImuFactor.pdf (new binary file, diff not shown)


@ -76,335 +76,10 @@ Frank Dellaert
\end_layout
\begin_layout Standard
\begin_inset Note Comment
status open
\begin_inset CommandInset include
LatexCommand include
filename "macros.lyx"
\begin_layout Plain Layout
Derivatives
\end_layout
\end_inset
\end_layout
\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\deriv}[2]{\frac{\partial#1}{\partial#2}}
{\frac{\partial#1}{\partial#2}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\at}[2]{#1\biggr\rvert_{#2}}
{#1\biggr\rvert_{#2}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\Jac}[3]{ \at{\deriv{#1}{#2}} {#3} }
{\at{\deriv{#1}{#2}}{#3}}
\end_inset
\end_layout
\begin_layout Standard
\begin_inset Note Comment
status open
\begin_layout Plain Layout
Lie Groups
\end_layout
\end_inset
\end_layout
\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\xhat}{\hat{x}}
{\hat{x}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\yhat}{\hat{y}}
{\hat{y}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\Ad}[1]{Ad_{#1}}
{Ad_{#1}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\AAdd}[1]{\mathbf{\mathop{Ad}}{}_{#1}}
{\mathbf{\mathop{Ad}}{}_{#1}}
\end_inset
\end_layout
\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\define}{\stackrel{\Delta}{=}}
{\stackrel{\Delta}{=}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\gg}{\mathfrak{g}}
{\mathfrak{g}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\Rn}{\mathbb{R}^{n}}
{\mathbb{R}^{n}}
\end_inset
\end_layout
\begin_layout Standard
\begin_inset Note Comment
status open
\begin_layout Plain Layout
SO(2), 1
\end_layout
\end_inset
\end_layout
\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\Rtwo}{\mathfrak{\mathbb{R}^{2}}}
{\mathfrak{\mathbb{R}^{2}}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\SOtwo}{SO(2)}
{SO(2)}
\end_inset
\begin_inset FormulaMacro
\newcommand{\sotwo}{\mathfrak{so(2)}}
{\mathfrak{so(2)}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\that}{\hat{\theta}}
{\hat{\theta}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\skew}[1]{[#1]_{+}}
{[#1]_{+}}
\end_inset
\end_layout
\begin_layout Standard
\begin_inset Note Comment
status open
\begin_layout Plain Layout
SE(2), 3
\end_layout
\end_inset
\end_layout
\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\SEtwo}{SE(2)}
{SE(2)}
\end_inset
\begin_inset FormulaMacro
\newcommand{\setwo}{\mathfrak{se(2)}}
{\mathfrak{se(2)}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\Skew}[1]{[#1]_{\times}}
{[#1]_{\times}}
\end_inset
\end_layout
\begin_layout Standard
\begin_inset Note Comment
status open
\begin_layout Plain Layout
SO(3), 3
\end_layout
\end_inset
\end_layout
\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\Rthree}{\mathfrak{\mathbb{R}^{3}}}
{\mathfrak{\mathbb{R}^{3}}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\SOthree}{SO(3)}
{SO(3)}
\end_inset
\begin_inset FormulaMacro
\newcommand{\sothree}{\mathfrak{so(3)}}
{\mathfrak{so(3)}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\what}{\hat{\omega}}
{\hat{\omega}}
\end_inset
\end_layout
\begin_layout Standard
\begin_inset Note Comment
status open
\begin_layout Plain Layout
SE(3),6
\end_layout
\end_inset
\end_layout
\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\Rsix}{\mathfrak{\mathbb{R}^{6}}}
{\mathfrak{\mathbb{R}^{6}}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\SEthree}{SE(3)}
{SE(3)}
\end_inset
\begin_inset FormulaMacro
\newcommand{\sethree}{\mathfrak{se(3)}}
{\mathfrak{se(3)}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\xihat}{\hat{\xi}}
{\hat{\xi}}
\end_inset
\end_layout
\begin_layout Standard
\begin_inset Note Comment
status open
\begin_layout Plain Layout
Aff(2),6
\end_layout
\end_inset
\end_layout
\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\Afftwo}{Aff(2)}
{Aff(2)}
\end_inset
\begin_inset FormulaMacro
\newcommand{\afftwo}{\mathfrak{aff(2)}}
{\mathfrak{aff(2)}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\aa}{a}
{a}
\end_inset
\begin_inset FormulaMacro
\newcommand{\ahat}{\hat{a}}
{\hat{a}}
\end_inset
\end_layout
\begin_layout Standard
\begin_inset Note Comment
status collapsed
\begin_layout Plain Layout
SL(3),8
\end_layout
\end_inset
\end_layout
\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\SLthree}{SL(3)}
{SL(3)}
\end_inset
\begin_inset FormulaMacro
\newcommand{\slthree}{\mathfrak{sl(3)}}
{\mathfrak{sl(3)}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\hh}{h}
{h}
\end_inset
\begin_inset FormulaMacro
\newcommand{\hhat}{\hat{h}}
{\hat{h}}
\end_inset


@ -1,42 +1,60 @@
#LyX 1.6.5 created this file. For more info see http://www.lyx.org/
\lyxformat 345
#LyX 2.0 created this file. For more info see http://www.lyx.org/
\lyxformat 413
\begin_document
\begin_header
\textclass article
\use_default_options true
\maintain_unincluded_children false
\language english
\language_package default
\inputencoding auto
\fontencoding global
\font_roman default
\font_sans default
\font_typewriter default
\font_default_family default
\use_non_tex_fonts false
\font_sc false
\font_osf false
\font_sf_scale 100
\font_tt_scale 100
\graphics default
\default_output_format default
\output_sync 0
\bibtex_command default
\index_command default
\paperfontsize default
\use_hyperref false
\papersize default
\use_geometry false
\use_amsmath 1
\use_esint 1
\use_mhchem 1
\use_mathdots 0
\cite_engine basic
\use_bibtopic false
\use_indices false
\paperorientation portrait
\suppress_date false
\use_refstyle 0
\index Index
\shortcut idx
\color #008000
\end_index
\secnumdepth 3
\tocdepth 3
\paragraph_separation indent
\defskip medskip
\paragraph_indentation default
\quotes_language english
\papercolumns 1
\papersides 1
\paperpagestyle default
\tracking_changes false
\output_changes false
\author ""
\author ""
\html_math_output 0
\html_css_as_file 0
\html_be_strict false
\end_header
\begin_body
@ -62,14 +80,14 @@ Derivatives
\begin_inset FormulaMacro
\newcommand{\at}[2]{#1\biggr\rvert_{#2}}
{#1\biggr\rvert_{#2}}
\newcommand{\at}[1]{#1\biggr\vert_{\#2}}
{#1\biggr\vert_{\#2}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\Jac}[3]{ \at{\deriv{#1}{#2}} {#3} }
{\at{\deriv{#1}{#2}}{#3}}
{\at{\deriv{#1}{#2}}#3}
\end_inset
@ -107,6 +125,15 @@ Lie Groups
\end_inset
\end_layout
\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\AAdd}[1]{\mathbf{\mathop{Ad}}{}_{#1}}
{\mathbf{\mathop{Ad}}{}_{#1}}
\end_inset
\end_layout
\begin_layout Standard
@ -144,6 +171,12 @@ SO(2)
\end_layout
\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\Rone}{\mathfrak{\mathbb{R}}}
{\mathfrak{\mathbb{R}}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\Rtwo}{\mathfrak{\mathbb{R}^{2}}}
{\mathfrak{\mathbb{R}^{2}}}
@ -202,6 +235,12 @@ SE(2)
\end_inset
\begin_inset FormulaMacro
\newcommand{\Skew}[1]{[#1]_{\times}}
{[#1]_{\times}}
\end_inset
\end_layout
\begin_layout Standard
@ -243,7 +282,7 @@ SO(3)
\begin_inset FormulaMacro
\newcommand{\Skew}[1]{[#1]_{\times}}
\renewcommand{\Skew}[1]{[#1]_{\times}}
{[#1]_{\times}}
\end_inset
@ -288,6 +327,86 @@ SE(3)
\end_inset
\end_layout
\begin_layout Standard
\begin_inset Note Comment
status open
\begin_layout Plain Layout
Aff(2),6
\end_layout
\end_inset
\end_layout
\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\Afftwo}{Aff(2)}
{Aff(2)}
\end_inset
\begin_inset FormulaMacro
\newcommand{\afftwo}{\mathfrak{aff(2)}}
{\mathfrak{aff(2)}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\aa}{a}
{a}
\end_inset
\begin_inset FormulaMacro
\newcommand{\ahat}{\hat{a}}
{\hat{a}}
\end_inset
\end_layout
\begin_layout Standard
\begin_inset Note Comment
status collapsed
\begin_layout Plain Layout
SL(3),8
\end_layout
\end_inset
\end_layout
\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\SLthree}{SL(3)}
{SL(3)}
\end_inset
\begin_inset FormulaMacro
\newcommand{\slthree}{\mathfrak{sl(3)}}
{\mathfrak{sl(3)}}
\end_inset
\begin_inset FormulaMacro
\newcommand{\hh}{h}
{h}
\end_inset
\begin_inset FormulaMacro
\newcommand{\hhat}{\hat{h}}
{\hat{h}}
\end_inset
\end_layout
\end_body


@ -1237,21 +1237,28 @@ reference "eq:ApproximateObjective"
\end_inset
.
In particular, the notion of an exponential map allows us to define an
incremental transformation as tracing out a geodesic curve on the group
manifold along a certain
In particular, the notion of an exponential map allows us to define a mapping
from
\series bold
tangent vector
local coordinates
\series default
\begin_inset Formula $\xi$
\end_inset
back to a neighborhood in
\begin_inset Formula $G$
\end_inset
around
\begin_inset Formula $a$
\end_inset
,
\begin_inset Formula
\[
a\oplus\xi\define a\exp\left(\hat{\xi}\right)
\]
\begin{equation}
a\oplus\xi\define a\exp\left(\hat{\xi}\right)\label{eq:expmap}
\end{equation}
\end_inset
@ -1263,11 +1270,12 @@ with
\begin_inset Formula $n$
\end_inset
-dimensional Lie group,
-dimensional Lie group.
Above,
\begin_inset Formula $\hat{\xi}\in\mathfrak{g}$
\end_inset
the Lie algebra element corresponding to the vector
is the Lie algebra element corresponding to the vector
\begin_inset Formula $\xi$
\end_inset
@ -1305,7 +1313,7 @@ For the Lie group
\end_inset
is denoted as
\begin_inset Formula $\omega$
\begin_inset Formula $\omega t$
\end_inset
and represents an angular displacement.
@ -1314,17 +1322,17 @@ For the Lie group
\end_inset
is a skew symmetric matrix denoted as
\begin_inset Formula $\Skew{\omega}\in\sothree$
\begin_inset Formula $\Skew{\omega t}\in\sothree$
\end_inset
, and is given by
\begin_inset Formula
\[
\Skew{\omega}=\left[\begin{array}{ccc}
\Skew{\omega t}=\left[\begin{array}{ccc}
0 & -\omega_{z} & \omega_{y}\\
\omega_{z} & 0 & -\omega_{x}\\
-\omega_{y} & \omega_{x} & 0
\end{array}\right]
\end{array}\right]t
\]
\end_inset
@ -1334,12 +1342,136 @@ Finally, the increment
\end_inset
corresponds to an incremental rotation
\begin_inset Formula $R\oplus\omega=Re^{\Skew{\omega}}$
\begin_inset Formula $R\oplus\omega t=Re^{\Skew{\omega t}}$
\end_inset
.
\end_layout
\begin_layout Subsection
Local Coordinates vs.
Tangent Vectors
\end_layout
\begin_layout Standard
In differential geometry,
\series bold
tangent vectors
\series default
\begin_inset Formula $v\in T_{a}G$
\end_inset
at
\begin_inset Formula $a$
\end_inset
are elements of the Lie algebra
\begin_inset Formula $\mathfrak{g}$
\end_inset
, and are defined as
\begin_inset Formula
\[
v\define\Jac{\gamma(t)}t{t=0}
\]
\end_inset
where
\begin_inset Formula $\gamma$
\end_inset
is some curve that passes through
\begin_inset Formula $a$
\end_inset
at
\begin_inset Formula $t=0$
\end_inset
, i.e.
\begin_inset Formula $\gamma(0)=a$
\end_inset
.
In particular, for any fixed local coordinate
\begin_inset Formula $\xi$
\end_inset
the map
\begin_inset CommandInset ref
LatexCommand eqref
reference "eq:expmap"
\end_inset
can be used to define a
\series bold
geodesic curve
\series default
on the group manifold defined by
\begin_inset Formula $\gamma:t\mapsto ae^{\widehat{t\xi}}$
\end_inset
, and the corresponding tangent vector is given by
\begin_inset Formula
\begin{equation}
\Jac{ae^{\widehat{t\xi}}}t{t=0}=a\xihat\label{eq:tangent-vector}
\end{equation}
\end_inset
This defines the mapping between local coordinates
\begin_inset Formula $\xi\in\Rn$
\end_inset
and actual tangent vectors
\begin_inset Formula $a\xihat\in g$
\end_inset
: the vector
\begin_inset Formula $\xi$
\end_inset
defines a direction of travel on the manifold, but does so in the local
coordinate frame
\begin_inset Formula $a$
\end_inset
.
\end_layout
\begin_layout Example
Assume a rigid body's attitude is described by
\begin_inset Formula $R_{b}^{n}(t)$
\end_inset
, where the indices denote the navigation frame
\begin_inset Formula $N$
\end_inset
and body frame
\begin_inset Formula $B$
\end_inset
, respectively.
An extrinsically calibrated gyroscope measures the angular velocity
\begin_inset Formula $\omega^{b}$
\end_inset
, in the body frame, and the corresponding tangent vector is
\begin_inset Formula
\[
\dot{R}_{b}^{n}(t)=R_{b}^{n}(t)\widehat{\omega^{b}}
\]
\end_inset
\end_layout
\begin_layout Subsection
Derivatives
\end_layout
@ -1352,7 +1484,7 @@ reference "def:differentiable"
\end_inset
to map exponential coordinates
to map local coordinates
\begin_inset Formula $\xi$
\end_inset
@ -1368,7 +1500,7 @@ reference "def:differentiable"
\begin_inset Formula $Df_{a}$
\end_inset
locally approximates a function
approximates the function
\begin_inset Formula $f$
\end_inset
@ -1378,6 +1510,10 @@ reference "def:differentiable"
to
\begin_inset Formula $\Reals m$
\end_inset
in a neighborhood around
\begin_inset Formula $a$
\end_inset
:
@ -1455,41 +1591,6 @@ derivative
.
\end_layout
\begin_layout Standard
Note that the vectors
\begin_inset Formula $\xi$
\end_inset
can be viewed as lying in the tangent space to
\begin_inset Formula $G$
\end_inset
at
\begin_inset Formula $a$
\end_inset
, but defining this rigorously would take us on a longer tour of differential
geometry.
Informally,
\begin_inset Formula $\xi$
\end_inset
is simply the direction, in a local coordinate frame, that is locally tangent
at
\begin_inset Formula $a$
\end_inset
to a geodesic curve
\begin_inset Formula $\gamma:t\mapsto ae^{\widehat{t\xi}}$
\end_inset
traced out by the exponential map, with
\begin_inset Formula $\gamma(0)=a$
\end_inset
.
\end_layout
\begin_layout Subsection
Derivative of an Action
\begin_inset CommandInset label
@ -3000,7 +3101,7 @@ f(ge^{\xhat})=f(ge^{\xhat}g^{-1}g)=f(e^{\Ad g\xhat}g)
\end_layout
\begin_layout Subsection
Derivative of the Exponential and Logarithm Map
Derivative of the Exponential Map
\end_layout
\begin_layout Theorem
@ -3064,17 +3165,196 @@ For
\begin_inset Formula $\xi\neq0$
\end_inset
, things are not simple, see .
, things are not simple.
As with pushforwards above, we will be looking for an
\begin_inset Formula $n\times n$
\end_inset
\begin_inset Flex URL
\family roman
\series medium
\shape up
\size normal
\emph off
\bar no
\strikeout off
\uuline off
\uwave off
\noun off
\color none
Jacobian
\begin_inset Formula $f'(\xi)$
\end_inset
such that
\family default
\series default
\shape default
\size default
\emph default
\bar default
\strikeout default
\uuline default
\uwave default
\noun default
\color inherit
\begin_inset Formula
\begin{equation}
f\left(\xi+\delta\right)\approx f\left(\xi\right)\exp\left(\widehat{f'(\xi)\delta}\right)\label{eq:push_exp}
\end{equation}
\end_inset
Differential geometry tells us that for any Lie algebra element
\begin_inset Formula $\xihat\in\mathfrak{g}$
\end_inset
there exists a
\emph on
linear
\emph default
map
\begin_inset Formula $d\exp_{\xihat}:T_{\xihat}\mathfrak{g}\rightarrow T_{\exp(\xihat)}G$
\end_inset
, which is given by
\begin_inset Foot
status collapsed
\begin_layout Plain Layout
See
\begin_inset Flex URL
status open
http://deltaepsilons.wordpress.com/2009/11/06/helgasons-formula-for-the-differenti
al-of-the-exponential/
\begin_layout Plain Layout
http://deltaepsilons.wordpress.com/2009/11/06/
\end_layout
\end_inset
or
\begin_inset Flex URL
status open
\begin_layout Plain Layout
https://en.wikipedia.org/wiki/Derivative_of_the_exponential_map
\end_layout
\end_inset
.
\end_layout
\end_inset
\begin_inset Formula
\begin{equation}
d\exp_{\xihat}\hat{x}=\exp(\xihat)\frac{1-\exp(-ad_{\xihat})}{ad_{\xihat}}\hat{x}\label{eq:dexp}
\end{equation}
\end_inset
with
\begin_inset Formula $\hat{x}\in T_{\xihat}\mathfrak{g}$
\end_inset
and
\begin_inset Formula $ad_{\xihat}$
\end_inset
itself a linear map taking
\begin_inset Formula $\hat{x}$
\end_inset
to
\begin_inset Formula $[\xihat,\hat{x}]$
\end_inset
, the Lie bracket.
The actual formula above is not really as important as the fact that the
linear map exists, although it is expressed directly in terms of tangent
vectors to
\begin_inset Formula $\mathfrak{g}$
\end_inset
and
\begin_inset Formula $G$
\end_inset
.
Equation
\begin_inset CommandInset ref
LatexCommand eqref
reference "eq:dexp"
\end_inset
is a tangent vector, and comparing with
\begin_inset CommandInset ref
LatexCommand eqref
reference "eq:tangent-vector"
\end_inset
we see that it maps to local coordinates
\begin_inset Formula $y$
\end_inset
as follows:
\begin_inset Formula
\[
\yhat=\frac{1-\exp(-ad_{\xihat})}{ad_{\xihat}}\hat{x}
\]
\end_inset
which can be used to construct the Jacobian
\begin_inset Formula $f'(\xi)$
\end_inset
.
\end_layout
\begin_layout Example
For
\begin_inset Formula $\SOthree$
\end_inset
, the operator
\begin_inset Formula $ad_{\xihat}$
\end_inset
is simply a matrix multiplication when representing
\begin_inset Formula $\sothree$
\end_inset
using 3-vectors, i.e.,
\begin_inset Formula $ad_{\xihat}x=\xihat x$
\end_inset
, and the
\begin_inset Formula $3\times3$
\end_inset
Jacobian corresponding to
\begin_inset Formula $d\exp$
\end_inset
is
\begin_inset Formula
\[
f'(\xi)=\frac{I_{3\times3}-\exp(-\xihat)}{\xihat}=\sum_{k=0}^{\infty}\frac{(-1)^{k}}{(k+1)!}\xihat^{k}
\]
\end_inset
which, similar to the exponential map, has a simple closed form expression
for
\begin_inset Formula $\SOthree$
\end_inset
.
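
A well-known closed form of this series, with \theta=\left\Vert \xi\right\Vert , is
\[
f'(\xi)=I_{3\times3}-\frac{1-\cos\theta}{\theta^{2}}\hat{\xi}+\frac{\theta-\sin\theta}{\theta^{3}}\hat{\xi}^{2}
\]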
@ -3097,7 +3377,7 @@ Retractions
\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\retract}{\mathcal{R}}
\renewcommand{\retract}{\mathcal{R}}
{\mathcal{R}}
\end_inset
@ -6797,7 +7077,7 @@ Then
\begin_layout Standard
\begin_inset CommandInset bibtex
LatexCommand bibtex
bibfiles "/Users/dellaert/papers/refs"
bibfiles "refs"
options "plain"
\end_inset

doc/refs.bib (new file): 26 lines

@ -0,0 +1,26 @@
@article{Iserles00an,
title = {Lie-group methods},
author = {Iserles, Arieh and Munthe-Kaas, Hans Z and
N{\o}rsett, Syvert P and Zanna, Antonella},
journal = {Acta Numerica 2000},
volume = {9},
pages = {215--365},
year = {2000},
publisher = {Cambridge Univ Press}
}
@book{Murray94book,
title = {A mathematical introduction to robotic manipulation},
author = {Murray, Richard M and Li, Zexiang and Sastry, S
Shankar and Sastry, S Shankara},
year = {1994},
publisher = {CRC press}
}
@book{Spivak65book,
title = {Calculus on manifolds},
author = {Spivak, Michael},
volume = {1},
year = {1965},
publisher = {WA Benjamin New York}
}


@ -35,7 +35,7 @@ void createExampleBALFile(const string& filename, const vector<Point3>& P,
SfM_data data;
// Create two cameras
Rot3 aRb = Rot3::yaw(M_PI_2);
Rot3 aRb = Rot3::Yaw(M_PI_2);
Point3 aTb(0.1, 0, 0);
Pose3 identity, aPb(aRb, aTb);
data.cameras.push_back(SfM_Camera(pose1, K));
@ -66,7 +66,7 @@ void createExampleBALFile(const string& filename, const vector<Point3>& P,
void create5PointExample1() {
// Create two cameras poses
Rot3 aRb = Rot3::yaw(M_PI_2);
Rot3 aRb = Rot3::Yaw(M_PI_2);
Point3 aTb(0.1, 0, 0);
Pose3 pose1, pose2(aRb, aTb);
@ -85,7 +85,7 @@ void create5PointExample1() {
void create5PointExample2() {
// Create two cameras poses
Rot3 aRb = Rot3::yaw(M_PI_2);
Rot3 aRb = Rot3::Yaw(M_PI_2);
Point3 aTb(10, 0, 0);
Pose3 pose1, pose2(aRb, aTb);

examples/Data/.gitignore (new file, vendored): 1 line

@ -0,0 +1 @@
*.txt


@ -42,7 +42,7 @@
// Also, we will initialize the robot at the origin using a Prior factor.
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/BearingRangeFactor.h>
#include <gtsam/sam/BearingRangeFactor.h>
// When the factors are created, we will add them to a Factor Graph. As the factors we are using
// are nonlinear factors, we will need a Nonlinear Factor Graph.


@ -39,7 +39,7 @@
// have been provided with the library for solving robotics SLAM problems.
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/RangeFactor.h>
#include <gtsam/sam/RangeFactor.h>
#include <gtsam/slam/dataset.h>
// Standard headers, added last, so we know headers above work on their own
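
This hunk, like the BearingRangeFactor one just above it, only changes an include path: the measurement-only factors now live under gtsam/sam/ rather than gtsam/slam/, and the factor constructors themselves are unchanged. A minimal sketch of a caller after the move, assuming made-up keys, measurement, and sigma purely for illustration:

// Hedged sketch: only the include path is new; the constructor matches the
// RangeFactor(size_t key1, size_t key2, double measured, noiseModel) form
// declared later in gtsam.h.
#include <gtsam/sam/RangeFactor.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/geometry/Point2.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

using namespace gtsam;

int main() {
  NonlinearFactorGraph graph;
  // Range of 2.0 between pose key 1 and landmark key 100, sigma 0.2 (illustrative values).
  noiseModel::Isotropic::shared_ptr rangeNoise = noiseModel::Isotropic::Sigma(1, 0.2);
  graph.push_back(RangeFactor<Pose2, Point2>(1, 100, 2.0, rangeNoise));
  return 0;
}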


@ -97,7 +97,7 @@ int main(int argc, char* argv[]) {
// Intentionally initialize the variables off from the ground truth
Values initialEstimate;
for (size_t i = 0; i < poses.size(); ++i)
initialEstimate.insert(Symbol('x', i), poses[i].compose(Pose3(Rot3::rodriguez(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20))));
initialEstimate.insert(Symbol('x', i), poses[i].compose(Pose3(Rot3::Rodrigues(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20))));
for (size_t j = 0; j < points.size(); ++j)
initialEstimate.insert(Symbol('l', j), points[j].compose(Point3(-0.25, 0.20, 0.15)));
initialEstimate.print("Initial Estimates:\n");


@ -84,7 +84,7 @@ int main(int argc, char* argv[]) {
// Create perturbed initial
Values initial;
Pose3 delta(Rot3::rodriguez(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20));
Pose3 delta(Rot3::Rodrigues(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20));
for (size_t i = 0; i < poses.size(); ++i)
initial.insert(Symbol('x', i), poses[i].compose(delta));
for (size_t j = 0; j < points.size(); ++j)


@ -34,6 +34,9 @@ using namespace gtsam;
// Make the typename short so it looks much cleaner
typedef SmartProjectionPoseFactor<Cal3_S2> SmartFactor;
// create a typedef to the camera type
typedef PinholePose<Cal3_S2> Camera;
/* ************************************************************************* */
int main(int argc, char* argv[]) {
@ -55,12 +58,12 @@ int main(int argc, char* argv[]) {
for (size_t j = 0; j < points.size(); ++j) {
// Every smart factor represents a single landmark; we use a shared pointer to init the factor, and then insert measurements.
SmartFactor::shared_ptr smartfactor(new SmartFactor());
SmartFactor::shared_ptr smartfactor(new SmartFactor(measurementNoise, K));
for (size_t i = 0; i < poses.size(); ++i) {
// generate the 2D measurement
SimpleCamera camera(poses[i], *K);
Camera camera(poses[i], K);
Point2 measurement = camera.project(points[j]);
// call add() function to add measurement into a single factor, here we need to add:
@ -68,7 +71,7 @@ int main(int argc, char* argv[]) {
// 2. the corresponding camera's key
// 3. camera noise model
// 4. camera calibration
smartfactor->add(measurement, i, measurementNoise, K);
smartfactor->add(measurement, i);
}
// insert the smart factor in the graph
@ -77,24 +80,24 @@ int main(int argc, char* argv[]) {
// Add a prior on pose x0. This indirectly specifies where the origin is.
// 30cm std on x,y,z 0.1 rad on roll,pitch,yaw
noiseModel::Diagonal::shared_ptr poseNoise = noiseModel::Diagonal::Sigmas(
noiseModel::Diagonal::shared_ptr noise = noiseModel::Diagonal::Sigmas(
(Vector(6) << Vector3::Constant(0.3), Vector3::Constant(0.1)).finished());
graph.push_back(PriorFactor<Pose3>(0, poses[0], poseNoise));
graph.push_back(PriorFactor<Camera>(0, Camera(poses[0],K), noise));
// Because the structure-from-motion problem has a scale ambiguity, the problem is
// still under-constrained. Here we add a prior on the second pose x1, so this will
// fix the scale by indicating the distance between x0 and x1.
// Because these two are fixed, the rest of the poses will be also be fixed.
graph.push_back(PriorFactor<Pose3>(1, poses[1], poseNoise)); // add directly to graph
graph.push_back(PriorFactor<Camera>(1, Camera(poses[1],K), noise)); // add directly to graph
graph.print("Factor Graph:\n");
// Create the initial estimate to the solution
// Intentionally initialize the variables off from the ground truth
Values initialEstimate;
Pose3 delta(Rot3::rodriguez(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20));
Pose3 delta(Rot3::Rodrigues(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20));
for (size_t i = 0; i < poses.size(); ++i)
initialEstimate.insert(i, poses[i].compose(delta));
initialEstimate.insert(i, Camera(poses[i].compose(delta), K));
initialEstimate.print("Initial Estimates:\n");
// Optimize the graph and print results
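
Taken together, these hunks show the new smart-factor workflow: the shared measurement noise and calibration are passed once to the SmartProjectionPoseFactor constructor, and add() then takes only the 2D measurement and the camera key. A minimal sketch of that pattern, with a made-up calibration, measurement, and key used purely for illustration:

// Hedged sketch of the constructor/add() split shown in the diff above.
#include <boost/shared_ptr.hpp>
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/Point2.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/slam/SmartProjectionPoseFactor.h>

using namespace gtsam;

typedef SmartProjectionPoseFactor<Cal3_S2> SmartFactor;

int main() {
  // Illustrative calibration and one-pixel isotropic measurement noise.
  boost::shared_ptr<Cal3_S2> K(new Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0));
  noiseModel::Isotropic::shared_ptr measurementNoise = noiseModel::Isotropic::Sigma(2, 1.0);

  NonlinearFactorGraph graph;
  // Noise model and calibration now go to the constructor...
  SmartFactor::shared_ptr smartfactor(new SmartFactor(measurementNoise, K));
  // ...and add() only needs the measurement and the camera key.
  smartfactor->add(Point2(320.0, 240.0), 0);
  graph.push_back(smartfactor);
  return 0;
}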


@ -30,6 +30,9 @@ using namespace gtsam;
// Make the typename short so it looks much cleaner
typedef SmartProjectionPoseFactor<Cal3_S2> SmartFactor;
// create a typedef to the camera type
typedef PinholePose<Cal3_S2> Camera;
/* ************************************************************************* */
int main(int argc, char* argv[]) {
@ -51,16 +54,16 @@ int main(int argc, char* argv[]) {
for (size_t j = 0; j < points.size(); ++j) {
// Every smart factor represents a single landmark; we use a shared pointer to init the factor, and then insert measurements.
SmartFactor::shared_ptr smartfactor(new SmartFactor());
SmartFactor::shared_ptr smartfactor(new SmartFactor(measurementNoise, K));
for (size_t i = 0; i < poses.size(); ++i) {
// generate the 2D measurement
SimpleCamera camera(poses[i], *K);
Camera camera(poses[i], K);
Point2 measurement = camera.project(points[j]);
// call add() function to add measurement into a single factor, here we need to add:
smartfactor->add(measurement, i, measurementNoise, K);
smartfactor->add(measurement, i);
}
// insert the smart factor in the graph
@ -69,18 +72,18 @@ int main(int argc, char* argv[]) {
// Add a prior on pose x0. This indirectly specifies where the origin is.
// 30cm std on x,y,z 0.1 rad on roll,pitch,yaw
noiseModel::Diagonal::shared_ptr poseNoise = noiseModel::Diagonal::Sigmas(
noiseModel::Diagonal::shared_ptr noise = noiseModel::Diagonal::Sigmas(
(Vector(6) << Vector3::Constant(0.3), Vector3::Constant(0.1)).finished());
graph.push_back(PriorFactor<Pose3>(0, poses[0], poseNoise));
graph.push_back(PriorFactor<Camera>(0, Camera(poses[0],K), noise));
// Fix the scale ambiguity by adding a prior
graph.push_back(PriorFactor<Pose3>(1, poses[1], poseNoise));
graph.push_back(PriorFactor<Camera>(1, Camera(poses[0],K), noise));
// Create the initial estimate to the solution
Values initialEstimate;
Pose3 delta(Rot3::rodriguez(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20));
Pose3 delta(Rot3::Rodrigues(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20));
for (size_t i = 0; i < poses.size(); ++i)
initialEstimate.insert(i, poses[i].compose(delta));
initialEstimate.insert(i, Camera(poses[i].compose(delta),K));
// We will use LM in the outer optimization loop, but by specifying "Iterative" below
// We indicate that an iterative linear solver should be used.


@ -0,0 +1,144 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file SFMExample.cpp
* @brief This file is to compare the ordering performance for COLAMD vs METIS.
* Example problem is to solve a structure-from-motion problem from a "Bundle Adjustment in the Large" file.
* @author Frank Dellaert, Zhaoyang Lv
*/
// For an explanation of headers, see SFMExample.cpp
#include <gtsam/inference/Symbol.h>
#include <gtsam/inference/Ordering.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/GeneralSFMFactor.h>
#include <gtsam/slam/dataset.h> // for loading BAL datasets !
#include <gtsam/base/timing.h>
#include <vector>
using namespace std;
using namespace gtsam;
using symbol_shorthand::C;
using symbol_shorthand::P;
// We will be using a projection factor that ties an SfM_Camera to a 3D point.
// An SfM_Camera is defined in dataset.h as a camera with unknown Cal3Bundler calibration
// and has a total of 9 free parameters
typedef GeneralSFMFactor<SfM_Camera,Point3> MyFactor;
/* ************************************************************************* */
int main (int argc, char* argv[]) {
// Find default file, but if an argument is given, try loading a file
string filename = findExampleDataFile("dubrovnik-3-7-pre");
if (argc>1) filename = string(argv[1]);
// Load the SfM data from file
SfM_data mydata;
readBAL(filename, mydata);
cout << boost::format("read %1% tracks on %2% cameras\n") % mydata.number_tracks() % mydata.number_cameras();
// Create a factor graph
NonlinearFactorGraph graph;
// We share *one* noiseModel between all projection factors
noiseModel::Isotropic::shared_ptr noise =
noiseModel::Isotropic::Sigma(2, 1.0); // one pixel in u and v
// Add measurements to the factor graph
size_t j = 0;
BOOST_FOREACH(const SfM_Track& track, mydata.tracks) {
BOOST_FOREACH(const SfM_Measurement& m, track.measurements) {
size_t i = m.first;
Point2 uv = m.second;
graph.push_back(MyFactor(uv, noise, C(i), P(j))); // note use of shorthand symbols C and P
}
j += 1;
}
// Add a prior on pose x1. This indirectly specifies where the origin is.
// and a prior on the position of the first landmark to fix the scale
graph.push_back(PriorFactor<SfM_Camera>(C(0), mydata.cameras[0], noiseModel::Isotropic::Sigma(9, 0.1)));
graph.push_back(PriorFactor<Point3> (P(0), mydata.tracks[0].p, noiseModel::Isotropic::Sigma(3, 0.1)));
// Create initial estimate
Values initial;
size_t i = 0; j = 0;
BOOST_FOREACH(const SfM_Camera& camera, mydata.cameras) initial.insert(C(i++), camera);
BOOST_FOREACH(const SfM_Track& track, mydata.tracks) initial.insert(P(j++), track.p);
/** --------------- COMPARISON -----------------------**/
/** ----------------------------------------------------**/
LevenbergMarquardtParams params_using_COLAMD, params_using_METIS;
try {
params_using_METIS.setVerbosity("ERROR");
gttic_(METIS_ORDERING);
params_using_METIS.ordering = Ordering::Create(Ordering::METIS, graph);
gttoc_(METIS_ORDERING);
params_using_COLAMD.setVerbosity("ERROR");
gttic_(COLAMD_ORDERING);
params_using_COLAMD.ordering = Ordering::Create(Ordering::COLAMD, graph);
gttoc_(COLAMD_ORDERING);
} catch (exception& e) {
cout << e.what();
}
// expect they have different ordering results
if(params_using_COLAMD.ordering == params_using_METIS.ordering) {
cout << "COLAMD and METIS produce the same ordering. "
<< "Problem here!!!" << endl;
}
/* Optimize the graph with METIS and COLAMD and time the results */
Values result_METIS, result_COLAMD;
try {
gttic_(OPTIMIZE_WITH_METIS);
LevenbergMarquardtOptimizer lm_METIS(graph, initial, params_using_METIS);
result_METIS = lm_METIS.optimize();
gttoc_(OPTIMIZE_WITH_METIS);
gttic_(OPTIMIZE_WITH_COLAMD);
LevenbergMarquardtOptimizer lm_COLAMD(graph, initial, params_using_COLAMD);
result_COLAMD = lm_COLAMD.optimize();
gttoc_(OPTIMIZE_WITH_COLAMD);
} catch (exception& e) {
cout << e.what();
}
{ // printing the result
cout << "COLAMD final error: " << graph.error(result_COLAMD) << endl;
cout << "METIS final error: " << graph.error(result_METIS) << endl;
cout << endl << endl;
cout << "Time comparison by solving " << filename << " results:" << endl;
cout << boost::format("%1% point tracks and %2% cameras\n") \
% mydata.number_tracks() % mydata.number_cameras() \
<< endl;
tictoc_print_();
}
return 0;
}
/* ************************************************************************* */


@ -82,7 +82,7 @@ int main(int argc, char* argv[]) {
Values initialEstimate;
initialEstimate.insert(Symbol('K', 0), Cal3_S2(60.0, 60.0, 0.0, 45.0, 45.0));
for (size_t i = 0; i < poses.size(); ++i)
initialEstimate.insert(Symbol('x', i), poses[i].compose(Pose3(Rot3::rodriguez(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20))));
initialEstimate.insert(Symbol('x', i), poses[i].compose(Pose3(Rot3::Rodrigues(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20))));
for (size_t j = 0; j < points.size(); ++j)
initialEstimate.insert(Symbol('l', j), points[j].compose(Point3(-0.25, 0.20, 0.15)));


@ -32,7 +32,7 @@
*/
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/BearingRangeFactor.h>
#include <gtsam/sam/BearingRangeFactor.h>
#include <gtsam/slam/dataset.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/geometry/Pose2.h>
@ -44,6 +44,7 @@
#include <gtsam/inference/Symbol.h>
#include <gtsam/base/timing.h>
#include <gtsam/base/treeTraversal-inst.h>
#include <gtsam/config.h> // for GTSAM_USE_TBB
#include <boost/archive/binary_iarchive.hpp>
#include <boost/archive/binary_oarchive.hpp>
@ -575,7 +576,7 @@ void runStats()
{
cout << "Gathering statistics..." << endl;
GaussianFactorGraph linear = *datasetMeasurements.linearize(initial);
GaussianJunctionTree jt(GaussianEliminationTree(linear, Ordering::colamd(linear)));
GaussianJunctionTree jt(GaussianEliminationTree(linear, Ordering::Colamd(linear)));
treeTraversal::ForestStatistics statistics = treeTraversal::GatherStatistics(jt);
ofstream file;


@ -17,6 +17,7 @@
#include <gtsam/global_includes.h>
#include <gtsam/base/Matrix.h>
#include <boost/assign/list_of.hpp>
#include <boost/foreach.hpp>
#include <map>


@ -95,7 +95,7 @@ int main(int argc, char* argv[]) {
// Add an initial guess for the current pose
// Intentionally initialize the variables off from the ground truth
initialEstimate.insert(Symbol('x', i), poses[i].compose(Pose3(Rot3::rodriguez(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20))));
initialEstimate.insert(Symbol('x', i), poses[i].compose(Pose3(Rot3::Rodrigues(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20))));
// If this is the first iteration, add a prior on the first pose to set the coordinate frame
// and a prior on the first landmark to set the scale


@ -95,7 +95,7 @@ int main(int argc, char* argv[]) {
}
// Intentionally initialize the variables off from the ground truth
Pose3 noise(Rot3::rodriguez(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20));
Pose3 noise(Rot3::Rodrigues(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20));
Pose3 initial_xi = poses[i].compose(noise);
// Add an initial guess for the current pose

gtsam.h: 278 lines changed

@ -1,4 +1,5 @@
/**
* GTSAM Wrap Module Definition
*
* These are the current classes available through the matlab toolbox interface,
@ -156,7 +157,7 @@ virtual class Value {
size_t dim() const;
};
#include <gtsam/base/LieScalar_Deprecated.h>
#include <gtsam/base/deprecated/LieScalar.h>
class LieScalar {
// Standard constructors
LieScalar();
@ -185,7 +186,7 @@ class LieScalar {
static Vector Logmap(const gtsam::LieScalar& p);
};
#include <gtsam/base/LieVector_Deprecated.h>
#include <gtsam/base/deprecated/LieVector.h>
class LieVector {
// Standard constructors
LieVector();
@ -217,7 +218,7 @@ class LieVector {
void serialize() const;
};
#include <gtsam/base/LieMatrix_Deprecated.h>
#include <gtsam/base/deprecated/LieMatrix.h>
class LieMatrix {
// Standard constructors
LieMatrix();
@ -433,12 +434,12 @@ class Rot3 {
static gtsam::Rot3 Rz(double t);
static gtsam::Rot3 RzRyRx(double x, double y, double z);
static gtsam::Rot3 RzRyRx(Vector xyz);
static gtsam::Rot3 yaw(double t); // positive yaw is to right (as in aircraft heading)
static gtsam::Rot3 pitch(double t); // positive pitch is up (increasing aircraft altitude)
static gtsam::Rot3 roll(double t); // positive roll is to right (increasing yaw in aircraft)
static gtsam::Rot3 ypr(double y, double p, double r);
static gtsam::Rot3 quaternion(double w, double x, double y, double z);
static gtsam::Rot3 rodriguez(Vector v);
static gtsam::Rot3 Yaw(double t); // positive yaw is to right (as in aircraft heading)
static gtsam::Rot3 Pitch(double t); // positive pitch is up (increasing aircraft altitude)
static gtsam::Rot3 Roll(double t); // positive roll is to right (increasing yaw in aircraft)
static gtsam::Rot3 Ypr(double y, double p, double r);
static gtsam::Rot3 Quaternion(double w, double x, double y, double z);
static gtsam::Rot3 Rodrigues(Vector v);
// Testable
void print(string s) const;
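
These renames follow the capitalized named-constructor convention used by the updated examples above (Rot3::Yaw, Rot3::Ypr, Rot3::Rodrigues, and so on). A minimal sketch of the new spellings, with arbitrary angle and axis values chosen only for illustration:

// Hedged sketch of the capitalized Rot3 named constructors.
#include <cmath>                    // for M_PI_2 (POSIX math constant)
#include <gtsam/geometry/Rot3.h>
#include <gtsam/geometry/Pose3.h>

using namespace gtsam;

int main() {
  Rot3 yaw90 = Rot3::Yaw(M_PI_2);                        // was Rot3::yaw
  Rot3 attitude = Rot3::Ypr(0.1, -0.2, 0.3);             // was Rot3::ypr
  Rot3 identityQ = Rot3::Quaternion(1.0, 0.0, 0.0, 0.0); // was Rot3::quaternion
  Rot3 axisAngle = Rot3::Rodrigues(Vector3(-0.1, 0.2, 0.25)); // was Rot3::rodriguez
  Rot3 combined = yaw90 * attitude * identityQ * axisAngle;
  Pose3 delta(combined, Point3(0.05, -0.10, 0.20));
  delta.print("delta:\n");
  return 0;
}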
@ -812,7 +813,7 @@ class CalibratedCamera {
// Action on Point3
gtsam::Point2 project(const gtsam::Point3& point) const;
static gtsam::Point2 project_to_camera(const gtsam::Point3& cameraPoint);
static gtsam::Point2 Project(const gtsam::Point3& cameraPoint);
// Standard Interface
gtsam::Pose3 pose() const;
@ -848,7 +849,7 @@ class PinholeCamera {
static size_t Dim();
// Transformations and measurement functions
static gtsam::Point2 project_to_camera(const gtsam::Point3& cameraPoint);
static gtsam::Point2 Project(const gtsam::Point3& cameraPoint);
pair<gtsam::Point2,bool> projectSafe(const gtsam::Point3& pw) const;
gtsam::Point2 project(const gtsam::Point3& point);
gtsam::Point3 backproject(const gtsam::Point2& p, double depth) const;
@ -884,7 +885,7 @@ virtual class SimpleCamera {
static size_t Dim();
// Transformations and measurement functions
static gtsam::Point2 project_to_camera(const gtsam::Point3& cameraPoint);
static gtsam::Point2 Project(const gtsam::Point3& cameraPoint);
pair<gtsam::Point2,bool> projectSafe(const gtsam::Point3& pw) const;
gtsam::Point2 project(const gtsam::Point3& point);
gtsam::Point3 backproject(const gtsam::Point2& p, double depth) const;
@ -915,7 +916,7 @@ class StereoCamera {
// Standard Interface
gtsam::Pose3 pose() const;
double baseline() const;
gtsam::Cal3_S2Stereo* calibration() const;
gtsam::Cal3_S2Stereo calibration() const;
// Manifold
gtsam::StereoCamera retract(const Vector& d) const;
@ -1654,12 +1655,12 @@ char symbolChr(size_t key);
size_t symbolIndex(size_t key);
// Default keyformatter
void printKeyList (const gtsam::KeyList& keys);
void printKeyList (const gtsam::KeyList& keys, string s);
void printKeyVector(const gtsam::KeyVector& keys);
void printKeyVector(const gtsam::KeyVector& keys, string s);
void printKeySet (const gtsam::KeySet& keys);
void printKeySet (const gtsam::KeySet& keys, string s);
void PrintKeyList (const gtsam::KeyList& keys);
void PrintKeyList (const gtsam::KeyList& keys, string s);
void PrintKeyVector(const gtsam::KeyVector& keys);
void PrintKeyVector(const gtsam::KeyVector& keys, string s);
void PrintKeySet (const gtsam::KeySet& keys);
void PrintKeySet (const gtsam::KeySet& keys, string s);
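The free key-printing helpers get the same capitalization. A small sketch, assuming the C++ signatures mirror the wrapper lines above:

#include <gtsam/inference/Key.h>
#include <gtsam/inference/Symbol.h>

using namespace gtsam;

void showKeys() {
  KeyVector keys;
  keys.push_back(Symbol('x', 0));
  keys.push_back(Symbol('l', 1));
  PrintKeyVector(keys, "variables: ");  // was printKeyVector()
}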
#include <gtsam/inference/LabeledSymbol.h>
class LabeledSymbol {
@ -1776,7 +1777,7 @@ class Values {
void swap(gtsam::Values& values);
bool exists(size_t j) const;
gtsam::KeyList keys() const;
gtsam::KeyVector keys() const;
gtsam::VectorValues zeroVectors() const;
@ -1893,8 +1894,6 @@ class KeySet {
class KeyVector {
KeyVector();
KeyVector(const gtsam::KeyVector& other);
KeyVector(const gtsam::KeySet& other);
KeyVector(const gtsam::KeyList& other);
// Note: no print function
@ -2264,7 +2263,7 @@ virtual class NonlinearEquality : gtsam::NoiseModelFactor {
};
#include <gtsam/slam/RangeFactor.h>
#include <gtsam/sam/RangeFactor.h>
template<POSE, POINT>
virtual class RangeFactor : gtsam::NoiseModelFactor {
RangeFactor(size_t key1, size_t key2, double measured, const gtsam::noiseModel::Base* noiseModel);
@ -2274,20 +2273,16 @@ typedef gtsam::RangeFactor<gtsam::Pose2, gtsam::Point2> RangeFactorPosePoint2;
typedef gtsam::RangeFactor<gtsam::Pose3, gtsam::Point3> RangeFactorPosePoint3;
typedef gtsam::RangeFactor<gtsam::Pose2, gtsam::Pose2> RangeFactorPose2;
typedef gtsam::RangeFactor<gtsam::Pose3, gtsam::Pose3> RangeFactorPose3;
// Commented out by Frank Dec 2014: not poses!
// If needed, we need a RangeFactor that takes a camera, extracts the pose
// Should be easy with Expressions
//typedef gtsam::RangeFactor<gtsam::CalibratedCamera, gtsam::Point3> RangeFactorCalibratedCameraPoint;
//typedef gtsam::RangeFactor<gtsam::SimpleCamera, gtsam::Point3> RangeFactorSimpleCameraPoint;
//typedef gtsam::RangeFactor<gtsam::CalibratedCamera, gtsam::CalibratedCamera> RangeFactorCalibratedCamera;
//typedef gtsam::RangeFactor<gtsam::SimpleCamera, gtsam::SimpleCamera> RangeFactorSimpleCamera;
typedef gtsam::RangeFactor<gtsam::CalibratedCamera, gtsam::Point3> RangeFactorCalibratedCameraPoint;
typedef gtsam::RangeFactor<gtsam::SimpleCamera, gtsam::Point3> RangeFactorSimpleCameraPoint;
typedef gtsam::RangeFactor<gtsam::CalibratedCamera, gtsam::CalibratedCamera> RangeFactorCalibratedCamera;
typedef gtsam::RangeFactor<gtsam::SimpleCamera, gtsam::SimpleCamera> RangeFactorSimpleCamera;
#include <gtsam/slam/BearingFactor.h>
template<POSE, POINT, ROTATION>
#include <gtsam/sam/BearingFactor.h>
template<POSE, POINT, BEARING>
virtual class BearingFactor : gtsam::NoiseModelFactor {
BearingFactor(size_t key1, size_t key2, const ROTATION& measured, const gtsam::noiseModel::Base* noiseModel);
BearingFactor(size_t key1, size_t key2, const BEARING& measured, const gtsam::noiseModel::Base* noiseModel);
// enabling serialization functionality
void serialize() const;
@ -2295,19 +2290,18 @@ virtual class BearingFactor : gtsam::NoiseModelFactor {
typedef gtsam::BearingFactor<gtsam::Pose2, gtsam::Point2, gtsam::Rot2> BearingFactor2D;
#include <gtsam/slam/BearingRangeFactor.h>
template<POSE, POINT, ROTATION>
#include <gtsam/sam/BearingRangeFactor.h>
template<POSE, POINT, BEARING, RANGE>
virtual class BearingRangeFactor : gtsam::NoiseModelFactor {
BearingRangeFactor(size_t poseKey, size_t pointKey, const ROTATION& measuredBearing, double measuredRange, const gtsam::noiseModel::Base* noiseModel);
pair<ROTATION, double> measured() const;
BearingRangeFactor(size_t poseKey, size_t pointKey,
const BEARING& measuredBearing, const RANGE& measuredRange,
const gtsam::noiseModel::Base* noiseModel);
// enabling serialization functionality
void serialize() const;
};
typedef gtsam::BearingRangeFactor<gtsam::Pose2, gtsam::Point2, gtsam::Rot2> BearingRangeFactor2D;
typedef gtsam::BearingRangeFactor<gtsam::Pose2, gtsam::Point2, gtsam::Rot2, double> BearingRangeFactor2D;
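With the separate BEARING and RANGE template arguments, the 2D typedef now spells out Rot2 and double explicitly. A sketch of adding such a factor in C++, assuming the C++ template mirrors the four wrapper arguments (keys and measurement values are illustrative):

#include <gtsam/sam/BearingRangeFactor.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

using namespace gtsam;

void addBearingRange(NonlinearFactorGraph& graph) {
  // Pose2 -> Point2 measurement: bearing is a Rot2, range a plain double.
  noiseModel::Diagonal::shared_ptr noise =
      noiseModel::Diagonal::Sigmas((Vector(2) << 0.1, 0.2).finished());
  graph.add(BearingRangeFactor<Pose2, Point2, Rot2, double>(
      Symbol('x', 1), Symbol('l', 1), Rot2::fromDegrees(45), 8.0, noise));
}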
#include <gtsam/slam/ProjectionFactor.h>
@ -2354,18 +2348,33 @@ virtual class GeneralSFMFactor2 : gtsam::NoiseModelFactor {
void serialize() const;
};
#include <gtsam/slam/SmartProjectionFactor.h>
class SmartProjectionParams {
SmartProjectionParams();
// TODO(frank): make these work:
// void setLinearizationMode(LinearizationMode linMode);
// void setDegeneracyMode(DegeneracyMode degMode);
void setRankTolerance(double rankTol);
void setEnableEPI(bool enableEPI);
void setLandmarkDistanceThreshold(bool landmarkDistanceThreshold);
void setDynamicOutlierRejectionThreshold(bool dynOutRejectionThreshold);
};
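A short sketch of configuring the new parameter struct through the setters listed above (tolerance values are arbitrary):

#include <gtsam/slam/SmartProjectionFactor.h>

using namespace gtsam;

SmartProjectionParams makeSmartParams() {
  SmartProjectionParams params;
  params.setRankTolerance(1e-3);  // triangulation rank tolerance
  params.setEnableEPI(false);     // skip the extra nonlinear triangulation refinement
  return params;
}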
#include <gtsam/slam/SmartProjectionPoseFactor.h>
template<CALIBRATION>
virtual class SmartProjectionPoseFactor : gtsam::NonlinearFactor {
virtual class SmartProjectionPoseFactor: gtsam::NonlinearFactor {
SmartProjectionPoseFactor(double rankTol, double linThreshold,
bool manageDegeneracy, bool enableEPI, const gtsam::Pose3& body_P_sensor);
SmartProjectionPoseFactor(const gtsam::noiseModel::Base* noise,
const CALIBRATION* K);
SmartProjectionPoseFactor(const gtsam::noiseModel::Base* noise,
const CALIBRATION* K,
const gtsam::Pose3& body_P_sensor);
SmartProjectionPoseFactor(const gtsam::noiseModel::Base* noise,
const CALIBRATION* K,
const gtsam::Pose3& body_P_sensor,
const gtsam::SmartProjectionParams& params);
SmartProjectionPoseFactor(double rankTol);
SmartProjectionPoseFactor();
void add(const gtsam::Point2& measured_i, size_t poseKey_i,
const gtsam::noiseModel::Base* noise_i, const CALIBRATION* K_i);
void add(const gtsam::Point2& measured_i, size_t poseKey_i);
// enabling serialization functionality
//void serialize() const;
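The old rank-tolerance constructors disappear: calibration and the optional body-to-sensor pose now go into the constructor, and add() takes just a pixel measurement and a pose key. A sketch under those assumptions (keys and pixel values are illustrative):

#include <gtsam/slam/SmartProjectionPoseFactor.h>
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/inference/Symbol.h>
#include <boost/shared_ptr.hpp>

using namespace gtsam;

typedef SmartProjectionPoseFactor<Cal3_S2> SmartFactor;

boost::shared_ptr<SmartFactor> makeSmartFactor(const boost::shared_ptr<Cal3_S2>& K,
                                               const SharedNoiseModel& pixelNoise) {
  boost::shared_ptr<SmartFactor> factor(new SmartFactor(pixelNoise, K));
  factor->add(Point2(320.0, 240.0), Symbol('x', 0));  // one measurement per observing pose
  factor->add(Point2(318.5, 241.2), Symbol('x', 1));
  return factor;
}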
@ -2438,7 +2447,7 @@ namespace imuBias {
#include <gtsam/navigation/ImuBias.h>
class ConstantBias {
// Standard Constructor
// Constructors
ConstantBias();
ConstantBias(Vector biasAcc, Vector biasGyro);
@ -2470,136 +2479,143 @@ class ConstantBias {
}///\namespace imuBias
#include <gtsam/navigation/ImuFactor.h>
class PoseVelocityBias{
PoseVelocityBias(const gtsam::Pose3& pose, Vector velocity, const gtsam::imuBias::ConstantBias& bias);
};
class ImuFactorPreintegratedMeasurements {
// Standard Constructor
ImuFactorPreintegratedMeasurements(const gtsam::imuBias::ConstantBias& bias, Matrix measuredAccCovariance,Matrix measuredOmegaCovariance, Matrix integrationErrorCovariance, bool use2ndOrderIntegration);
ImuFactorPreintegratedMeasurements(const gtsam::imuBias::ConstantBias& bias, Matrix measuredAccCovariance,Matrix measuredOmegaCovariance, Matrix integrationErrorCovariance);
// ImuFactorPreintegratedMeasurements(const gtsam::ImuFactorPreintegratedMeasurements& rhs);
#include <gtsam/navigation/NavState.h>
class NavState {
// Constructors
NavState();
NavState(const gtsam::Rot3& R, const gtsam::Point3& t, Vector v);
NavState(const gtsam::Pose3& pose, Vector v);
// Testable
void print(string s) const;
bool equals(const gtsam::ImuFactorPreintegratedMeasurements& expected, double tol);
bool equals(const gtsam::NavState& expected, double tol) const;
// Access
gtsam::Rot3 attitude() const;
gtsam::Point3 position() const;
Vector velocity() const;
gtsam::Pose3 pose() const;
};
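NavState bundles attitude, position and velocity into one manifold value; a minimal sketch of the constructors and accessors listed above:

#include <gtsam/navigation/NavState.h>

using namespace gtsam;

void navStateDemo(const Pose3& pose, const Vector3& velocity) {
  NavState state(pose, velocity);     // equivalently NavState(Rot3, Point3, Vector3)
  Rot3    nRb = state.attitude();
  Point3  p   = state.position();
  Vector3 v   = state.velocity();
  state.print("current state: ");
  (void)nRb; (void)p; (void)v;
}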
#include <gtsam/navigation/PreintegrationParams.h>
class PreintegrationParams {
PreintegrationParams(Vector n_gravity);
// TODO(frank): add setters/getters or make this MATLAB wrapper feature
};
#include <gtsam/navigation/PreintegrationBase.h>
virtual class PreintegrationBase {
// Constructors
PreintegrationBase(const gtsam::PreintegrationParams* params);
PreintegrationBase(const gtsam::PreintegrationParams* params,
const gtsam::imuBias::ConstantBias& bias);
// Testable
void print(string s) const;
bool equals(const gtsam::PreintegrationBase& expected, double tol);
double deltaTij() const;
Matrix deltaRij() const;
gtsam::Rot3 deltaRij() const;
Vector deltaPij() const;
Vector deltaVij() const;
Vector biasHatVector() const;
Matrix delPdelBiasAcc() const;
Matrix delPdelBiasOmega() const;
Matrix delVdelBiasAcc() const;
Matrix delVdelBiasOmega() const;
Matrix delRdelBiasOmega() const;
Matrix preintMeasCov() const;
// Standard Interface
void integrateMeasurement(Vector measuredAcc, Vector measuredOmega, double deltaT);
void integrateMeasurement(Vector measuredAcc, Vector measuredOmega, double deltaT, const gtsam::Pose3& body_P_sensor);
gtsam::PoseVelocityBias predict(const gtsam::Pose3& pose_i, Vector vel_i, const gtsam::imuBias::ConstantBias& bias,
Vector gravity, Vector omegaCoriolis) const;
gtsam::NavState predict(const gtsam::NavState& state_i,
const gtsam::imuBias::ConstantBias& bias) const;
};
virtual class ImuFactor : gtsam::NonlinearFactor {
ImuFactor(size_t pose_i, size_t vel_i, size_t pose_j, size_t vel_j, size_t bias,
const gtsam::ImuFactorPreintegratedMeasurements& preintegratedMeasurements, Vector gravity, Vector omegaCoriolis);
ImuFactor(size_t pose_i, size_t vel_i, size_t pose_j, size_t vel_j, size_t bias,
const gtsam::ImuFactorPreintegratedMeasurements& preintegratedMeasurements, Vector gravity, Vector omegaCoriolis,
const gtsam::Pose3& body_P_sensor);
#include <gtsam/navigation/ImuFactor.h>
virtual class PreintegratedImuMeasurements: gtsam::PreintegrationBase {
// Constructors
PreintegratedImuMeasurements(const gtsam::PreintegrationParams* params);
PreintegratedImuMeasurements(const gtsam::PreintegrationParams* params,
const gtsam::imuBias::ConstantBias& bias);
// Testable
void print(string s) const;
bool equals(const gtsam::PreintegratedImuMeasurements& expected, double tol);
// Standard Interface
gtsam::ImuFactorPreintegratedMeasurements preintegratedMeasurements() const;
void integrateMeasurement(Vector measuredAcc, Vector measuredOmega,
double deltaT);
void resetIntegration();
Matrix preintMeasCov() const;
};
virtual class ImuFactor: gtsam::NonlinearFactor {
ImuFactor(size_t pose_i, size_t vel_i, size_t pose_j, size_t vel_j,
size_t bias,
const gtsam::PreintegratedImuMeasurements& preintegratedMeasurements);
// Standard Interface
gtsam::PreintegratedImuMeasurements preintegratedMeasurements() const;
Vector evaluateError(const gtsam::Pose3& pose_i, Vector vel_i,
const gtsam::Pose3& pose_j, Vector vel_j,
const gtsam::imuBias::ConstantBias& bias);
};
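Putting the pieces together, the new preintegration workflow is: fill a shared PreintegrationParams (gravity and noise densities now live there, not in the factor), accumulate IMU samples in PreintegratedImuMeasurements, then construct the five-key ImuFactor. A sketch with illustrative noise values and keys; the covariance members are set directly since the wrapper setters are still marked TODO:

#include <gtsam/navigation/ImuFactor.h>
#include <gtsam/navigation/PreintegrationParams.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

using namespace gtsam;

void addImuFactor(NonlinearFactorGraph& graph, const imuBias::ConstantBias& biasHat) {
  boost::shared_ptr<PreintegrationParams> params(
      new PreintegrationParams(Vector3(0.0, 0.0, -9.81)));  // gravity in the nav frame
  params->accelerometerCovariance = Matrix3::Identity() * 1e-3;
  params->gyroscopeCovariance     = Matrix3::Identity() * 1e-4;
  params->integrationCovariance   = Matrix3::Identity() * 1e-8;

  PreintegratedImuMeasurements pim(params, biasHat);
  for (int k = 0; k < 100; ++k)  // 1 s of IMU samples at 100 Hz
    pim.integrateMeasurement(Vector3(0.1, 0.0, 9.81), Vector3(0.0, 0.0, 0.01), 0.01);

  // Consecutive poses x0/x1 and velocities v0/v1, plus one bias key b0.
  graph.add(ImuFactor(Symbol('x', 0), Symbol('v', 0),
                      Symbol('x', 1), Symbol('v', 1),
                      Symbol('b', 0), pim));
}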
#include <gtsam/navigation/CombinedImuFactor.h>
class CombinedImuFactorPreintegratedMeasurements {
// Standard Constructor
CombinedImuFactorPreintegratedMeasurements(
const gtsam::imuBias::ConstantBias& bias,
Matrix measuredAccCovariance,
Matrix measuredOmegaCovariance,
Matrix integrationErrorCovariance,
Matrix biasAccCovariance,
Matrix biasOmegaCovariance,
Matrix biasAccOmegaInit);
CombinedImuFactorPreintegratedMeasurements(
const gtsam::imuBias::ConstantBias& bias,
Matrix measuredAccCovariance,
Matrix measuredOmegaCovariance,
Matrix integrationErrorCovariance,
Matrix biasAccCovariance,
Matrix biasOmegaCovariance,
Matrix biasAccOmegaInit,
bool use2ndOrderIntegration);
// CombinedImuFactorPreintegratedMeasurements(const gtsam::CombinedImuFactorPreintegratedMeasurements& rhs);
virtual class PreintegratedCombinedMeasurements: gtsam::PreintegrationBase {
// Testable
void print(string s) const;
bool equals(const gtsam::CombinedImuFactorPreintegratedMeasurements& expected, double tol);
double deltaTij() const;
Matrix deltaRij() const;
Vector deltaPij() const;
Vector deltaVij() const;
Vector biasHatVector() const;
Matrix delPdelBiasAcc() const;
Matrix delPdelBiasOmega() const;
Matrix delVdelBiasAcc() const;
Matrix delVdelBiasOmega() const;
Matrix delRdelBiasOmega() const;
Matrix preintMeasCov() const;
bool equals(const gtsam::PreintegratedCombinedMeasurements& expected,
double tol);
// Standard Interface
void integrateMeasurement(Vector measuredAcc, Vector measuredOmega, double deltaT);
void integrateMeasurement(Vector measuredAcc, Vector measuredOmega, double deltaT, const gtsam::Pose3& body_P_sensor);
gtsam::PoseVelocityBias predict(const gtsam::Pose3& pose_i, Vector vel_i, const gtsam::imuBias::ConstantBias& bias,
Vector gravity, Vector omegaCoriolis) const;
void integrateMeasurement(Vector measuredAcc, Vector measuredOmega,
double deltaT);
void resetIntegration();
Matrix preintMeasCov() const;
};
virtual class CombinedImuFactor : gtsam::NonlinearFactor {
CombinedImuFactor(size_t pose_i, size_t vel_i, size_t pose_j, size_t vel_j, size_t bias_i, size_t bias_j,
const gtsam::CombinedImuFactorPreintegratedMeasurements& CombinedPreintegratedMeasurements, Vector gravity, Vector omegaCoriolis);
virtual class CombinedImuFactor: gtsam::NonlinearFactor {
CombinedImuFactor(size_t pose_i, size_t vel_i, size_t pose_j, size_t vel_j,
size_t bias_i, size_t bias_j,
const gtsam::PreintegratedCombinedMeasurements& CombinedPreintegratedMeasurements);
// Standard Interface
gtsam::CombinedImuFactorPreintegratedMeasurements preintegratedMeasurements() const;
gtsam::PreintegratedCombinedMeasurements preintegratedMeasurements() const;
Vector evaluateError(const gtsam::Pose3& pose_i, Vector vel_i,
const gtsam::Pose3& pose_j, Vector vel_j,
const gtsam::imuBias::ConstantBias& bias_i,
const gtsam::imuBias::ConstantBias& bias_j);
};
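CombinedImuFactor follows the same pattern but additionally ties consecutive bias keys together through a bias random walk; a short sketch of the six-key constructor (keys illustrative, measurements assumed to have been integrated as in the ImuFactor sketch above):

#include <gtsam/navigation/CombinedImuFactor.h>
#include <gtsam/inference/Symbol.h>

using namespace gtsam;

CombinedImuFactor makeCombinedFactor(const PreintegratedCombinedMeasurements& pim) {
  return CombinedImuFactor(Symbol('x', 0), Symbol('v', 0),
                           Symbol('x', 1), Symbol('v', 1),
                           Symbol('b', 0), Symbol('b', 1), pim);
}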
#include <gtsam/navigation/AHRSFactor.h>
class AHRSFactorPreintegratedMeasurements {
class PreintegratedAhrsMeasurements {
// Standard Constructor
AHRSFactorPreintegratedMeasurements(Vector bias, Matrix measuredOmegaCovariance);
AHRSFactorPreintegratedMeasurements(Vector bias, Matrix measuredOmegaCovariance);
AHRSFactorPreintegratedMeasurements(const gtsam::AHRSFactorPreintegratedMeasurements& rhs);
PreintegratedAhrsMeasurements(Vector bias, Matrix measuredOmegaCovariance);
PreintegratedAhrsMeasurements(Vector bias, Matrix measuredOmegaCovariance);
PreintegratedAhrsMeasurements(const gtsam::PreintegratedAhrsMeasurements& rhs);
// Testable
void print(string s) const;
bool equals(const gtsam::AHRSFactorPreintegratedMeasurements& expected, double tol);
bool equals(const gtsam::PreintegratedAhrsMeasurements& expected, double tol);
// get Data
Matrix deltaRij() const;
gtsam::Rot3 deltaRij() const;
double deltaTij() const;
Vector biasHat() const;
// Standard Interface
void integrateMeasurement(Vector measuredOmega, double deltaT);
void integrateMeasurement(Vector measuredOmega, double deltaT, const gtsam::Pose3& body_P_sensor);
void resetIntegration() ;
};
virtual class AHRSFactor : gtsam::NonlinearFactor {
AHRSFactor(size_t rot_i, size_t rot_j,size_t bias,
const gtsam::AHRSFactorPreintegratedMeasurements& preintegratedMeasurements, Vector omegaCoriolis);
const gtsam::PreintegratedAhrsMeasurements& preintegratedMeasurements, Vector omegaCoriolis);
AHRSFactor(size_t rot_i, size_t rot_j, size_t bias,
const gtsam::AHRSFactorPreintegratedMeasurements& preintegratedMeasurements, Vector omegaCoriolis,
const gtsam::PreintegratedAhrsMeasurements& preintegratedMeasurements, Vector omegaCoriolis,
const gtsam::Pose3& body_P_sensor);
// Standard Interface
gtsam::AHRSFactorPreintegratedMeasurements preintegratedMeasurements() const;
gtsam::PreintegratedAhrsMeasurements preintegratedMeasurements() const;
Vector evaluateError(const gtsam::Rot3& rot_i, const gtsam::Rot3& rot_j,
Vector bias) const;
gtsam::Rot3 predict(const gtsam::Rot3& rot_i, Vector bias,
const gtsam::AHRSFactorPreintegratedMeasurements& preintegratedMeasurements,
const gtsam::PreintegratedAhrsMeasurements& preintegratedMeasurements,
Vector omegaCoriolis) const;
};
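A corresponding sketch for the renamed AHRS preintegration type, following the wrapper signatures above (bias, covariance and rate values are illustrative):

#include <gtsam/navigation/AHRSFactor.h>
#include <gtsam/inference/Symbol.h>

using namespace gtsam;

AHRSFactor makeAhrsFactor(const Vector3& gyroBiasHat, const Vector3& omegaCoriolis) {
  PreintegratedAhrsMeasurements pim(gyroBiasHat, Matrix3::Identity() * 1e-4);
  for (int k = 0; k < 100; ++k)
    pim.integrateMeasurement(Vector3(0.0, 0.0, 0.05), 0.01);  // 100 Hz gyro samples
  return AHRSFactor(Symbol('r', 0), Symbol('r', 1), Symbol('b', 0), pim, omegaCoriolis);
}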


@ -1,4 +1,4 @@
repo: 8a21fd850624c931e448cbcfb38168cb2717c790
node: 10219c95fe653d4962aa9db4946f6fbea96dd740
node: b30b87236a1b1552af32ac34075ee5696a9b5a33
branch: 3.2
tag: 3.2.4
tag: 3.2.7


@ -27,3 +27,6 @@ ffa86ffb557094721ca71dcea6aed2651b9fd610 3.2.0
6b38706d90a9fe182e66ab88477b3dbde34b9f66 3.2.1
1306d75b4a21891e59ff9bd96678882cf831e39f 3.2.2
36fd1ba04c120cfdd90f3e4cede47f43b21d19ad 3.2.3
10219c95fe653d4962aa9db4946f6fbea96dd740 3.2.4
bdd17ee3b1b3a166cd5ec36dcad4fc1f3faf774a 3.2.5
c58038c56923e0fd86de3ded18e03df442e66dfb 3.2.6


@ -301,7 +301,7 @@ if(EIGEN_INCLUDE_INSTALL_DIR)
)
else()
set(INCLUDE_INSTALL_DIR
"${CMAKE_INSTALL_PREFIX}/include/eigen3"
"include/eigen3"
CACHE INTERNAL
"The directory where we install the header files (internal)"
)
@ -404,7 +404,7 @@ if(cmake_generator_tolower MATCHES "makefile")
message(STATUS "make install | Install to ${CMAKE_INSTALL_PREFIX}. To change that:")
message(STATUS " | cmake . -DCMAKE_INSTALL_PREFIX=yourpath")
message(STATUS " | Eigen headers will then be installed to:")
message(STATUS " | ${INCLUDE_INSTALL_DIR}")
message(STATUS " | ${CMAKE_INSTALL_PREFIX}/${INCLUDE_INSTALL_DIR}")
message(STATUS " | To install Eigen headers to a separate location, do:")
message(STATUS " | cmake . -DEIGEN_INCLUDE_INSTALL_DIR=yourpath")
message(STATUS "make doc | Generate the API documentation, requires Doxygen & LaTeX")


@ -123,7 +123,7 @@
#undef bool
#undef vector
#undef pixel
#elif defined __ARM_NEON__
#elif defined __ARM_NEON
#define EIGEN_VECTORIZE
#define EIGEN_VECTORIZE_NEON
#include <arm_neon.h>


@ -14,7 +14,7 @@
/**
* \defgroup SparseCore_Module SparseCore module
*
* This module provides a sparse matrix representation, and basic associatd matrix manipulations
* This module provides a sparse matrix representation, and basic associated matrix manipulations
* and operations.
*
* See the \ref TutorialSparse "Sparse tutorial"


@ -235,6 +235,11 @@ template<typename _MatrixType, int _UpLo> class LDLT
}
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
/** \internal
* Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U.
@ -434,6 +439,8 @@ template<typename MatrixType> struct LDLT_Traits<MatrixType,Upper>
template<typename MatrixType, int _UpLo>
LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
{
check_template_parameters();
eigen_assert(a.rows()==a.cols());
const Index size = a.rows();
@ -457,7 +464,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
*/
template<typename MatrixType, int _UpLo>
template<typename Derived>
LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename NumTraits<typename MatrixType::Scalar>::Real& sigma)
LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename LDLT<MatrixType,_UpLo>::RealScalar& sigma)
{
const Index size = w.rows();
if (m_isInitialized)
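The new check_template_parameters() call turns a cryptic failure into an explicit compile-time error: LDLT (and LLT below) now statically require a non-integer scalar. A standalone Eigen sketch of what is and is not accepted:

#include <Eigen/Dense>

int main() {
  Eigen::Matrix2d A;
  A << 4.0, 1.0,
       1.0, 3.0;                          // symmetric positive definite
  Eigen::LDLT<Eigen::Matrix2d> ldlt(A);   // fine: double scalar
  Eigen::Vector2d x = ldlt.solve(Eigen::Vector2d(1.0, 2.0));
  // Eigen::LDLT<Eigen::Matrix2i> bad(Eigen::Matrix2i::Identity());
  //   ^ would now trip EIGEN_STATIC_ASSERT_NON_INTEGER inside compute()
  return x.allFinite() ? 0 : 1;
}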


@ -174,6 +174,12 @@ template<typename _MatrixType, int _UpLo> class LLT
LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1);
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
/** \internal
* Used to compute and store L
* The strict upper part is not used and even not initialized.
@ -283,7 +289,7 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
return k;
mat.coeffRef(k,k) = x = sqrt(x);
if (k>0 && rs>0) A21.noalias() -= A20 * A10.adjoint();
if (rs>0) A21 *= RealScalar(1)/x;
if (rs>0) A21 /= x;
}
return -1;
}
@ -384,6 +390,8 @@ template<typename MatrixType> struct LLT_Traits<MatrixType,Upper>
template<typename MatrixType, int _UpLo>
LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const MatrixType& a)
{
check_template_parameters();
eigen_assert(a.rows()==a.cols());
const Index size = a.rows();
m_matrix.resize(size, size);


@ -60,7 +60,7 @@ template<> struct mkl_llt<EIGTYPE> \
lda = m.outerStride(); \
\
info = LAPACKE_##MKLPREFIX##potrf( matrix_order, uplo, size, (MKLTYPE*)a, lda ); \
info = (info==0) ? -1 : 1; \
info = (info==0) ? -1 : info>0 ? info-1 : size; \
return info; \
} \
}; \


@ -78,7 +78,7 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
{
res.itype = CHOLMOD_INT;
}
else if (internal::is_same<_Index,UF_long>::value)
else if (internal::is_same<_Index,SuiteSparse_long>::value)
{
res.itype = CHOLMOD_LONG;
}
@ -395,7 +395,7 @@ class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimpl
CholmodSimplicialLLT(const MatrixType& matrix) : Base()
{
init();
compute(matrix);
Base::compute(matrix);
}
~CholmodSimplicialLLT() {}
@ -442,7 +442,7 @@ class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimp
CholmodSimplicialLDLT(const MatrixType& matrix) : Base()
{
init();
compute(matrix);
Base::compute(matrix);
}
~CholmodSimplicialLDLT() {}
@ -487,7 +487,7 @@ class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSuper
CholmodSupernodalLLT(const MatrixType& matrix) : Base()
{
init();
compute(matrix);
Base::compute(matrix);
}
~CholmodSupernodalLLT() {}
@ -534,7 +534,7 @@ class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecom
CholmodDecomposition(const MatrixType& matrix) : Base()
{
init();
compute(matrix);
Base::compute(matrix);
}
~CholmodDecomposition() {}


@ -124,6 +124,21 @@ class Array
}
#endif
#ifdef EIGEN_HAVE_RVALUE_REFERENCES
Array(Array&& other)
: Base(std::move(other))
{
Base::_check_template_params();
if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic)
Base::_set_noalias(other);
}
Array& operator=(Array&& other)
{
other.swap(*this);
return *this;
}
#endif
/** Constructs a vector or row-vector with given dimension. \only_for_vectors
*
* Note that this is only useful for dynamic-size vectors. For fixed-size vectors,


@ -46,9 +46,6 @@ template<typename Derived> class ArrayBase
typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl;
using internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;
typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::Scalar Scalar;
@ -56,6 +53,7 @@ template<typename Derived> class ArrayBase
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef DenseBase<Derived> Base;
using Base::operator*;
using Base::RowsAtCompileTime;
using Base::ColsAtCompileTime;
using Base::SizeAtCompileTime;


@ -439,19 +439,26 @@ struct assign_impl<Derived1, Derived2, SliceVectorizedTraversal, NoUnrolling, Ve
typedef typename Derived1::Index Index;
static inline void run(Derived1 &dst, const Derived2 &src)
{
typedef packet_traits<typename Derived1::Scalar> PacketTraits;
typedef typename Derived1::Scalar Scalar;
typedef packet_traits<Scalar> PacketTraits;
enum {
packetSize = PacketTraits::size,
alignable = PacketTraits::AlignedOnScalar,
dstAlignment = alignable ? Aligned : int(assign_traits<Derived1,Derived2>::DstIsAligned) ,
dstIsAligned = assign_traits<Derived1,Derived2>::DstIsAligned,
dstAlignment = alignable ? Aligned : int(dstIsAligned),
srcAlignment = assign_traits<Derived1,Derived2>::JointAlignment
};
const Scalar *dst_ptr = &dst.coeffRef(0,0);
if((!bool(dstIsAligned)) && (size_t(dst_ptr) % sizeof(Scalar))>0)
{
// the pointer is not aligend-on scalar, so alignment is not possible
return assign_impl<Derived1,Derived2,DefaultTraversal,NoUnrolling>::run(dst, src);
}
const Index packetAlignedMask = packetSize - 1;
const Index innerSize = dst.innerSize();
const Index outerSize = dst.outerSize();
const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0;
Index alignedStart = ((!alignable) || assign_traits<Derived1,Derived2>::DstIsAligned) ? 0
: internal::first_aligned(&dst.coeffRef(0,0), innerSize);
Index alignedStart = ((!alignable) || bool(dstIsAligned)) ? 0 : internal::first_aligned(dst_ptr, innerSize);
for(Index outer = 0; outer < outerSize; ++outer)
{


@ -66,8 +66,9 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprTyp
: ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime)
: int(traits<XprType>::MaxColsAtCompileTime),
XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0,
IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
: (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
IsDense = is_same<StorageKind,Dense>::value,
IsRowMajor = (IsDense&&MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
: (IsDense&&MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
: XprTypeIsRowMajor,
HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor),
InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),


@ -1,154 +0,0 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_COMMAINITIALIZER_H
#define EIGEN_COMMAINITIALIZER_H
namespace Eigen {
/** \class CommaInitializer
* \ingroup Core_Module
*
* \brief Helper class used by the comma initializer operator
*
* This class is internally used to implement the comma initializer feature. It is
* the return type of MatrixBase::operator<<, and most of the time this is the only
* way it is used.
*
* \sa \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished()
*/
template<typename XprType>
struct CommaInitializer
{
typedef typename XprType::Scalar Scalar;
typedef typename XprType::Index Index;
inline CommaInitializer(XprType& xpr, const Scalar& s)
: m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1)
{
m_xpr.coeffRef(0,0) = s;
}
template<typename OtherDerived>
inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other)
: m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows())
{
m_xpr.block(0, 0, other.rows(), other.cols()) = other;
}
/* Copy/Move constructor which transfers ownership. This is crucial in
* absence of return value optimization to avoid assertions during destruction. */
// FIXME in C++11 mode this could be replaced by a proper RValue constructor
inline CommaInitializer(const CommaInitializer& o)
: m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) {
// Mark original object as finished. In absence of R-value references we need to const_cast:
const_cast<CommaInitializer&>(o).m_row = m_xpr.rows();
const_cast<CommaInitializer&>(o).m_col = m_xpr.cols();
const_cast<CommaInitializer&>(o).m_currentBlockRows = 0;
}
/* inserts a scalar value in the target matrix */
CommaInitializer& operator,(const Scalar& s)
{
if (m_col==m_xpr.cols())
{
m_row+=m_currentBlockRows;
m_col = 0;
m_currentBlockRows = 1;
eigen_assert(m_row<m_xpr.rows()
&& "Too many rows passed to comma initializer (operator<<)");
}
eigen_assert(m_col<m_xpr.cols()
&& "Too many coefficients passed to comma initializer (operator<<)");
eigen_assert(m_currentBlockRows==1);
m_xpr.coeffRef(m_row, m_col++) = s;
return *this;
}
/* inserts a matrix expression in the target matrix */
template<typename OtherDerived>
CommaInitializer& operator,(const DenseBase<OtherDerived>& other)
{
if(other.cols()==0 || other.rows()==0)
return *this;
if (m_col==m_xpr.cols())
{
m_row+=m_currentBlockRows;
m_col = 0;
m_currentBlockRows = other.rows();
eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows()
&& "Too many rows passed to comma initializer (operator<<)");
}
eigen_assert(m_col<m_xpr.cols()
&& "Too many coefficients passed to comma initializer (operator<<)");
eigen_assert(m_currentBlockRows==other.rows());
if (OtherDerived::SizeAtCompileTime != Dynamic)
m_xpr.template block<OtherDerived::RowsAtCompileTime != Dynamic ? OtherDerived::RowsAtCompileTime : 1,
OtherDerived::ColsAtCompileTime != Dynamic ? OtherDerived::ColsAtCompileTime : 1>
(m_row, m_col) = other;
else
m_xpr.block(m_row, m_col, other.rows(), other.cols()) = other;
m_col += other.cols();
return *this;
}
inline ~CommaInitializer()
{
eigen_assert((m_row+m_currentBlockRows) == m_xpr.rows()
&& m_col == m_xpr.cols()
&& "Too few coefficients passed to comma initializer (operator<<)");
}
/** \returns the built matrix once all its coefficients have been set.
* Calling finished is 100% optional. Its purpose is to write expressions
* like this:
* \code
* quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished());
* \endcode
*/
inline XprType& finished() { return m_xpr; }
XprType& m_xpr; // target expression
Index m_row; // current row id
Index m_col; // current col id
Index m_currentBlockRows; // current block height
};
/** \anchor MatrixBaseCommaInitRef
* Convenient operator to set the coefficients of a matrix.
*
* The coefficients must be provided in a row major order and exactly match
* the size of the matrix. Otherwise an assertion is raised.
*
* Example: \include MatrixBase_set.cpp
* Output: \verbinclude MatrixBase_set.out
*
* \note According the c++ standard, the argument expressions of this comma initializer are evaluated in arbitrary order.
*
* \sa CommaInitializer::finished(), class CommaInitializer
*/
template<typename Derived>
inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s)
{
return CommaInitializer<Derived>(*static_cast<Derived*>(this), s);
}
/** \sa operator<<(const Scalar&) */
template<typename Derived>
template<typename OtherDerived>
inline CommaInitializer<Derived>
DenseBase<Derived>::operator<<(const DenseBase<OtherDerived>& other)
{
return CommaInitializer<Derived>(*static_cast<Derived *>(this), other);
}
} // end namespace Eigen
#endif // EIGEN_COMMAINITIALIZER_H


@ -81,7 +81,8 @@ struct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
)
),
Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + functor_traits<BinaryOp>::Cost
Cost0 = EIGEN_ADD_COST(LhsCoeffReadCost,RhsCoeffReadCost),
CoeffReadCost = EIGEN_ADD_COST(Cost0,functor_traits<BinaryOp>::Cost)
};
};
} // end namespace internal


@ -47,7 +47,7 @@ struct traits<CwiseUnaryOp<UnaryOp, XprType> >
Flags = _XprTypeNested::Flags & (
HereditaryBits | LinearAccessBit | AlignedBit
| (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
CoeffReadCost = _XprTypeNested::CoeffReadCost + functor_traits<UnaryOp>::Cost
CoeffReadCost = EIGEN_ADD_COST(_XprTypeNested::CoeffReadCost, functor_traits<UnaryOp>::Cost)
};
};
}


@ -41,14 +41,13 @@ static inline void check_DenseIndex_is_signed() {
template<typename Derived> class DenseBase
#ifndef EIGEN_PARSED_BY_DOXYGEN
: public internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>
typename NumTraits<typename internal::traits<Derived>::Scalar>::Real,
DenseCoeffsBase<Derived> >
#else
: public DenseCoeffsBase<Derived>
#endif // not EIGEN_PARSED_BY_DOXYGEN
{
public:
using internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;
class InnerIterator;
@ -63,8 +62,9 @@ template<typename Derived> class DenseBase
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef internal::special_scalar_op_base<Derived,Scalar,RealScalar, DenseCoeffsBase<Derived> > Base;
typedef DenseCoeffsBase<Derived> Base;
using Base::operator*;
using Base::derived;
using Base::const_cast_derived;
using Base::rows;
@ -183,10 +183,6 @@ template<typename Derived> class DenseBase
/** \returns the number of nonzero coefficients which is in practice the number
* of stored coefficients. */
inline Index nonZeros() const { return size(); }
/** \returns true if either the number of rows or the number of columns is equal to 1.
* In other words, this function returns
* \code rows()==1 || cols()==1 \endcode
* \sa rows(), cols(), IsVectorAtCompileTime. */
/** \returns the outer size.
*
@ -266,11 +262,13 @@ template<typename Derived> class DenseBase
template<typename OtherDerived>
Derived& operator=(const ReturnByValue<OtherDerived>& func);
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** Copies \a other into *this without evaluating other. \returns a reference to *this. */
/** \internal Copies \a other into *this without evaluating other. \returns a reference to *this. */
template<typename OtherDerived>
Derived& lazyAssign(const DenseBase<OtherDerived>& other);
#endif // not EIGEN_PARSED_BY_DOXYGEN
/** \internal Evaluates \a other into *this. \returns a reference to *this. */
template<typename OtherDerived>
Derived& lazyAssign(const ReturnByValue<OtherDerived>& other);
CommaInitializer<Derived> operator<< (const Scalar& s);


@ -122,33 +122,41 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
{
internal::plain_array<T,Size,_Options> m_data;
public:
inline DenseStorage() {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert)
DenseStorage() {}
DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()) {}
inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
static inline DenseIndex rows(void) {return _Rows;}
static inline DenseIndex cols(void) {return _Cols;}
inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
inline const T *data() const { return m_data.array; }
inline T *data() { return m_data.array; }
DenseStorage(const DenseStorage& other) : m_data(other.m_data) {}
DenseStorage& operator=(const DenseStorage& other)
{
if (this != &other) m_data = other.m_data;
return *this;
}
DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
static DenseIndex rows(void) {return _Rows;}
static DenseIndex cols(void) {return _Cols;}
void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
void resize(DenseIndex,DenseIndex,DenseIndex) {}
const T *data() const { return m_data.array; }
T *data() { return m_data.array; }
};
// null matrix
template<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0, _Rows, _Cols, _Options>
{
public:
inline DenseStorage() {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert) {}
inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
inline void swap(DenseStorage& ) {}
static inline DenseIndex rows(void) {return _Rows;}
static inline DenseIndex cols(void) {return _Cols;}
inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
inline const T *data() const { return 0; }
inline T *data() { return 0; }
DenseStorage() {}
DenseStorage(internal::constructor_without_unaligned_array_assert) {}
DenseStorage(const DenseStorage&) {}
DenseStorage& operator=(const DenseStorage&) { return *this; }
DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
void swap(DenseStorage& ) {}
static DenseIndex rows(void) {return _Rows;}
static DenseIndex cols(void) {return _Cols;}
void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
void resize(DenseIndex,DenseIndex,DenseIndex) {}
const T *data() const { return 0; }
T *data() { return 0; }
};
// more specializations for null matrices; these are necessary to resolve ambiguities
@ -168,18 +176,29 @@ template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic
DenseIndex m_rows;
DenseIndex m_cols;
public:
inline DenseStorage() : m_rows(0), m_cols(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert)
DenseStorage() : m_rows(0), m_cols(0) {}
DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) : m_rows(nbRows), m_cols(nbCols) {}
inline void swap(DenseStorage& other)
DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows), m_cols(other.m_cols) {}
DenseStorage& operator=(const DenseStorage& other)
{
if (this != &other)
{
m_data = other.m_data;
m_rows = other.m_rows;
m_cols = other.m_cols;
}
return *this;
}
DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) : m_rows(nbRows), m_cols(nbCols) {}
void swap(DenseStorage& other)
{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
inline DenseIndex rows() const {return m_rows;}
inline DenseIndex cols() const {return m_cols;}
inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
inline const T *data() const { return m_data.array; }
inline T *data() { return m_data.array; }
DenseIndex rows() const {return m_rows;}
DenseIndex cols() const {return m_cols;}
void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
void resize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
const T *data() const { return m_data.array; }
T *data() { return m_data.array; }
};
// dynamic-size matrix with fixed-size storage and fixed width
@ -188,17 +207,27 @@ template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Si
internal::plain_array<T,Size,_Options> m_data;
DenseIndex m_rows;
public:
inline DenseStorage() : m_rows(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert)
DenseStorage() : m_rows(0) {}
DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {}
inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {}
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
inline DenseIndex rows(void) const {return m_rows;}
inline DenseIndex cols(void) const {return _Cols;}
inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
inline const T *data() const { return m_data.array; }
inline T *data() { return m_data.array; }
DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows) {}
DenseStorage& operator=(const DenseStorage& other)
{
if (this != &other)
{
m_data = other.m_data;
m_rows = other.m_rows;
}
return *this;
}
DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {}
void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
DenseIndex rows(void) const {return m_rows;}
DenseIndex cols(void) const {return _Cols;}
void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
const T *data() const { return m_data.array; }
T *data() { return m_data.array; }
};
// dynamic-size matrix with fixed-size storage and fixed height
@ -207,17 +236,27 @@ template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Si
internal::plain_array<T,Size,_Options> m_data;
DenseIndex m_cols;
public:
inline DenseStorage() : m_cols(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert)
DenseStorage() : m_cols(0) {}
DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {}
inline DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {}
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
inline DenseIndex rows(void) const {return _Rows;}
inline DenseIndex cols(void) const {return m_cols;}
inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
inline void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
inline const T *data() const { return m_data.array; }
inline T *data() { return m_data.array; }
DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_cols(other.m_cols) {}
DenseStorage& operator=(const DenseStorage& other)
{
if (this != &other)
{
m_data = other.m_data;
m_cols = other.m_cols;
}
return *this;
}
DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {}
void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
DenseIndex rows(void) const {return _Rows;}
DenseIndex cols(void) const {return m_cols;}
void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
const T *data() const { return m_data.array; }
T *data() { return m_data.array; }
};
// purely dynamic matrix.
@ -227,18 +266,35 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
DenseIndex m_rows;
DenseIndex m_cols;
public:
inline DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert)
DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(0), m_rows(0), m_cols(0) {}
inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
: m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows), m_cols(nbCols)
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
inline void swap(DenseStorage& other)
#ifdef EIGEN_HAVE_RVALUE_REFERENCES
DenseStorage(DenseStorage&& other)
: m_data(std::move(other.m_data))
, m_rows(std::move(other.m_rows))
, m_cols(std::move(other.m_cols))
{
other.m_data = nullptr;
}
DenseStorage& operator=(DenseStorage&& other)
{
using std::swap;
swap(m_data, other.m_data);
swap(m_rows, other.m_rows);
swap(m_cols, other.m_cols);
return *this;
}
#endif
~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
void swap(DenseStorage& other)
{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
inline DenseIndex rows(void) const {return m_rows;}
inline DenseIndex cols(void) const {return m_cols;}
inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
DenseIndex rows(void) const {return m_rows;}
DenseIndex cols(void) const {return m_cols;}
void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols);
m_rows = nbRows;
@ -258,8 +314,11 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
m_rows = nbRows;
m_cols = nbCols;
}
inline const T *data() const { return m_data; }
inline T *data() { return m_data; }
const T *data() const { return m_data; }
T *data() { return m_data; }
private:
DenseStorage(const DenseStorage&);
DenseStorage& operator=(const DenseStorage&);
};
// matrix with dynamic width and fixed height (so that matrix has dynamic size).
@ -268,15 +327,30 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
T *m_data;
DenseIndex m_cols;
public:
inline DenseStorage() : m_data(0), m_cols(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
DenseStorage() : m_data(0), m_cols(0) {}
DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
static inline DenseIndex rows(void) {return _Rows;}
inline DenseIndex cols(void) const {return m_cols;}
inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols)
#ifdef EIGEN_HAVE_RVALUE_REFERENCES
DenseStorage(DenseStorage&& other)
: m_data(std::move(other.m_data))
, m_cols(std::move(other.m_cols))
{
other.m_data = nullptr;
}
DenseStorage& operator=(DenseStorage&& other)
{
using std::swap;
swap(m_data, other.m_data);
swap(m_cols, other.m_cols);
return *this;
}
#endif
~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
static DenseIndex rows(void) {return _Rows;}
DenseIndex cols(void) const {return m_cols;}
void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);
m_cols = nbCols;
@ -294,8 +368,11 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
}
m_cols = nbCols;
}
inline const T *data() const { return m_data; }
inline T *data() { return m_data; }
const T *data() const { return m_data; }
T *data() { return m_data; }
private:
DenseStorage(const DenseStorage&);
DenseStorage& operator=(const DenseStorage&);
};
// matrix with dynamic height and fixed width (so that matrix has dynamic size).
@ -304,15 +381,30 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
T *m_data;
DenseIndex m_rows;
public:
inline DenseStorage() : m_data(0), m_rows(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
DenseStorage() : m_data(0), m_rows(0) {}
DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
inline DenseIndex rows(void) const {return m_rows;}
static inline DenseIndex cols(void) {return _Cols;}
inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex)
#ifdef EIGEN_HAVE_RVALUE_REFERENCES
DenseStorage(DenseStorage&& other)
: m_data(std::move(other.m_data))
, m_rows(std::move(other.m_rows))
{
other.m_data = nullptr;
}
DenseStorage& operator=(DenseStorage&& other)
{
using std::swap;
swap(m_data, other.m_data);
swap(m_rows, other.m_rows);
return *this;
}
#endif
~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
DenseIndex rows(void) const {return m_rows;}
static DenseIndex cols(void) {return _Cols;}
void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);
m_rows = nbRows;
@ -330,8 +422,11 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
}
m_rows = nbRows;
}
inline const T *data() const { return m_data; }
inline T *data() { return m_data; }
const T *data() const { return m_data; }
T *data() { return m_data; }
private:
DenseStorage(const DenseStorage&);
DenseStorage& operator=(const DenseStorage&);
};
} // end namespace Eigen


@ -34,8 +34,9 @@ struct traits<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && (_ScalarAccessOnDiag || (bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))),
_LinearAccessMask = (RowsAtCompileTime==1 || ColsAtCompileTime==1) ? LinearAccessBit : 0,
Flags = ((HereditaryBits|_LinearAccessMask) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0) | AlignedBit,//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit),
CoeffReadCost = NumTraits<Scalar>::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost
Flags = ((HereditaryBits|_LinearAccessMask|AlignedBit) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0),//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit),
Cost0 = EIGEN_ADD_COST(NumTraits<Scalar>::MulCost, MatrixType::CoeffReadCost),
CoeffReadCost = EIGEN_ADD_COST(Cost0,DiagonalType::DiagonalVectorType::CoeffReadCost)
};
};
}


@ -259,6 +259,47 @@ template<> struct functor_traits<scalar_boolean_or_op> {
};
};
/** \internal
* \brief Template functors for comparison of two scalars
* \todo Implement packet-comparisons
*/
template<typename Scalar, ComparisonName cmp> struct scalar_cmp_op;
template<typename Scalar, ComparisonName cmp>
struct functor_traits<scalar_cmp_op<Scalar, cmp> > {
enum {
Cost = NumTraits<Scalar>::AddCost,
PacketAccess = false
};
};
template<ComparisonName Cmp, typename Scalar>
struct result_of<scalar_cmp_op<Scalar, Cmp>(Scalar,Scalar)> {
typedef bool type;
};
template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_EQ> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a==b;}
};
template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_LT> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a<b;}
};
template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_LE> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a<=b;}
};
template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_UNORD> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return !(a<=b || b<=a);}
};
template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_NEQ> {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a!=b;}
};
// unary functors:
/** \internal


@ -257,7 +257,7 @@ template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, OuterProduct>
: public ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs>
{
template<typename T> struct IsRowMajor : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};
template<typename T> struct is_row_major : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};
public:
EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
@ -281,22 +281,22 @@ class GeneralProduct<Lhs, Rhs, OuterProduct>
template<typename Dest>
inline void evalTo(Dest& dest) const {
internal::outer_product_selector_run(*this, dest, set(), IsRowMajor<Dest>());
internal::outer_product_selector_run(*this, dest, set(), is_row_major<Dest>());
}
template<typename Dest>
inline void addTo(Dest& dest) const {
internal::outer_product_selector_run(*this, dest, add(), IsRowMajor<Dest>());
internal::outer_product_selector_run(*this, dest, add(), is_row_major<Dest>());
}
template<typename Dest>
inline void subTo(Dest& dest) const {
internal::outer_product_selector_run(*this, dest, sub(), IsRowMajor<Dest>());
internal::outer_product_selector_run(*this, dest, sub(), is_row_major<Dest>());
}
template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
{
internal::outer_product_selector_run(*this, dest, adds(alpha), IsRowMajor<Dest>());
internal::outer_product_selector_run(*this, dest, adds(alpha), is_row_major<Dest>());
}
};


@ -123,7 +123,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
return internal::ploadt<PacketScalar, LoadMode>(m_data + index * innerStride());
}
inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
explicit inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
{
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
checkSanity();
@ -157,7 +157,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
internal::inner_stride_at_compile_time<Derived>::ret==1),
PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
eigen_assert(EIGEN_IMPLIES(internal::traits<Derived>::Flags&AlignedBit, (size_t(m_data) % 16) == 0)
&& "data is not aligned");
&& "input pointer is not aligned on a 16 byte boundary");
}
PointerType m_data;


@ -294,7 +294,7 @@ struct hypot_impl
RealScalar _x = abs(x);
RealScalar _y = abs(y);
RealScalar p = (max)(_x, _y);
if(p==RealScalar(0)) return 0;
if(p==RealScalar(0)) return RealScalar(0);
RealScalar q = (min)(_x, _y);
RealScalar qp = q/p;
return p * sqrt(RealScalar(1) + qp*qp);


@ -211,6 +211,21 @@ class Matrix
: Base(internal::constructor_without_unaligned_array_assert())
{ Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED }
#ifdef EIGEN_HAVE_RVALUE_REFERENCES
Matrix(Matrix&& other)
: Base(std::move(other))
{
Base::_check_template_params();
if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic)
Base::_set_noalias(other);
}
Matrix& operator=(Matrix&& other)
{
other.swap(*this);
return *this;
}
#endif
/** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors
*
* Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
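With EIGEN_HAVE_RVALUE_REFERENCES defined (C++11 builds), the new move constructor and move assignment let a dynamic-size matrix hand over its heap buffer instead of copying it. A small sketch:

#include <Eigen/Dense>
#include <utility>

int main() {
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(2000, 2000);
  const double* buffer = a.data();
  Eigen::MatrixXd b = std::move(a);     // steals a's storage; no 32 MB copy
  return (b.data() == buffer) ? 0 : 1;  // same buffer, new owner (in C++11 builds)
}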


@ -159,13 +159,11 @@ template<typename Derived> class MatrixBase
template<typename OtherDerived>
Derived& operator=(const ReturnByValue<OtherDerived>& other);
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename ProductDerived, typename Lhs, typename Rhs>
Derived& lazyAssign(const ProductBase<ProductDerived, Lhs,Rhs>& other);
template<typename MatrixPower, typename Lhs, typename Rhs>
Derived& lazyAssign(const MatrixPowerProduct<MatrixPower, Lhs,Rhs>& other);
#endif // not EIGEN_PARSED_BY_DOXYGEN
template<typename OtherDerived>
Derived& operator+=(const MatrixBase<OtherDerived>& other);
@ -442,6 +440,15 @@ template<typename Derived> class MatrixBase
template<typename OtherScalar>
void applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j);
///////// SparseCore module /////////
template<typename OtherDerived>
EIGEN_STRONG_INLINE const typename SparseMatrixBase<OtherDerived>::template CwiseProductDenseReturnType<Derived>::Type
cwiseProduct(const SparseMatrixBase<OtherDerived> &other) const
{
return other.cwiseProduct(derived());
}
///////// MatrixFunctions module /////////
typedef typename internal::stem_function<Scalar>::type StemFunction;


@ -250,6 +250,35 @@ class PermutationBase : public EigenBase<Derived>
template<typename Other> friend
inline PlainPermutationType operator*(const Transpose<PermutationBase<Other> >& other, const PermutationBase& perm)
{ return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); }
/** \returns the determinant of the permutation matrix, which is either 1 or -1 depending on the parity of the permutation.
*
* This function is O(\c n) procedure allocating a buffer of \c n booleans.
*/
Index determinant() const
{
Index res = 1;
Index n = size();
Matrix<bool,RowsAtCompileTime,1,0,MaxRowsAtCompileTime> mask(n);
mask.fill(false);
Index r = 0;
while(r < n)
{
// search for the next seed
while(r<n && mask[r]) r++;
if(r>=n)
break;
// we got one, let's follow it until we are back to the seed
Index k0 = r++;
mask.coeffRef(k0) = true;
for(Index k=indices().coeff(k0); k!=k0; k=indices().coeff(k))
{
mask.coeffRef(k) = true;
res = -res;
}
}
return res;
}
protected:

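The doc comment on the new determinant() is terse, so here is the cycle-parity argument it relies on, in isolation: a permutation factors into disjoint cycles, a cycle of length k has sign (-1)^(k-1), and the loop realizes this by flipping the result once per element visited after a cycle's seed. Standalone sketch (permutation_sign is an illustrative name, not Eigen API):

#include <iostream>
#include <vector>

int permutation_sign(const std::vector<int>& perm)
{
  const int n = static_cast<int>(perm.size());
  std::vector<bool> seen(n, false);
  int sign = 1;
  for (int r = 0; r < n; ++r)
  {
    if (seen[r]) continue;               // already inside a processed cycle
    seen[r] = true;                      // r is the seed of a new cycle
    for (int k = perm[r]; k != r; k = perm[k])
    {
      seen[k] = true;
      sign = -sign;                      // one flip per non-seed element of the cycle
    }
  }
  return sign;
}

int main()
{
  std::cout << permutation_sign({1, 0, 2}) << "\n";   // single transposition: -1
  std::cout << permutation_sign({1, 2, 0}) << "\n";   // 3-cycle: +1
  return 0;
}
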
View File

@ -437,6 +437,36 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
}
#endif
#ifdef EIGEN_HAVE_RVALUE_REFERENCES
PlainObjectBase(PlainObjectBase&& other)
: m_storage( std::move(other.m_storage) )
{
}
PlainObjectBase& operator=(PlainObjectBase&& other)
{
using std::swap;
swap(m_storage, other.m_storage);
return *this;
}
#endif
/** Copy constructor */
EIGEN_STRONG_INLINE PlainObjectBase(const PlainObjectBase& other)
: m_storage()
{
_check_template_params();
lazyAssign(other);
}
template<typename OtherDerived>
EIGEN_STRONG_INLINE PlainObjectBase(const DenseBase<OtherDerived> &other)
: m_storage()
{
_check_template_params();
lazyAssign(other);
}
EIGEN_STRONG_INLINE PlainObjectBase(Index a_size, Index nbRows, Index nbCols)
: m_storage(a_size, nbRows, nbCols)
{
@ -573,6 +603,8 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
: (rows() == other.rows() && cols() == other.cols())))
&& "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined");
EIGEN_ONLY_USED_FOR_DEBUG(other);
if(this->size()==0)
resizeLike(other);
#else
resizeLike(other);
#endif

View File

@ -247,8 +247,9 @@ struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
}
};
template<typename Func, typename Derived>
struct redux_impl<Func, Derived, SliceVectorizedTraversal, NoUnrolling>
// NOTE: for SliceVectorizedTraversal we simply bypass unrolling
template<typename Func, typename Derived, int Unrolling>
struct redux_impl<Func, Derived, SliceVectorizedTraversal, Unrolling>
{
typedef typename Derived::Scalar Scalar;
typedef typename packet_traits<Scalar>::type PacketScalar;

View File

@ -108,7 +108,8 @@ struct traits<Ref<_PlainObjectType, _Options, _StrideType> >
OuterStrideMatch = Derived::IsVectorAtCompileTime
|| int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime),
AlignmentMatch = (_Options!=Aligned) || ((PlainObjectType::Flags&AlignedBit)==0) || ((traits<Derived>::Flags&AlignedBit)==AlignedBit),
MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch
ScalarTypeMatch = internal::is_same<typename PlainObjectType::Scalar, typename Derived::Scalar>::value,
MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch && ScalarTypeMatch
};
typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
};
@ -187,9 +188,11 @@ protected:
template<typename PlainObjectType, int Options, typename StrideType> class Ref
: public RefBase<Ref<PlainObjectType, Options, StrideType> >
{
private:
typedef internal::traits<Ref> Traits;
template<typename Derived>
inline Ref(const PlainObjectBase<Derived>& expr);
inline Ref(const PlainObjectBase<Derived>& expr,
typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0);
public:
typedef RefBase<Ref> Base;
@ -198,13 +201,15 @@ template<typename PlainObjectType, int Options, typename StrideType> class Ref
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename Derived>
inline Ref(PlainObjectBase<Derived>& expr)
inline Ref(PlainObjectBase<Derived>& expr,
typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
{
EIGEN_STATIC_ASSERT(static_cast<bool>(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
Base::construct(expr.derived());
}
template<typename Derived>
inline Ref(const DenseBase<Derived>& expr)
inline Ref(const DenseBase<Derived>& expr,
typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
#else
template<typename Derived>
inline Ref(DenseBase<Derived>& expr)
@ -231,13 +236,23 @@ template<typename TPlainObjectType, int Options, typename StrideType> class Ref<
EIGEN_DENSE_PUBLIC_INTERFACE(Ref)
template<typename Derived>
inline Ref(const DenseBase<Derived>& expr)
inline Ref(const DenseBase<Derived>& expr,
typename internal::enable_if<bool(Traits::template match<Derived>::ScalarTypeMatch),Derived>::type* = 0)
{
// std::cout << match_helper<Derived>::HasDirectAccess << "," << match_helper<Derived>::OuterStrideMatch << "," << match_helper<Derived>::InnerStrideMatch << "\n";
// std::cout << int(StrideType::OuterStrideAtCompileTime) << " - " << int(Derived::OuterStrideAtCompileTime) << "\n";
// std::cout << int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n";
construct(expr.derived(), typename Traits::template match<Derived>::type());
}
inline Ref(const Ref& other) : Base(other) {
// copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy
}
template<typename OtherRef>
inline Ref(const RefBase<OtherRef>& other) {
construct(other.derived(), typename Traits::template match<OtherRef>::type());
}
protected:

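As I read the added ScalarTypeMatch flag and the enable_if'd constructors, a Ref now only binds to expressions whose scalar type matches its own, so a float/double mix-up at a call site becomes a compile-time failure instead of silently materializing a converted temporary. Hedged illustration (sum_of is an illustrative helper, not Eigen or GTSAM API):

#include <Eigen/Dense>
#include <iostream>

float sum_of(const Eigen::Ref<const Eigen::VectorXf>& v)
{
  return v.sum();
}

int main()
{
  Eigen::VectorXf xf = Eigen::VectorXf::Ones(4);
  std::cout << sum_of(xf) << "\n";                 // fine: scalar types match

  Eigen::VectorXd xd = Eigen::VectorXd::Ones(4);
  // std::cout << sum_of(xd) << "\n";              // expected to be rejected at compile time now
  std::cout << sum_of(xd.cast<float>()) << "\n";   // explicit conversion still works
  return 0;
}
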
View File

@ -72,6 +72,8 @@ template<typename Derived> class ReturnByValue
const Unusable& coeff(Index,Index) const { return *reinterpret_cast<const Unusable*>(this); }
Unusable& coeffRef(Index) { return *reinterpret_cast<Unusable*>(this); }
Unusable& coeffRef(Index,Index) { return *reinterpret_cast<Unusable*>(this); }
template<int LoadMode> Unusable& packet(Index) const;
template<int LoadMode> Unusable& packet(Index, Index) const;
#endif
};
@ -83,6 +85,15 @@ Derived& DenseBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
return derived();
}
template<typename Derived>
template<typename OtherDerived>
Derived& DenseBase<Derived>::lazyAssign(const ReturnByValue<OtherDerived>& other)
{
other.evalTo(derived());
return derived();
}
} // end namespace Eigen
#endif // EIGEN_RETURNBYVALUE_H

View File

@ -180,15 +180,9 @@ inline Derived& DenseBase<Derived>::operator*=(const Scalar& other)
template<typename Derived>
inline Derived& DenseBase<Derived>::operator/=(const Scalar& other)
{
typedef typename internal::conditional<NumTraits<Scalar>::IsInteger,
internal::scalar_quotient_op<Scalar>,
internal::scalar_product_op<Scalar> >::type BinOp;
typedef typename Derived::PlainObject PlainObject;
SelfCwiseBinaryOp<BinOp, Derived, typename PlainObject::ConstantReturnType> tmp(derived());
Scalar actual_other;
if(NumTraits<Scalar>::IsInteger) actual_other = other;
else actual_other = Scalar(1)/other;
tmp = PlainObject::Constant(rows(),cols(), actual_other);
SelfCwiseBinaryOp<internal::scalar_quotient_op<Scalar>, Derived, typename PlainObject::ConstantReturnType> tmp(derived());
tmp = PlainObject::Constant(rows(),cols(), other);
return derived();
}

View File

@ -384,6 +384,7 @@ template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
a_lo = vget_low_s32(a);
a_hi = vget_high_s32(a);
max = vpmax_s32(a_lo, a_hi);
max = vpmax_s32(max, max);
return vget_lane_s32(max, 0);
}

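Context for the one-line NEON fix: vpmax_s32 only reduces adjacent pairs, so the original sequence returned max(a0,a1) and ignored the upper two lanes; the added second vpmax folds the two partial maxima. A scalar mirror of the intended two-step reduction (predux_max4 is an illustrative name):

#include <algorithm>
#include <iostream>

int predux_max4(const int a[4])
{
  // step 1: pairwise maxima, what vpmax_s32(a_lo, a_hi) produces
  int m01 = std::max(a[0], a[1]);
  int m23 = std::max(a[2], a[3]);
  // step 2: fold the two partial maxima -- the vpmax_s32(max, max) added above
  return std::max(m01, m23);
}

int main()
{
  int v[4] = {3, 7, 5, 11};
  std::cout << predux_max4(v) << "\n";   // 11, which the unfixed code would have missed
  return 0;
}
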
View File

@ -126,7 +126,7 @@ Packet4f pexp<Packet4f>(const Packet4f& _x)
_EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
_EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
Packet4f tmp = _mm_setzero_ps(), fx;
Packet4f tmp, fx;
Packet4i emm0;
// clamp x
@ -195,7 +195,7 @@ Packet2d pexp<Packet2d>(const Packet2d& _x)
_EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
static const __m128i p4i_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0);
Packet2d tmp = _mm_setzero_pd(), fx;
Packet2d tmp, fx;
Packet4i emm0;
// clamp x
@ -279,7 +279,7 @@ Packet4f psin<Packet4f>(const Packet4f& _x)
_EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f);
_EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, sign_bit, y;
Packet4f xmm1, xmm2, xmm3, sign_bit, y;
Packet4i emm0, emm2;
sign_bit = x;
@ -378,7 +378,7 @@ Packet4f pcos<Packet4f>(const Packet4f& _x)
_EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f);
_EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, y;
Packet4f xmm1, xmm2, xmm3, y;
Packet4i emm0, emm2;
x = pabs(x);

View File

@ -134,7 +134,7 @@ class CoeffBasedProduct
};
typedef internal::product_coeff_impl<CanVectorizeInner ? InnerVectorizedTraversal : DefaultTraversal,
Unroll ? (InnerSize==0 ? 0 : InnerSize-1) : Dynamic,
Unroll ? InnerSize : Dynamic,
_LhsNested, _RhsNested, Scalar> ScalarCoeffImpl;
typedef CoeffBasedProduct<LhsNested,RhsNested,NestByRefBit> LazyCoeffBasedProductType;
@ -185,7 +185,7 @@ class CoeffBasedProduct
{
PacketScalar res;
internal::product_packet_impl<Flags&RowMajorBit ? RowMajor : ColMajor,
Unroll ? (InnerSize==0 ? 0 : InnerSize-1) : Dynamic,
Unroll ? InnerSize : Dynamic,
_LhsNested, _RhsNested, PacketScalar, LoadMode>
::run(row, col, m_lhs, m_rhs, res);
return res;
@ -243,7 +243,17 @@ struct product_coeff_impl<DefaultTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
{
product_coeff_impl<DefaultTraversal, UnrollingIndex-1, Lhs, Rhs, RetScalar>::run(row, col, lhs, rhs, res);
res += lhs.coeff(row, UnrollingIndex) * rhs.coeff(UnrollingIndex, col);
res += lhs.coeff(row, UnrollingIndex-1) * rhs.coeff(UnrollingIndex-1, col);
}
};
template<typename Lhs, typename Rhs, typename RetScalar>
struct product_coeff_impl<DefaultTraversal, 1, Lhs, Rhs, RetScalar>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
{
res = lhs.coeff(row, 0) * rhs.coeff(0, col);
}
};
@ -251,9 +261,9 @@ template<typename Lhs, typename Rhs, typename RetScalar>
struct product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, RetScalar &res)
{
res = lhs.coeff(row, 0) * rhs.coeff(0, col);
res = RetScalar(0);
}
};
@ -293,6 +303,16 @@ struct product_coeff_vectorized_unroller<0, Lhs, Rhs, Packet>
}
};
template<typename Lhs, typename Rhs, typename RetScalar>
struct product_coeff_impl<InnerVectorizedTraversal, 0, Lhs, Rhs, RetScalar>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, RetScalar &res)
{
res = 0;
}
};
template<int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
struct product_coeff_impl<InnerVectorizedTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
{
@ -302,8 +322,7 @@ struct product_coeff_impl<InnerVectorizedTraversal, UnrollingIndex, Lhs, Rhs, Re
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
{
Packet pres;
product_coeff_vectorized_unroller<UnrollingIndex+1-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, pres);
product_coeff_impl<DefaultTraversal,UnrollingIndex,Lhs,Rhs,RetScalar>::run(row, col, lhs, rhs, res);
product_coeff_vectorized_unroller<UnrollingIndex-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, pres);
res = predux(pres);
}
};
@ -371,7 +390,7 @@ struct product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
{
product_packet_impl<RowMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, res);
res = pmadd(pset1<Packet>(lhs.coeff(row, UnrollingIndex)), rhs.template packet<LoadMode>(UnrollingIndex, col), res);
res = pmadd(pset1<Packet>(lhs.coeff(row, UnrollingIndex-1)), rhs.template packet<LoadMode>(UnrollingIndex-1, col), res);
}
};
@ -382,12 +401,12 @@ struct product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
{
product_packet_impl<ColMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, res);
res = pmadd(lhs.template packet<LoadMode>(row, UnrollingIndex), pset1<Packet>(rhs.coeff(UnrollingIndex, col)), res);
res = pmadd(lhs.template packet<LoadMode>(row, UnrollingIndex-1), pset1<Packet>(rhs.coeff(UnrollingIndex-1, col)), res);
}
};
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
struct product_packet_impl<RowMajor, 1, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
@ -397,7 +416,7 @@ struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
};
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
struct product_packet_impl<ColMajor, 1, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
@ -406,16 +425,35 @@ struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
}
};
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Packet &res)
{
res = pset1<Packet>(0);
}
};
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Packet &res)
{
res = pset1<Packet>(0);
}
};
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res)
{
eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix");
res = pmul(pset1<Packet>(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
for(Index i = 1; i < lhs.cols(); ++i)
res = pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
res = pset1<Packet>(0);
for(Index i = 0; i < lhs.cols(); ++i)
res = pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
}
};
@ -425,10 +463,9 @@ struct product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res)
{
eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix");
res = pmul(lhs.template packet<LoadMode>(row, 0), pset1<Packet>(rhs.coeff(0, col)));
for(Index i = 1; i < lhs.cols(); ++i)
res = pmadd(lhs.template packet<LoadMode>(row, i), pset1<Packet>(rhs.coeff(i, col)), res);
res = pset1<Packet>(0);
for(Index i = 0; i < lhs.cols(); ++i)
res = pmadd(lhs.template packet<LoadMode>(row, i), pset1<Packet>(rhs.coeff(i, col)), res);
}
};

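Besides shifting the unrolling index by one, the rework adds explicit UnrollingIndex==0 specializations that return zero and makes the dynamic paths start from a zero accumulator, which, as far as I can tell, is aimed at products with an empty inner dimension. Minimal reproduction of that degenerate case:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix<double, 2, 0> a;          // 2 x 0
  Eigen::Matrix<double, 0, 3> b;          // 0 x 3
  Eigen::Matrix<double, 2, 3> c = a * b;  // mathematically the 2 x 3 zero matrix
  std::cout << c << "\n";                 // expected: all zeros, no uninitialized reads
  return 0;
}
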
View File

@ -125,19 +125,22 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpos
if(transpose)
std::swap(rows,cols);
Index blockCols = (cols / threads) & ~Index(0x3);
Index blockRows = (rows / threads) & ~Index(0x7);
GemmParallelInfo<Index>* info = new GemmParallelInfo<Index>[threads];
#pragma omp parallel for schedule(static,1) num_threads(threads)
for(Index i=0; i<threads; ++i)
#pragma omp parallel num_threads(threads)
{
Index i = omp_get_thread_num();
// Note that the actual number of threads might be lower than the number of request ones.
Index actual_threads = omp_get_num_threads();
Index blockCols = (cols / actual_threads) & ~Index(0x3);
Index blockRows = (rows / actual_threads) & ~Index(0x7);
Index r0 = i*blockRows;
Index actualBlockRows = (i+1==threads) ? rows-r0 : blockRows;
Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
Index c0 = i*blockCols;
Index actualBlockCols = (i+1==threads) ? cols-c0 : blockCols;
Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
info[i].rhs_start = c0;
info[i].rhs_length = actualBlockCols;

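The rewritten parallel section asks OpenMP for the thread count from inside the region because fewer threads than requested may be granted; block sizes and the last block's remainder are then derived from the actual count. The partitioning pattern in isolation (plain OpenMP, nothing Eigen-specific; compile with -fopenmp):

#include <omp.h>
#include <cstdio>

int main()
{
  const int cols = 1000;
  #pragma omp parallel num_threads(8)
  {
    int i = omp_get_thread_num();
    int actual_threads = omp_get_num_threads();                // may be fewer than the 8 requested
    int block = cols / actual_threads;
    int c0 = i * block;
    int len = (i + 1 == actual_threads) ? cols - c0 : block;   // last thread picks up the remainder
    std::printf("thread %d handles columns [%d, %d)\n", i, c0, c0 + len);
  }
  return 0;
}
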
View File

@ -109,7 +109,7 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \
/* Non-square case - doesn't fit to MKL ?TRMM. Fall to default triangular product or call MKL ?GEMM*/ \
if (rows != depth) { \
\
int nthr = mkl_domain_get_max_threads(MKL_BLAS); \
int nthr = mkl_domain_get_max_threads(EIGEN_MKL_DOMAIN_BLAS); \
\
if (((nthr==1) && (((std::max)(rows,depth)-diagSize)/(double)diagSize < 0.5))) { \
/* Most likely no benefit to call TRMM or GEMM from MKL*/ \
@ -223,7 +223,7 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \
/* Non-square case - doesn't fit to MKL ?TRMM. Fall to default triangular product or call MKL ?GEMM*/ \
if (cols != depth) { \
\
int nthr = mkl_domain_get_max_threads(MKL_BLAS); \
int nthr = mkl_domain_get_max_threads(EIGEN_MKL_DOMAIN_BLAS); \
\
if ((nthr==1) && (((std::max)(cols,depth)-diagSize)/(double)diagSize < 0.5)) { \
/* Most likely no benefit to call TRMM or GEMM from MKL*/ \

View File

@ -302,9 +302,12 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conj
for (Index i=0; i<actual_mc; ++i)
r[i] -= a[i] * b;
}
Scalar b = (Mode & UnitDiag) ? Scalar(1) : Scalar(1)/conj(rhs(j,j));
for (Index i=0; i<actual_mc; ++i)
r[i] *= b;
if((Mode & UnitDiag)==0)
{
Scalar b = conj(rhs(j,j));
for (Index i=0; i<actual_mc; ++i)
r[i] /= b;
}
}
// pack the just computed part of lhs to A

View File

@ -433,6 +433,19 @@ struct MatrixXpr {};
/** The type used to identify an array expression */
struct ArrayXpr {};
namespace internal {
/** \internal
* Constants for comparison functors
*/
enum ComparisonName {
cmp_EQ = 0,
cmp_LT = 1,
cmp_LE = 2,
cmp_UNORD = 3,
cmp_NEQ = 4
};
}
} // end namespace Eigen
#endif // EIGEN_CONSTANTS_H

View File

@ -235,6 +235,9 @@ template<typename Scalar> class Rotation2D;
template<typename Scalar> class AngleAxis;
template<typename Scalar,int Dim> class Translation;
// Sparse module:
template<typename Derived> class SparseMatrixBase;
#ifdef EIGEN2_SUPPORT
template<typename Derived, int _Dim> class eigen2_RotationBase;
template<typename Lhs, typename Rhs> class eigen2_Cross;

View File

@ -76,6 +76,38 @@
#include <mkl_lapacke.h>
#define EIGEN_MKL_VML_THRESHOLD 128
/* MKL_DOMAIN_BLAS, etc are defined only in 10.3 update 7 */
/* MKL_BLAS, etc are not defined in 11.2 */
#ifdef MKL_DOMAIN_ALL
#define EIGEN_MKL_DOMAIN_ALL MKL_DOMAIN_ALL
#else
#define EIGEN_MKL_DOMAIN_ALL MKL_ALL
#endif
#ifdef MKL_DOMAIN_BLAS
#define EIGEN_MKL_DOMAIN_BLAS MKL_DOMAIN_BLAS
#else
#define EIGEN_MKL_DOMAIN_BLAS MKL_BLAS
#endif
#ifdef MKL_DOMAIN_FFT
#define EIGEN_MKL_DOMAIN_FFT MKL_DOMAIN_FFT
#else
#define EIGEN_MKL_DOMAIN_FFT MKL_FFT
#endif
#ifdef MKL_DOMAIN_VML
#define EIGEN_MKL_DOMAIN_VML MKL_DOMAIN_VML
#else
#define EIGEN_MKL_DOMAIN_VML MKL_VML
#endif
#ifdef MKL_DOMAIN_PARDISO
#define EIGEN_MKL_DOMAIN_PARDISO MKL_DOMAIN_PARDISO
#else
#define EIGEN_MKL_DOMAIN_PARDISO MKL_PARDISO
#endif
namespace Eigen {
typedef std::complex<double> dcomplex;

View File

@ -13,7 +13,7 @@
#define EIGEN_WORLD_VERSION 3
#define EIGEN_MAJOR_VERSION 2
#define EIGEN_MINOR_VERSION 4
#define EIGEN_MINOR_VERSION 7
#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
(EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
@ -96,6 +96,20 @@
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
#endif
// A Clang feature extension to determine compiler features.
// We use it to determine 'cxx_rvalue_references'
#ifndef __has_feature
# define __has_feature(x) 0
#endif
// Do we support r-value references?
#if (__has_feature(cxx_rvalue_references) || \
defined(__GXX_EXPERIMENTAL_CXX0X__) || \
(defined(_MSC_VER) && _MSC_VER >= 1600))
#define EIGEN_HAVE_RVALUE_REFERENCES
#endif
// Cross compiler wrapper around LLVM's __has_builtin
#ifdef __has_builtin
# define EIGEN_HAS_BUILTIN(x) __has_builtin(x)
@ -278,6 +292,7 @@ namespace Eigen {
#error Please tell me what is the equivalent of __attribute__((aligned(n))) for your compiler
#endif
#define EIGEN_ALIGN8 EIGEN_ALIGN_TO_BOUNDARY(8)
#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)
#if EIGEN_ALIGN_STATICALLY
@ -332,8 +347,11 @@ namespace Eigen {
}
#endif
#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
/** \internal
* \brief Macro to manually inherit assignment operators.
* This is necessary, because the implicitly defined assignment operator gets deleted when a custom operator= is defined.
*/
#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
/**
* Just a side note. Commenting within defines works only by documenting
@ -405,6 +423,8 @@ namespace Eigen {
#define EIGEN_SIZE_MAX(a,b) (((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
: ((int)a >= (int)b) ? (int)a : (int)b)
#define EIGEN_ADD_COST(a,b) int(a)==Dynamic || int(b)==Dynamic ? Dynamic : int(a)+int(b)
#define EIGEN_LOGICAL_XOR(a,b) (((a) || (b)) && !((a) && (b)))
#define EIGEN_IMPLIES(a,b) (!(a) || (b))

View File

@ -523,7 +523,7 @@ template<typename T> struct smart_copy_helper<T,false> {
// you can overwrite Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA
// to the appropriate stack allocation function
#ifndef EIGEN_ALLOCA
#if (defined __linux__)
#if (defined __linux__) || (defined __APPLE__) || (defined alloca)
#define EIGEN_ALLOCA alloca
#elif defined(_MSC_VER)
#define EIGEN_ALLOCA _alloca

View File

@ -366,17 +366,17 @@ struct dense_xpr_base<Derived, ArrayXpr>
/** \internal Helper base class to add a scalar multiple operator
* overloads for complex types */
template<typename Derived,typename Scalar,typename OtherScalar,
template<typename Derived, typename Scalar, typename OtherScalar, typename BaseType,
bool EnableIt = !is_same<Scalar,OtherScalar>::value >
struct special_scalar_op_base : public DenseCoeffsBase<Derived>
struct special_scalar_op_base : public BaseType
{
// dummy operator* so that the
// "using special_scalar_op_base::operator*" compiles
void operator*() const;
};
template<typename Derived,typename Scalar,typename OtherScalar>
struct special_scalar_op_base<Derived,Scalar,OtherScalar,true> : public DenseCoeffsBase<Derived>
template<typename Derived,typename Scalar,typename OtherScalar, typename BaseType>
struct special_scalar_op_base<Derived,Scalar,OtherScalar,BaseType,true> : public BaseType
{
const CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
operator*(const OtherScalar& scalar) const

View File

@ -234,6 +234,12 @@ template<typename _MatrixType> class ComplexEigenSolver
}
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
EigenvectorType m_eivec;
EigenvalueType m_eivalues;
ComplexSchur<MatrixType> m_schur;
@ -251,6 +257,8 @@ template<typename MatrixType>
ComplexEigenSolver<MatrixType>&
ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
{
check_template_parameters();
// this code is inspired from Jampack
eigen_assert(matrix.cols() == matrix.rows());

View File

@ -298,6 +298,13 @@ template<typename _MatrixType> class EigenSolver
void doComputeEigenvectors();
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
}
MatrixType m_eivec;
EigenvalueType m_eivalues;
bool m_isInitialized;
@ -364,6 +371,8 @@ template<typename MatrixType>
EigenSolver<MatrixType>&
EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
{
check_template_parameters();
using std::sqrt;
using std::abs;
eigen_assert(matrix.cols() == matrix.rows());

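The check_template_parameters() hooks added to these solvers turn an instantiation with an unsupported scalar type (integer, or complex where a real type is required) into a static assertion when compute() is instantiated, instead of silent nonsense at run time. Sketch of a valid use next to the now-rejected one (left commented out):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d a;
  a << 1, 2, 3,
       4, 5, 6,
       7, 8, 10;
  Eigen::EigenSolver<Eigen::Matrix3d> es(a);          // fine: real, non-integer scalar
  std::cout << es.eigenvalues().transpose() << "\n";

  // Eigen::EigenSolver<Eigen::Matrix3i> bad(Eigen::Matrix3i::Identity());
  // ^ with the hunks above this should now fail to compile with a clear static assert.
  return 0;
}
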
View File

@ -263,6 +263,13 @@ template<typename _MatrixType> class GeneralizedEigenSolver
}
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
}
MatrixType m_eivec;
ComplexVectorType m_alphas;
VectorType m_betas;
@ -290,6 +297,8 @@ template<typename MatrixType>
GeneralizedEigenSolver<MatrixType>&
GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors)
{
check_template_parameters();
using std::sqrt;
using std::abs;
eigen_assert(A.cols() == A.rows() && B.cols() == A.rows() && B.cols() == B.rows());

View File

@ -240,10 +240,10 @@ namespace Eigen {
m_S.coeffRef(i,j) = Scalar(0.0);
m_S.rightCols(dim-j-1).applyOnTheLeft(i-1,i,G.adjoint());
m_T.rightCols(dim-i+1).applyOnTheLeft(i-1,i,G.adjoint());
// update Q
if (m_computeQZ)
m_Q.applyOnTheRight(i-1,i,G);
}
// update Q
if (m_computeQZ)
m_Q.applyOnTheRight(i-1,i,G);
// kill T(i,i-1)
if(m_T.coeff(i,i-1)!=Scalar(0))
{
@ -251,10 +251,10 @@ namespace Eigen {
m_T.coeffRef(i,i-1) = Scalar(0.0);
m_S.applyOnTheRight(i,i-1,G);
m_T.topRows(i).applyOnTheRight(i,i-1,G);
// update Z
if (m_computeQZ)
m_Z.applyOnTheLeft(i,i-1,G.adjoint());
}
// update Z
if (m_computeQZ)
m_Z.applyOnTheLeft(i,i-1,G.adjoint());
}
}
}

View File

@ -234,7 +234,7 @@ template<typename _MatrixType> class RealSchur
typedef Matrix<Scalar,3,1> Vector3s;
Scalar computeNormOfT();
Index findSmallSubdiagEntry(Index iu, const Scalar& norm);
Index findSmallSubdiagEntry(Index iu);
void splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift);
void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
@ -286,7 +286,7 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMa
{
while (iu >= 0)
{
Index il = findSmallSubdiagEntry(iu, norm);
Index il = findSmallSubdiagEntry(iu);
// Check for convergence
if (il == iu) // One root found
@ -343,16 +343,14 @@ inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
/** \internal Look for single small sub-diagonal element and returns its index */
template<typename MatrixType>
inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, const Scalar& norm)
inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu)
{
using std::abs;
Index res = iu;
while (res > 0)
{
Scalar s = abs(m_matT.coeff(res-1,res-1)) + abs(m_matT.coeff(res,res));
if (s == 0.0)
s = norm;
if (abs(m_matT.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
if (abs(m_matT.coeff(res,res-1)) <= NumTraits<Scalar>::epsilon() * s)
break;
res--;
}
@ -457,9 +455,7 @@ inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const V
const Scalar lhs = m_matT.coeff(im,im-1) * (abs(v.coeff(1)) + abs(v.coeff(2)));
const Scalar rhs = v.coeff(0) * (abs(m_matT.coeff(im-1,im-1)) + abs(Tmm) + abs(m_matT.coeff(im+1,im+1)));
if (abs(lhs) < NumTraits<Scalar>::epsilon() * rhs)
{
break;
}
}
}

View File

@ -80,6 +80,8 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
/** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef Matrix<Scalar,Size,Size,ColMajor,MaxColsAtCompileTime,MaxColsAtCompileTime> EigenvectorsType;
/** \brief Real scalar type for \p _MatrixType.
*
@ -225,7 +227,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
*
* \sa eigenvalues()
*/
const MatrixType& eigenvectors() const
const EigenvectorsType& eigenvectors() const
{
eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
@ -351,7 +353,12 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
#endif // EIGEN2_SUPPORT
protected:
MatrixType m_eivec;
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
EigenvectorsType m_eivec;
RealVectorType m_eivalues;
typename TridiagonalizationType::SubDiagonalType m_subdiag;
ComputationInfo m_info;
@ -376,7 +383,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
* "implicit symmetric QR step with Wilkinson shift"
*/
namespace internal {
template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
template<typename RealScalar, typename Scalar, typename Index>
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n);
}
@ -384,6 +391,8 @@ template<typename MatrixType>
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
::compute(const MatrixType& matrix, int options)
{
check_template_parameters();
using std::abs;
eigen_assert(matrix.cols() == matrix.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
@ -406,7 +415,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
// declare some aliases
RealVectorType& diag = m_eivalues;
MatrixType& mat = m_eivec;
EigenvectorsType& mat = m_eivec;
// map the matrix coefficients to [-1:1] to avoid over- and underflow.
mat = matrix.template triangularView<Lower>();
@ -442,7 +451,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
while (start>0 && m_subdiag[start-1]!=0)
start--;
internal::tridiagonal_qr_step<MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor>(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n);
internal::tridiagonal_qr_step(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n);
}
if (iter <= m_maxIterations * n)
@ -490,7 +499,13 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
typedef typename SolverType::MatrixType MatrixType;
typedef typename SolverType::RealVectorType VectorType;
typedef typename SolverType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef typename SolverType::EigenvectorsType EigenvectorsType;
/** \internal
* Computes the roots of the characteristic polynomial of \a m.
* For numerical stability m.trace() should be near zero and to avoid over- or underflow m should be normalized.
*/
static inline void computeRoots(const MatrixType& m, VectorType& roots)
{
using std::sqrt;
@ -510,158 +525,123 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
// Construct the parameters used in classifying the roots of the equation
// and in solving the equation for the roots in closed form.
Scalar c2_over_3 = c2*s_inv3;
Scalar a_over_3 = (c1 - c2*c2_over_3)*s_inv3;
if (a_over_3 > Scalar(0))
Scalar a_over_3 = (c2*c2_over_3 - c1)*s_inv3;
if(a_over_3<Scalar(0))
a_over_3 = Scalar(0);
Scalar half_b = Scalar(0.5)*(c0 + c2_over_3*(Scalar(2)*c2_over_3*c2_over_3 - c1));
Scalar q = half_b*half_b + a_over_3*a_over_3*a_over_3;
if (q > Scalar(0))
Scalar q = a_over_3*a_over_3*a_over_3 - half_b*half_b;
if(q<Scalar(0))
q = Scalar(0);
// Compute the eigenvalues by solving for the roots of the polynomial.
Scalar rho = sqrt(-a_over_3);
Scalar theta = atan2(sqrt(-q),half_b)*s_inv3;
Scalar rho = sqrt(a_over_3);
Scalar theta = atan2(sqrt(q),half_b)*s_inv3; // since sqrt(q) > 0, atan2 is in [0, pi] and theta is in [0, pi/3]
Scalar cos_theta = cos(theta);
Scalar sin_theta = sin(theta);
roots(0) = c2_over_3 + Scalar(2)*rho*cos_theta;
roots(1) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta);
roots(2) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta);
// Sort in increasing order.
if (roots(0) >= roots(1))
std::swap(roots(0),roots(1));
if (roots(1) >= roots(2))
{
std::swap(roots(1),roots(2));
if (roots(0) >= roots(1))
std::swap(roots(0),roots(1));
}
// roots are already sorted, since cos is monotonically decreasing on [0, pi]
roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); // == 2*rho*cos(theta+2pi/3)
roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); // == 2*rho*cos(theta+ pi/3)
roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta;
}
static inline bool extract_kernel(MatrixType& mat, Ref<VectorType> res, Ref<VectorType> representative)
{
using std::abs;
Index i0;
// Find non-zero column i0 (by construction, there must exist a non zero coefficient on the diagonal):
mat.diagonal().cwiseAbs().maxCoeff(&i0);
// mat.col(i0) is a good candidate for an orthogonal vector to the current eigenvector,
// so let's save it:
representative = mat.col(i0);
Scalar n0, n1;
VectorType c0, c1;
n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm();
n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm();
if(n0>n1) res = c0/std::sqrt(n0);
else res = c1/std::sqrt(n1);
return true;
}
static inline void run(SolverType& solver, const MatrixType& mat, int options)
{
using std::sqrt;
eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
&& "invalid option parameter");
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
MatrixType& eivecs = solver.m_eivec;
EigenvectorsType& eivecs = solver.m_eivec;
VectorType& eivals = solver.m_eivalues;
// map the matrix coefficients to [-1:1] to avoid over- and underflow.
Scalar scale = mat.cwiseAbs().maxCoeff();
MatrixType scaledMat = mat / scale;
// Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.
Scalar shift = mat.trace() / Scalar(3);
// TODO Avoid this copy. Currently it is necessary to suppress bogus values when determining maxCoeff and for computing the eigenvectors later
MatrixType scaledMat = mat.template selfadjointView<Lower>();
scaledMat.diagonal().array() -= shift;
Scalar scale = scaledMat.cwiseAbs().maxCoeff();
if(scale > 0) scaledMat /= scale; // TODO for scale==0 we could save the remaining operations
// compute the eigenvalues
computeRoots(scaledMat,eivals);
// compute the eigen vectors
// compute the eigenvectors
if(computeEigenvectors)
{
Scalar safeNorm2 = Eigen::NumTraits<Scalar>::epsilon();
if((eivals(2)-eivals(0))<=Eigen::NumTraits<Scalar>::epsilon())
{
// All three eigenvalues are numerically the same
eivecs.setIdentity();
}
else
{
scaledMat = scaledMat.template selfadjointView<Lower>();
MatrixType tmp;
tmp = scaledMat;
// Compute the eigenvector of the most distinct eigenvalue
Scalar d0 = eivals(2) - eivals(1);
Scalar d1 = eivals(1) - eivals(0);
int k = d0 > d1 ? 2 : 0;
d0 = d0 > d1 ? d0 : d1;
tmp.diagonal().array () -= eivals(k);
VectorType cross;
Scalar n;
n = (cross = tmp.row(0).cross(tmp.row(1))).squaredNorm();
if(n>safeNorm2)
Index k(0), l(2);
if(d0 > d1)
{
eivecs.col(k) = cross / sqrt(n);
std::swap(k,l);
d0 = d1;
}
// Compute the eigenvector of index k
{
tmp.diagonal().array () -= eivals(k);
// By construction, 'tmp' is of rank 2, and its kernel corresponds to the respective eigenvector.
extract_kernel(tmp, eivecs.col(k), eivecs.col(l));
}
// Compute eigenvector of index l
if(d0<=2*Eigen::NumTraits<Scalar>::epsilon()*d1)
{
// If d0 is too small, then the two other eigenvalues are numerically the same,
// and thus we only have to ortho-normalize the near orthogonal vector we saved above.
eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l))*eivecs.col(l);
eivecs.col(l).normalize();
}
else
{
n = (cross = tmp.row(0).cross(tmp.row(2))).squaredNorm();
tmp = scaledMat;
tmp.diagonal().array () -= eivals(l);
if(n>safeNorm2)
{
eivecs.col(k) = cross / sqrt(n);
}
else
{
n = (cross = tmp.row(1).cross(tmp.row(2))).squaredNorm();
if(n>safeNorm2)
{
eivecs.col(k) = cross / sqrt(n);
}
else
{
// the input matrix and/or the eigenvaues probably contains some inf/NaN,
// => exit
// scale back to the original size.
eivals *= scale;
solver.m_info = NumericalIssue;
solver.m_isInitialized = true;
solver.m_eigenvectorsOk = computeEigenvectors;
return;
}
}
VectorType dummy;
extract_kernel(tmp, eivecs.col(l), dummy);
}
tmp = scaledMat;
tmp.diagonal().array() -= eivals(1);
if(d0<=Eigen::NumTraits<Scalar>::epsilon())
{
eivecs.col(1) = eivecs.col(k).unitOrthogonal();
}
else
{
n = (cross = eivecs.col(k).cross(tmp.row(0))).squaredNorm();
if(n>safeNorm2)
{
eivecs.col(1) = cross / sqrt(n);
}
else
{
n = (cross = eivecs.col(k).cross(tmp.row(1))).squaredNorm();
if(n>safeNorm2)
eivecs.col(1) = cross / sqrt(n);
else
{
n = (cross = eivecs.col(k).cross(tmp.row(2))).squaredNorm();
if(n>safeNorm2)
eivecs.col(1) = cross / sqrt(n);
else
{
// we should never reach this point,
// if so the last two eigenvalues are likely to be very close to each other
eivecs.col(1) = eivecs.col(k).unitOrthogonal();
}
}
}
// make sure that eivecs[1] is orthogonal to eivecs[2]
// FIXME: this step should not be needed
Scalar d = eivecs.col(1).dot(eivecs.col(k));
eivecs.col(1) = (eivecs.col(1) - d * eivecs.col(k)).normalized();
}
eivecs.col(k==2 ? 0 : 2) = eivecs.col(k).cross(eivecs.col(1)).normalized();
// Compute last eigenvector from the other two
eivecs.col(1) = eivecs.col(2).cross(eivecs.col(0)).normalized();
}
}
// Rescale back to the original size.
eivals *= scale;
eivals.array() += shift;
solver.m_info = Success;
solver.m_isInitialized = true;
@ -675,11 +655,12 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
typedef typename SolverType::MatrixType MatrixType;
typedef typename SolverType::RealVectorType VectorType;
typedef typename SolverType::Scalar Scalar;
typedef typename SolverType::EigenvectorsType EigenvectorsType;
static inline void computeRoots(const MatrixType& m, VectorType& roots)
{
using std::sqrt;
const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*m(1,0)*m(1,0));
const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0)));
const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));
roots(0) = t1 - t0;
roots(1) = t1 + t0;
@ -688,13 +669,15 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
static inline void run(SolverType& solver, const MatrixType& mat, int options)
{
using std::sqrt;
using std::abs;
eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
&& "invalid option parameter");
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
MatrixType& eivecs = solver.m_eivec;
EigenvectorsType& eivecs = solver.m_eivec;
VectorType& eivals = solver.m_eivalues;
// map the matrix coefficients to [-1:1] to avoid over- and underflow.
@ -708,22 +691,29 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
// compute the eigen vectors
if(computeEigenvectors)
{
scaledMat.diagonal().array () -= eivals(1);
Scalar a2 = numext::abs2(scaledMat(0,0));
Scalar c2 = numext::abs2(scaledMat(1,1));
Scalar b2 = numext::abs2(scaledMat(1,0));
if(a2>c2)
if((eivals(1)-eivals(0))<=abs(eivals(1))*Eigen::NumTraits<Scalar>::epsilon())
{
eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
eivecs.col(1) /= sqrt(a2+b2);
eivecs.setIdentity();
}
else
{
eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
eivecs.col(1) /= sqrt(c2+b2);
}
scaledMat.diagonal().array () -= eivals(1);
Scalar a2 = numext::abs2(scaledMat(0,0));
Scalar c2 = numext::abs2(scaledMat(1,1));
Scalar b2 = numext::abs2(scaledMat(1,0));
if(a2>c2)
{
eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
eivecs.col(1) /= sqrt(a2+b2);
}
else
{
eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
eivecs.col(1) /= sqrt(c2+b2);
}
eivecs.col(0) << eivecs.col(1).unitOrthogonal();
eivecs.col(0) << eivecs.col(1).unitOrthogonal();
}
}
// Rescale back to the original size.
@ -746,7 +736,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
}
namespace internal {
template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
template<typename RealScalar, typename Scalar, typename Index>
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
{
using std::abs;
@ -798,8 +788,7 @@ static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index sta
// apply the givens rotation to the unit matrix Q = Q * G
if (matrixQ)
{
// FIXME if StorageOrder == RowMajor this operation is not very efficient
Map<Matrix<Scalar,Dynamic,Dynamic,StorageOrder> > q(matrixQ,n,n);
Map<Matrix<Scalar,Dynamic,Dynamic,ColMajor> > q(matrixQ,n,n);
q.applyOnTheRight(k,k+1,rot);
}
}

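This file carries the closed-form 2x2/3x3 path used by SelfAdjointEigenSolver::computeDirect(): the matrix is shifted by trace/3 and rescaled before the cubic is solved trigonometrically, and eigenvectors are now recovered by extracting the kernel of the shifted matrix. A quick cross-check of the direct path against the iterative one (hedged; the two results agree only up to rounding):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d a;
  a << 4, 1, 0,
       1, 3, 1,
       0, 1, 2;
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> direct, iterative;
  direct.computeDirect(a);    // the closed-form path patched above
  iterative.compute(a);       // the usual tridiagonal QR path
  std::cout << direct.eigenvalues().transpose() << "\n";
  std::cout << (direct.eigenvalues() - iterative.eigenvalues()).norm() << "\n";   // should be tiny
  return 0;
}
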
View File

@ -19,10 +19,12 @@ namespace Eigen {
*
* \brief An axis aligned box
*
* \param _Scalar the type of the scalar coefficients
* \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
* \tparam _Scalar the type of the scalar coefficients
* \tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
*
* This class represents an axis aligned box as a pair of the minimal and maximal corners.
* \warning The result of most methods is undefined when applied to an empty box. You can check for empty boxes using isEmpty().
* \sa alignedboxtypedefs
*/
template <typename _Scalar, int _AmbientDim>
class AlignedBox
@ -40,18 +42,21 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
/** Define constants to name the corners of a 1D, 2D or 3D axis aligned bounding box */
enum CornerType
{
/** 1D names */
/** 1D names @{ */
Min=0, Max=1,
/** @} */
/** Added names for 2D */
/** Identifier for 2D corner @{ */
BottomLeft=0, BottomRight=1,
TopLeft=2, TopRight=3,
/** @} */
/** Added names for 3D */
/** Identifier for 3D corner @{ */
BottomLeftFloor=0, BottomRightFloor=1,
TopLeftFloor=2, TopRightFloor=3,
BottomLeftCeil=4, BottomRightCeil=5,
TopLeftCeil=6, TopRightCeil=7
/** @} */
};
@ -63,34 +68,33 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim)
{ setEmpty(); }
/** Constructs a box with extremities \a _min and \a _max. */
/** Constructs a box with extremities \a _min and \a _max.
* \warning If either component of \a _min is larger than the same component of \a _max, the constructed box is empty. */
template<typename OtherVectorType1, typename OtherVectorType2>
inline AlignedBox(const OtherVectorType1& _min, const OtherVectorType2& _max) : m_min(_min), m_max(_max) {}
/** Constructs a box containing a single point \a p. */
template<typename Derived>
inline explicit AlignedBox(const MatrixBase<Derived>& a_p)
{
typename internal::nested<Derived,2>::type p(a_p.derived());
m_min = p;
m_max = p;
}
inline explicit AlignedBox(const MatrixBase<Derived>& p) : m_min(p), m_max(m_min)
{ }
~AlignedBox() {}
/** \returns the dimension in which the box holds */
inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size() : Index(AmbientDimAtCompileTime); }
/** \deprecated use isEmpty */
/** \deprecated use isEmpty() */
inline bool isNull() const { return isEmpty(); }
/** \deprecated use setEmpty */
/** \deprecated use setEmpty() */
inline void setNull() { setEmpty(); }
/** \returns true if the box is empty. */
/** \returns true if the box is empty.
* \sa setEmpty */
inline bool isEmpty() const { return (m_min.array() > m_max.array()).any(); }
/** Makes \c *this an empty box. */
/** Makes \c *this an empty box.
* \sa isEmpty */
inline void setEmpty()
{
m_min.setConstant( ScalarTraits::highest() );
@ -159,7 +163,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
* a uniform distribution */
inline VectorType sample() const
{
VectorType r;
VectorType r(dim());
for(Index d=0; d<dim(); ++d)
{
if(!ScalarTraits::IsInteger)
@ -175,27 +179,34 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
/** \returns true if the point \a p is inside the box \c *this. */
template<typename Derived>
inline bool contains(const MatrixBase<Derived>& a_p) const
inline bool contains(const MatrixBase<Derived>& p) const
{
typename internal::nested<Derived,2>::type p(a_p.derived());
return (m_min.array()<=p.array()).all() && (p.array()<=m_max.array()).all();
typename internal::nested<Derived,2>::type p_n(p.derived());
return (m_min.array()<=p_n.array()).all() && (p_n.array()<=m_max.array()).all();
}
/** \returns true if the box \a b is entirely inside the box \c *this. */
inline bool contains(const AlignedBox& b) const
{ return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); }
/** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
/** \returns true if the box \a b is intersecting the box \c *this.
* \sa intersection, clamp */
inline bool intersects(const AlignedBox& b) const
{ return (m_min.array()<=(b.max)().array()).all() && ((b.min)().array()<=m_max.array()).all(); }
/** Extends \c *this such that it contains the point \a p and returns a reference to \c *this.
* \sa extend(const AlignedBox&) */
template<typename Derived>
inline AlignedBox& extend(const MatrixBase<Derived>& a_p)
inline AlignedBox& extend(const MatrixBase<Derived>& p)
{
typename internal::nested<Derived,2>::type p(a_p.derived());
m_min = m_min.cwiseMin(p);
m_max = m_max.cwiseMax(p);
typename internal::nested<Derived,2>::type p_n(p.derived());
m_min = m_min.cwiseMin(p_n);
m_max = m_max.cwiseMax(p_n);
return *this;
}
/** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */
/** Extends \c *this such that it contains the box \a b and returns a reference to \c *this.
* \sa merged, extend(const MatrixBase&) */
inline AlignedBox& extend(const AlignedBox& b)
{
m_min = m_min.cwiseMin(b.m_min);
@ -203,7 +214,9 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
return *this;
}
/** Clamps \c *this by the box \a b and returns a reference to \c *this. */
/** Clamps \c *this by the box \a b and returns a reference to \c *this.
* \note If the boxes don't intersect, the resulting box is empty.
* \sa intersection(), intersects() */
inline AlignedBox& clamp(const AlignedBox& b)
{
m_min = m_min.cwiseMax(b.m_min);
@ -211,11 +224,15 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
return *this;
}
/** Returns an AlignedBox that is the intersection of \a b and \c *this */
/** Returns an AlignedBox that is the intersection of \a b and \c *this
* \note If the boxes don't intersect, the resulting box is empty.
* \sa intersects(), clamp, contains() */
inline AlignedBox intersection(const AlignedBox& b) const
{return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); }
/** Returns an AlignedBox that is the union of \a b and \c *this */
/** Returns an AlignedBox that is the union of \a b and \c *this.
* \note Merging with an empty box may result in a box bigger than \c *this.
* \sa extend(const AlignedBox&) */
inline AlignedBox merged(const AlignedBox& b) const
{ return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); }
@ -231,20 +248,20 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
/** \returns the squared distance between the point \a p and the box \c *this,
* and zero if \a p is inside the box.
* \sa exteriorDistance()
* \sa exteriorDistance(const MatrixBase&), squaredExteriorDistance(const AlignedBox&)
*/
template<typename Derived>
inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& a_p) const;
inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& p) const;
/** \returns the squared distance between the boxes \a b and \c *this,
* and zero if the boxes intersect.
* \sa exteriorDistance()
* \sa exteriorDistance(const AlignedBox&), squaredExteriorDistance(const MatrixBase&)
*/
inline Scalar squaredExteriorDistance(const AlignedBox& b) const;
/** \returns the distance between the point \a p and the box \c *this,
* and zero if \a p is inside the box.
* \sa squaredExteriorDistance()
* \sa squaredExteriorDistance(const MatrixBase&), exteriorDistance(const AlignedBox&)
*/
template<typename Derived>
inline NonInteger exteriorDistance(const MatrixBase<Derived>& p) const
@ -252,7 +269,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
/** \returns the distance between the boxes \a b and \c *this,
* and zero if the boxes intersect.
* \sa squaredExteriorDistance()
* \sa squaredExteriorDistance(const AlignedBox&), exteriorDistance(const MatrixBase&)
*/
inline NonInteger exteriorDistance(const AlignedBox& b) const
{ using std::sqrt; return sqrt(NonInteger(squaredExteriorDistance(b))); }

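A short tour of the box semantics the updated documentation spells out: default-constructed boxes are empty, extend() grows them, the newly added intersects() tests overlap, and intersection() of disjoint boxes yields an empty box:

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  Eigen::AlignedBox2d a, b;                  // fixed-dimension boxes start out empty
  std::cout << a.isEmpty() << "\n";          // 1

  a.extend(Eigen::Vector2d(0, 0)).extend(Eigen::Vector2d(2, 2));
  b.extend(Eigen::Vector2d(1, 1)).extend(Eigen::Vector2d(3, 3));

  std::cout << a.intersects(b) << "\n";                  // 1
  std::cout << a.intersection(b).isEmpty() << "\n";      // 0: overlap is [1,2] x [1,2]

  Eigen::AlignedBox2d c(Eigen::Vector2d(5, 5), Eigen::Vector2d(6, 6));
  std::cout << a.intersection(c).isEmpty() << "\n";      // 1: disjoint boxes give an empty box
  return 0;
}
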
View File

@ -131,7 +131,7 @@ public:
m_angle = Scalar(other.angle());
}
static inline const AngleAxis Identity() { return AngleAxis(0, Vector3::UnitX()); }
static inline const AngleAxis Identity() { return AngleAxis(Scalar(0), Vector3::UnitX()); }
/** \returns \c true if \c *this is approximately equal to \a other, within the precision
* determined by \a prec.
@ -165,8 +165,8 @@ AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived
Scalar n2 = q.vec().squaredNorm();
if (n2 < NumTraits<Scalar>::dummy_precision()*NumTraits<Scalar>::dummy_precision())
{
m_angle = 0;
m_axis << 1, 0, 0;
m_angle = Scalar(0);
m_axis << Scalar(1), Scalar(0), Scalar(0);
}
else
{

View File

@ -79,7 +79,7 @@ template<typename MatrixType,int _Direction> class Homogeneous
{
if( (int(Direction)==Vertical && row==m_matrix.rows())
|| (int(Direction)==Horizontal && col==m_matrix.cols()))
return 1;
return Scalar(1);
return m_matrix.coeff(row, col);
}

View File

@ -102,11 +102,11 @@ public:
/** \returns a quaternion representing an identity rotation
* \sa MatrixBase::Identity()
*/
static inline Quaternion<Scalar> Identity() { return Quaternion<Scalar>(1, 0, 0, 0); }
static inline Quaternion<Scalar> Identity() { return Quaternion<Scalar>(Scalar(1), Scalar(0), Scalar(0), Scalar(0)); }
/** \sa QuaternionBase::Identity(), MatrixBase::setIdentity()
*/
inline QuaternionBase& setIdentity() { coeffs() << 0, 0, 0, 1; return *this; }
inline QuaternionBase& setIdentity() { coeffs() << Scalar(0), Scalar(0), Scalar(0), Scalar(1); return *this; }
/** \returns the squared norm of the quaternion's coefficients
* \sa QuaternionBase::norm(), MatrixBase::squaredNorm()
@ -161,7 +161,7 @@ public:
{ return coeffs().isApprox(other.coeffs(), prec); }
/** return the result vector of \a v through the rotation*/
EIGEN_STRONG_INLINE Vector3 _transformVector(Vector3 v) const;
EIGEN_STRONG_INLINE Vector3 _transformVector(const Vector3& v) const;
/** \returns \c *this with scalar type casted to \a NewScalarType
*
@ -231,7 +231,7 @@ class Quaternion : public QuaternionBase<Quaternion<_Scalar,_Options> >
public:
typedef _Scalar Scalar;
EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Quaternion)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Quaternion)
using Base::operator*=;
typedef typename internal::traits<Quaternion>::Coefficients Coefficients;
@ -341,7 +341,7 @@ class Map<const Quaternion<_Scalar>, _Options >
public:
typedef _Scalar Scalar;
typedef typename internal::traits<Map>::Coefficients Coefficients;
EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
using Base::operator*=;
/** Constructs a Mapped Quaternion object from the pointer \a coeffs
@ -378,7 +378,7 @@ class Map<Quaternion<_Scalar>, _Options >
public:
typedef _Scalar Scalar;
typedef typename internal::traits<Map>::Coefficients Coefficients;
EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
using Base::operator*=;
/** Constructs a Mapped Quaternion object from the pointer \a coeffs
@ -461,7 +461,7 @@ EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator*= (const Quaterni
*/
template <class Derived>
EIGEN_STRONG_INLINE typename QuaternionBase<Derived>::Vector3
QuaternionBase<Derived>::_transformVector(Vector3 v) const
QuaternionBase<Derived>::_transformVector(const Vector3& v) const
{
// Note that this algorithm comes from the optimization by hand
// of the conversion to a Matrix followed by a Matrix/Vector product.
@ -637,7 +637,7 @@ inline Quaternion<typename internal::traits<Derived>::Scalar> QuaternionBase<Der
{
// FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite() ??
Scalar n2 = this->squaredNorm();
if (n2 > 0)
if (n2 > Scalar(0))
return Quaternion<Scalar>(conjugate().coeffs() / n2);
else
{
@ -667,12 +667,10 @@ template <class OtherDerived>
inline typename internal::traits<Derived>::Scalar
QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& other) const
{
using std::acos;
using std::atan2;
using std::abs;
Scalar d = abs(this->dot(other));
if (d>=Scalar(1))
return Scalar(0);
return Scalar(2) * acos(d);
Quaternion<Scalar> d = (*this) * other.conjugate();
return Scalar(2) * atan2( d.vec().norm(), abs(d.w()) );
}
@ -712,7 +710,7 @@ QuaternionBase<Derived>::slerp(const Scalar& t, const QuaternionBase<OtherDerive
scale0 = sin( ( Scalar(1) - t ) * theta) / sinTheta;
scale1 = sin( ( t * theta) ) / sinTheta;
}
if(d<0) scale1 = -scale1;
if(d<Scalar(0)) scale1 = -scale1;
return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs());
}

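On the angularDistance() rewrite: for nearly identical quaternions the dot product rounds to 1 and 2*acos(|dot|) collapses to zero, whereas 2*atan2(|vec(d)|, |w(d)|) of the relative rotation d = q1 * q2^-1 stays accurate for small angles. A small numerical illustration, assuming it is built against the patched Eigen:

#include <Eigen/Geometry>
#include <cmath>
#include <iostream>

int main()
{
  const double tiny = 1e-8;   // radians
  Eigen::Quaterniond q1 = Eigen::Quaterniond::Identity();
  Eigen::Quaterniond q2(Eigen::AngleAxisd(tiny, Eigen::Vector3d::UnitZ()));

  double acos_form  = 2.0 * std::acos(std::abs(q1.dot(q2)));   // the removed formula
  double atan2_form = q1.angularDistance(q2);                  // the new formula

  std::cout << acos_form - tiny << "\n";    // large relative error (often exactly -1e-8)
  std::cout << atan2_form - tiny << "\n";   // essentially zero
  return 0;
}
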
View File

@ -65,10 +65,10 @@ class DiagonalPreconditioner
{
typename MatType::InnerIterator it(mat,j);
while(it && it.index()!=j) ++it;
if(it && it.index()==j)
if(it && it.index()==j && it.value()!=Scalar(0))
m_invdiag(j) = Scalar(1)/it.value();
else
m_invdiag(j) = 0;
m_invdiag(j) = Scalar(1);
}
m_isInitialized = true;
return *this;

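The preconditioner change makes a missing or exactly-zero diagonal entry map to 1, i.e. leave that row unscaled, instead of storing a 0 or dividing by zero. The rule in isolation (inv_diag_entry is an illustrative helper):

#include <cstdio>

static double inv_diag_entry(double d)
{
  return d != 0.0 ? 1.0 / d : 1.0;   // fallback keeps the Jacobi preconditioner nonsingular
}

int main()
{
  const double diag[3] = {4.0, 0.0, 2.0};
  for (int j = 0; j < 3; ++j)
    std::printf("%g ", inv_diag_entry(diag[j]));   // prints: 0.25 1 0.5
  std::printf("\n");
  return 0;
}
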
View File

@ -151,20 +151,7 @@ struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
* \endcode
*
* By default the iterations start with x=0 as an initial guess of the solution.
* One can control the start using the solveWithGuess() method. Here is a step by
* step execution example starting with a random guess and printing the evolution
* of the estimated error:
* * \code
* x = VectorXd::Random(n);
* solver.setMaxIterations(1);
* int i = 0;
* do {
* x = solver.solveWithGuess(b,x);
* std::cout << i << " : " << solver.error() << std::endl;
* ++i;
* } while (solver.info()!=Success && i<100);
* \endcode
* Note that such a step by step excution is slightly slower.
* One can control the start using the solveWithGuess() method.
*
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
@ -199,7 +186,8 @@ public:
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
BiCGSTAB(const MatrixType& A) : Base(A) {}
template<typename MatrixDerived>
explicit BiCGSTAB(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}
~BiCGSTAB() {}

View File

@ -112,9 +112,9 @@ struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
* This class allows to solve for A.x = b sparse linear problems using a conjugate gradient algorithm.
* The sparse matrix A must be selfadjoint. The vectors x and b can be either dense or sparse.
*
* \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
* or Upper. Default is Lower.
* \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix.
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower,
* Upper, or Lower|Upper in which the full matrix entries will be considered. Default is Lower.
* \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
*
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
@ -137,20 +137,7 @@ struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
* \endcode
*
* By default the iterations start with x=0 as an initial guess of the solution.
* One can control the start using the solveWithGuess() method. Here is a step by
* step execution example starting with a random guess and printing the evolution
* of the estimated error:
* * \code
* x = VectorXd::Random(n);
* cg.setMaxIterations(1);
* int i = 0;
* do {
* x = cg.solveWithGuess(b,x);
* std::cout << i << " : " << cg.error() << std::endl;
* ++i;
* } while (cg.info()!=Success && i<100);
* \endcode
* Note that such a step by step execution is slightly slower.
* One can control the start using the solveWithGuess() method.
*
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
@ -189,7 +176,8 @@ public:
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
ConjugateGradient(const MatrixType& A) : Base(A) {}
template<typename MatrixDerived>
explicit ConjugateGradient(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}
~ConjugateGradient() {}
@ -213,6 +201,10 @@ public:
template<typename Rhs,typename Dest>
void _solveWithGuess(const Rhs& b, Dest& x) const
{
typedef typename internal::conditional<UpLo==(Lower|Upper),
const MatrixType&,
SparseSelfAdjointView<const MatrixType, UpLo>
>::type MatrixWrapperType;
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
@ -222,8 +214,7 @@ public:
m_error = Base::m_tolerance;
typename Dest::ColXpr xj(x,j);
internal::conjugate_gradient(mp_matrix->template selfadjointView<UpLo>(), b.col(j), xj,
Base::m_preconditioner, m_iterations, m_error);
internal::conjugate_gradient(MatrixWrapperType(*mp_matrix), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error);
}
m_isInitialized = true;
@ -234,7 +225,7 @@ public:
template<typename Rhs,typename Dest>
void _solve(const Rhs& b, Dest& x) const
{
x.setOnes();
x.setZero();
_solveWithGuess(b,x);
}
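
A short usage sketch of the new Lower|Upper option (the helper name is invented; A is assumed symmetric positive definite with both triangles stored):

#include <Eigen/IterativeLinearSolvers>
#include <Eigen/SparseCore>

Eigen::VectorXd solveSPD(const Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& b) {
  // With Lower|Upper the full matrix is used directly, so no SparseSelfAdjointView
  // wrapper is needed in the inner conjugate-gradient loop.
  Eigen::ConjugateGradient<Eigen::SparseMatrix<double>, Eigen::Lower | Eigen::Upper> cg(A);
  return cg.solve(b);
}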

View File

@ -150,7 +150,6 @@ class IncompleteLUT : internal::noncopyable
{
analyzePattern(amat);
factorize(amat);
m_isInitialized = m_factorizationIsOk;
return *this;
}
@ -160,7 +159,7 @@ class IncompleteLUT : internal::noncopyable
template<typename Rhs, typename Dest>
void _solve(const Rhs& b, Dest& x) const
{
x = m_Pinv * b;
x = m_Pinv * b;
x = m_lu.template triangularView<UnitLower>().solve(x);
x = m_lu.template triangularView<Upper>().solve(x);
x = m_P * x;
@ -223,18 +222,29 @@ template<typename _MatrixType>
void IncompleteLUT<Scalar>::analyzePattern(const _MatrixType& amat)
{
// Compute the Fill-reducing permutation
// Since ILUT does not perform any numerical pivoting,
// it is highly preferable to keep the diagonal through symmetric permutations.
#ifndef EIGEN_MPL2_ONLY
// To this end, let's symmetrize the pattern and perform AMD on it.
SparseMatrix<Scalar,ColMajor, Index> mat1 = amat;
SparseMatrix<Scalar,ColMajor, Index> mat2 = amat.transpose();
// Symmetrize the pattern
// FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
* on the other hand, for a really non-symmetric pattern, mat2*mat1 should be preferred...
SparseMatrix<Scalar,ColMajor, Index> AtA = mat2 + mat1;
AtA.prune(keep_diag());
internal::minimum_degree_ordering<Scalar, Index>(AtA, m_P); // Then compute the AMD ordering...
m_Pinv = m_P.inverse(); // ... and the inverse permutation
AMDOrdering<Index> ordering;
ordering(AtA,m_P);
m_Pinv = m_P.inverse(); // cache the inverse permutation
#else
// If AMD is not available, (MPL2-only), then let's use the slower COLAMD routine.
SparseMatrix<Scalar,ColMajor, Index> mat1 = amat;
COLAMDOrdering<Index> ordering;
ordering(mat1,m_Pinv);
m_P = m_Pinv.inverse();
#endif
m_analysisIsOk = true;
m_factorizationIsOk = false;
m_isInitialized = false;
}
template <typename Scalar>
@ -442,6 +452,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
m_lu.makeCompressed();
m_factorizationIsOk = true;
m_isInitialized = m_factorizationIsOk;
m_info = Success;
}
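
IncompleteLUT is normally consumed as a preconditioner; a minimal sketch of that use (the helper name is invented; A and b are assumed compatible):

#include <Eigen/IterativeLinearSolvers>
#include <Eigen/SparseCore>

Eigen::VectorXd solveWithILUT(const Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& b) {
  Eigen::BiCGSTAB<Eigen::SparseMatrix<double>, Eigen::IncompleteLUT<double> > solver;
  solver.compute(A);   // runs analyzePattern() and factorize() on the ILUT preconditioner
  return solver.solve(b);
}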

View File

@ -49,10 +49,11 @@ public:
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
IterativeSolverBase(const MatrixType& A)
template<typename InputDerived>
IterativeSolverBase(const EigenBase<InputDerived>& A)
{
init();
compute(A);
compute(A.derived());
}
~IterativeSolverBase() {}
@ -62,9 +63,11 @@ public:
* Currently, this function mostly calls analyzePattern on the preconditioner. In the future
* we might, for instance, implement column reordering for faster matrix-vector products.
*/
Derived& analyzePattern(const MatrixType& A)
template<typename InputDerived>
Derived& analyzePattern(const EigenBase<InputDerived>& A)
{
m_preconditioner.analyzePattern(A);
grabInput(A.derived());
m_preconditioner.analyzePattern(*mp_matrix);
m_isInitialized = true;
m_analysisIsOk = true;
m_info = Success;
@ -80,11 +83,12 @@ public:
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
Derived& factorize(const MatrixType& A)
template<typename InputDerived>
Derived& factorize(const EigenBase<InputDerived>& A)
{
grabInput(A.derived());
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
mp_matrix = &A;
m_preconditioner.factorize(A);
m_preconditioner.factorize(*mp_matrix);
m_factorizationIsOk = true;
m_info = Success;
return derived();
@ -100,10 +104,11 @@ public:
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
Derived& compute(const MatrixType& A)
template<typename InputDerived>
Derived& compute(const EigenBase<InputDerived>& A)
{
mp_matrix = &A;
m_preconditioner.compute(A);
grabInput(A.derived());
m_preconditioner.compute(*mp_matrix);
m_isInitialized = true;
m_analysisIsOk = true;
m_factorizationIsOk = true;
@ -212,6 +217,28 @@ public:
}
protected:
template<typename InputDerived>
void grabInput(const EigenBase<InputDerived>& A)
{
// we const cast to prevent the creation of a MatrixType temporary by the compiler.
grabInput_impl(A.const_cast_derived());
}
template<typename InputDerived>
void grabInput_impl(const EigenBase<InputDerived>& A)
{
m_copyMatrix = A;
mp_matrix = &m_copyMatrix;
}
void grabInput_impl(MatrixType& A)
{
if(MatrixType::RowsAtCompileTime==Dynamic && MatrixType::ColsAtCompileTime==Dynamic)
m_copyMatrix.resize(0,0);
mp_matrix = &A;
}
void init()
{
m_isInitialized = false;
@ -220,6 +247,7 @@ protected:
m_maxIterations = -1;
m_tolerance = NumTraits<Scalar>::epsilon();
}
MatrixType m_copyMatrix;
const MatrixType* mp_matrix;
Preconditioner m_preconditioner;
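
What grabInput() buys, as a sketch (the function name is invented): any EigenBase expression can now be handed to the solver; if it is not exactly the solver's MatrixType it is evaluated into the internal m_copyMatrix, otherwise only a pointer to the caller's matrix is stored and no copy is made.

#include <Eigen/IterativeLinearSolvers>
#include <Eigen/SparseCore>

void demo(const Eigen::SparseMatrix<double>& A) {
  Eigen::ConjugateGradient<Eigen::SparseMatrix<double> > cg;
  cg.compute(A);        // exact MatrixType: the solver keeps a pointer, no copy
  cg.compute(2.0 * A);  // different expression type: evaluated into the internal copy first
}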

View File

@ -374,6 +374,12 @@ template<typename _MatrixType> class FullPivLU
inline Index cols() const { return m_lu.cols(); }
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
MatrixType m_lu;
PermutationPType m_p;
PermutationQType m_q;
@ -418,6 +424,8 @@ FullPivLU<MatrixType>::FullPivLU(const MatrixType& matrix)
template<typename MatrixType>
FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const MatrixType& matrix)
{
check_template_parameters();
// the permutations are stored as int indices, so just to be sure:
eigen_assert(matrix.rows()<=NumTraits<int>::highest() && matrix.cols()<=NumTraits<int>::highest());
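
The check_template_parameters() guard added here (and to the other decompositions further down) turns instantiation with an integer scalar into a clear compile-time error. A sketch, illustrative only:

#include <Eigen/LU>

void ok() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3);
  Eigen::FullPivLU<Eigen::MatrixXd> lu(A);   // fine: double scalar
}
// Eigen::FullPivLU<Eigen::Matrix3i> would now trip EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
// inside compute() instead of failing with an obscure error deep in the factorization.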

View File

@ -171,6 +171,12 @@ template<typename _MatrixType> class PartialPivLU
inline Index cols() const { return m_lu.cols(); }
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
MatrixType m_lu;
PermutationType m_p;
TranspositionType m_rowsTranspositions;
@ -386,6 +392,8 @@ void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, t
template<typename MatrixType>
PartialPivLU<MatrixType>& PartialPivLU<MatrixType>::compute(const MatrixType& matrix)
{
check_template_parameters();
// the row permutation is stored as int indices, so just to be sure:
eigen_assert(matrix.rows()<NumTraits<int>::highest());

View File

@ -137,22 +137,27 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
degree[i] = len[i]; // degree of node i
}
mark = internal::cs_wclear<Index>(0, 0, w, n); /* clear w */
elen[n] = -2; /* n is a dead element */
Cp[n] = -1; /* n is a root of assembly tree */
w[n] = 0; /* n is a dead element */
/* --- Initialize degree lists ------------------------------------------ */
for(i = 0; i < n; i++)
{
bool has_diag = false;
for(p = Cp[i]; p<Cp[i+1]; ++p)
if(Ci[p]==i)
{
has_diag = true;
break;
}
d = degree[i];
if(d == 0) /* node i is empty */
if(d == 1 && has_diag) /* node i is empty */
{
elen[i] = -2; /* element i is dead */
nel++;
Cp[i] = -1; /* i is a root of assembly tree */
w[i] = 0;
}
else if(d > dense) /* node i is dense */
else if(d > dense || !has_diag) /* node i is dense or has no structural diagonal element */
{
nv[i] = 0; /* absorb i into element n */
elen[i] = -1; /* node i is dead */
@ -168,6 +173,10 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
}
}
elen[n] = -2; /* n is a dead element */
Cp[n] = -1; /* n is a root of assembly tree */
w[n] = 0; /* n is a dead element */
while (nel < n) /* while (selecting pivots) do */
{
/* --- Select node of minimum approximate degree -------------------- */
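
A sketch of driving the AMD ordering directly, mirroring what IncompleteLUT::analyzePattern now does (the helper name is invented; AMDOrdering is assumed available, i.e. EIGEN_MPL2_ONLY is not defined):

#include <Eigen/OrderingMethods>
#include <Eigen/SparseCore>

void amdPermutation(const Eigen::SparseMatrix<double>& A,
                    Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int>& perm) {
  // Symmetrize the pattern first, as ILUT does, since AMD expects a symmetric structure.
  Eigen::SparseMatrix<double> At = A.transpose();
  Eigen::SparseMatrix<double> sym = At + A;
  Eigen::AMDOrdering<int> ordering;
  ordering(sym, perm);   // perm is the fill-reducing permutation
}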

View File

@ -384,6 +384,12 @@ template<typename _MatrixType> class ColPivHouseholderQR
}
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
MatrixType m_qr;
HCoeffsType m_hCoeffs;
PermutationType m_colsPermutation;
@ -422,6 +428,8 @@ typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::logAbsDetermina
template<typename MatrixType>
ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{
check_template_parameters();
using std::abs;
Index rows = matrix.rows();
Index cols = matrix.cols();
@ -463,20 +471,10 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
// we store that back into our table: it can't hurt to correct our table.
m_colSqNorms.coeffRef(biggest_col_index) = biggest_col_sq_norm;
// if the current biggest column is smaller than epsilon times the initial biggest column,
// terminate to avoid generating nan/inf values.
// Note that here, if we test instead for "biggest == 0", we get a failure every 1000 (or so)
// repetitions of the unit test, with the result of solve() filled with large values of the order
// of 1/(size*epsilon).
if(biggest_col_sq_norm < threshold_helper * RealScalar(rows-k))
{
// Track the number of meaningful pivots but do not stop the decomposition to make
// sure that the initial matrix is properly reproduced. See bug 941.
if(m_nonzero_pivots==size && biggest_col_sq_norm < threshold_helper * RealScalar(rows-k))
m_nonzero_pivots = k;
m_hCoeffs.tail(size-k).setZero();
m_qr.bottomRightCorner(rows-k,cols-k)
.template triangularView<StrictlyLower>()
.setZero();
break;
}
// apply the transposition to the columns
m_colsTranspositions.coeffRef(k) = biggest_col_index;
@ -505,7 +503,7 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
}
m_colsPermutation.setIdentity(PermIndexType(cols));
for(PermIndexType k = 0; k < m_nonzero_pivots; ++k)
for(PermIndexType k = 0; k < size/*m_nonzero_pivots*/; ++k)
m_colsPermutation.applyTranspositionOnTheRight(k, PermIndexType(m_colsTranspositions.coeff(k)));
m_det_pq = (number_of_transpositions%2) ? -1 : 1;
@ -555,13 +553,15 @@ struct solve_retval<ColPivHouseholderQR<_MatrixType>, Rhs>
} // end namespace internal
/** \returns the matrix Q as a sequence of householder transformations */
/** \returns the matrix Q as a sequence of householder transformations.
* You can extract the meaningful part only by using:
* \code qr.householderQ().setLength(qr.nonzeroPivots()) \endcode*/
template<typename MatrixType>
typename ColPivHouseholderQR<MatrixType>::HouseholderSequenceType ColPivHouseholderQR<MatrixType>
::householderQ() const
{
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate()).setLength(m_nonzero_pivots);
return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate());
}
/** \return the column-pivoting Householder QR decomposition of \c *this.
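
The setLength() usage mentioned in the updated documentation, spelled out as a sketch (the helper name is invented):

#include <Eigen/QR>

void extractQ(const Eigen::MatrixXd& A, Eigen::MatrixXd& Q, Eigen::MatrixXd& Qtrunc) {
  Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);
  Q      = qr.householderQ();                                 // product of all reflectors
  Qtrunc = qr.householderQ().setLength(qr.nonzeroPivots());   // only the meaningful reflectors
}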

View File

@ -63,12 +63,12 @@ ColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynami
\
m_nonzero_pivots = 0; \
m_maxpivot = RealScalar(0);\
m_colsPermutation.resize((int)cols); \
m_colsPermutation.resize(cols); \
m_colsPermutation.indices().setZero(); \
\
lapack_int lda = (lapack_int) m_qr.outerStride(), i; \
lapack_int lda = m_qr.outerStride(), i; \
lapack_int matrix_order = MKLCOLROW; \
LAPACKE_##MKLPREFIX##geqp3( matrix_order, (lapack_int)rows, (lapack_int)cols, (MKLTYPE*)m_qr.data(), lda, (lapack_int*)m_colsPermutation.indices().data(), (MKLTYPE*)m_hCoeffs.data()); \
LAPACKE_##MKLPREFIX##geqp3( matrix_order, rows, cols, (MKLTYPE*)m_qr.data(), lda, (lapack_int*)m_colsPermutation.indices().data(), (MKLTYPE*)m_hCoeffs.data()); \
m_isInitialized = true; \
m_maxpivot=m_qr.diagonal().cwiseAbs().maxCoeff(); \
m_hCoeffs.adjointInPlace(); \

View File

@ -368,6 +368,12 @@ template<typename _MatrixType> class FullPivHouseholderQR
RealScalar maxPivot() const { return m_maxpivot; }
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
MatrixType m_qr;
HCoeffsType m_hCoeffs;
IntDiagSizeVectorType m_rows_transpositions;
@ -407,6 +413,8 @@ typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::logAbsDetermin
template<typename MatrixType>
FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{
check_template_parameters();
using std::abs;
Index rows = matrix.rows();
Index cols = matrix.cols();

View File

@ -189,6 +189,12 @@ template<typename _MatrixType> class HouseholderQR
const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
MatrixType m_qr;
HCoeffsType m_hCoeffs;
RowVectorType m_temp;
@ -349,6 +355,8 @@ struct solve_retval<HouseholderQR<_MatrixType>, Rhs>
template<typename MatrixType>
HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{
check_template_parameters();
Index rows = matrix.rows();
Index cols = matrix.cols();
Index size = (std::min)(rows,cols);

View File

@ -47,7 +47,7 @@ namespace Eigen {
* You can then apply it to a vector.
*
* R is the sparse triangular factor. Use matrixQR() to get it as SparseMatrix.
* NOTE : The Index type of R is always UF_long. You can get it with SPQR::Index
* NOTE : The Index type of R is always SuiteSparse_long. You can get it with SPQR::Index
*
* \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<>
* NOTE
@ -59,24 +59,18 @@ class SPQR
public:
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
typedef UF_long Index ;
typedef SuiteSparse_long Index ;
typedef SparseMatrix<Scalar, ColMajor, Index> MatrixType;
typedef PermutationMatrix<Dynamic, Dynamic> PermutationType;
public:
SPQR()
: m_isInitialized(false),
m_ordering(SPQR_ORDERING_DEFAULT),
m_allow_tol(SPQR_DEFAULT_TOL),
m_tolerance (NumTraits<Scalar>::epsilon())
: m_isInitialized(false), m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
{
cholmod_l_start(&m_cc);
}
SPQR(const _MatrixType& matrix)
: m_isInitialized(false),
m_ordering(SPQR_ORDERING_DEFAULT),
m_allow_tol(SPQR_DEFAULT_TOL),
m_tolerance (NumTraits<Scalar>::epsilon())
SPQR(const _MatrixType& matrix)
: m_isInitialized(false), m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
{
cholmod_l_start(&m_cc);
compute(matrix);
@ -101,10 +95,26 @@ class SPQR
if(m_isInitialized) SPQR_free();
MatrixType mat(matrix);
/* Compute the default threshold as in MATLAB, see:
* Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
* Sparse QR Factorization", ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3
*/
RealScalar pivotThreshold = m_tolerance;
if(m_useDefaultThreshold)
{
using std::max;
RealScalar max2Norm = 0.0;
for (int j = 0; j < mat.cols(); j++) max2Norm = (max)(max2Norm, mat.col(j).norm());
if(max2Norm==RealScalar(0))
max2Norm = RealScalar(1);
pivotThreshold = 20 * (mat.rows() + mat.cols()) * max2Norm * NumTraits<RealScalar>::epsilon();
}
cholmod_sparse A;
A = viewAsCholmod(mat);
Index col = matrix.cols();
m_rank = SuiteSparseQR<Scalar>(m_ordering, m_tolerance, col, &A,
m_rank = SuiteSparseQR<Scalar>(m_ordering, pivotThreshold, col, &A,
&m_cR, &m_E, &m_H, &m_HPinv, &m_HTau, &m_cc);
if (!m_cR)
@ -120,7 +130,7 @@ class SPQR
/**
* Get the number of rows of the input matrix and the Q matrix
*/
inline Index rows() const {return m_H->nrow; }
inline Index rows() const {return m_cR->nrow; }
/**
* Get the number of columns of the input matrix.
@ -145,16 +155,25 @@ class SPQR
{
eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()");
eigen_assert(b.cols()==1 && "This method is for vectors only");
//Compute Q^T * b
typename Dest::PlainObject y;
typename Dest::PlainObject y, y2;
y = matrixQ().transpose() * b;
// Solves with the triangular matrix R
// Solves with the triangular matrix R
Index rk = this->rank();
y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView<Upper>().solve(y.topRows(rk));
y.bottomRows(cols()-rk).setZero();
y2 = y;
y.resize((std::max)(cols(),Index(y.rows())),y.cols());
y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView<Upper>().solve(y2.topRows(rk));
// Apply the column permutation
dest.topRows(cols()) = colsPermutation() * y.topRows(cols());
// colsPermutation() performs a copy of the permutation,
// so let's apply it manually:
for(Index i = 0; i < rk; ++i) dest.row(m_E[i]) = y.row(i);
for(Index i = rk; i < cols(); ++i) dest.row(m_E[i]).setZero();
// y.bottomRows(y.rows()-rk).setZero();
// dest = colsPermutation() * y.topRows(cols());
m_info = Success;
}
@ -197,7 +216,11 @@ class SPQR
/// Set the fill-reducing ordering method to be used
void setSPQROrdering(int ord) { m_ordering = ord;}
/// Set the tolerance tol to treat columns with 2-norm <= tol as zero
void setPivotThreshold(const RealScalar& tol) { m_tolerance = tol; }
void setPivotThreshold(const RealScalar& tol)
{
m_useDefaultThreshold = false;
m_tolerance = tol;
}
/** \returns a pointer to the SPQR workspace */
cholmod_common *cholmodCommon() const { return &m_cc; }
@ -230,6 +253,7 @@ class SPQR
mutable cholmod_dense *m_HTau; // The Householder coefficients
mutable Index m_rank; // The rank of the matrix
mutable cholmod_common m_cc; // Workspace and parameters
bool m_useDefaultThreshold; // Use default threshold
template<typename ,typename > friend struct SPQR_QProduct;
};
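
The default pivot threshold computed above follows the MATLAB/SuiteSparseQR convention from Davis (2011): 20 * (m + n) * max_j ||A.col(j)||_2 * eps, falling back to a unit column-norm maximum when the matrix is all zeros. As a standalone sketch (the function name is invented):

#include <Eigen/SparseCore>
#include <algorithm>
#include <limits>

double spqrDefaultThreshold(const Eigen::SparseMatrix<double>& A) {
  double max2Norm = 0.0;
  for (int j = 0; j < A.cols(); ++j)
    max2Norm = std::max(max2Norm, A.col(j).norm());
  if (max2Norm == 0.0) max2Norm = 1.0;   // avoid a zero threshold for an all-zero matrix
  return 20.0 * (A.rows() + A.cols()) * max2Norm * std::numeric_limits<double>::epsilon();
}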

View File

@ -742,6 +742,11 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
private:
void allocate(Index rows, Index cols, unsigned int computationOptions);
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
protected:
MatrixUType m_matrixU;
@ -818,6 +823,8 @@ template<typename MatrixType, int QRPreconditioner>
JacobiSVD<MatrixType, QRPreconditioner>&
JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
{
check_template_parameters();
using std::abs;
allocate(matrix.rows(), matrix.cols(), computationOptions);

View File

@ -57,6 +57,16 @@ public:
inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols)
: m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
{}
inline const Scalar coeff(int row, int col) const
{
return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
}
inline const Scalar coeff(int index) const
{
return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
}
EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
@ -226,6 +236,21 @@ public:
else
return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
}
inline Scalar& coeffRef(int row, int col)
{
return m_matrix.const_cast_derived().coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
}
inline const Scalar coeff(int row, int col) const
{
return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
}
inline const Scalar coeff(int index) const
{
return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
}
const Scalar& lastCoeff() const
{
@ -313,6 +338,16 @@ public:
else
return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
}
inline const Scalar coeff(int row, int col) const
{
return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
}
inline const Scalar coeff(int index) const
{
return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
}
const Scalar& lastCoeff() const
{
@ -355,7 +390,8 @@ const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatri
* is col-major (resp. row-major).
*/
template<typename Derived>
Block<Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
typename SparseMatrixBase<Derived>::InnerVectorsReturnType
SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
{
return Block<Derived,Dynamic,Dynamic,true>(derived(),
IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
@ -367,7 +403,8 @@ Block<Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Inde
* is col-major (resp. row-major). Read-only.
*/
template<typename Derived>
const Block<const Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
const typename SparseMatrixBase<Derived>::ConstInnerVectorsReturnType
SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
{
return Block<const Derived,Dynamic,Dynamic,true>(derived(),
IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
@ -394,8 +431,8 @@ public:
: m_matrix(xpr),
m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
m_blockRows(xpr.rows()),
m_blockCols(xpr.cols())
m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
m_blockCols(BlockCols==1 ? 1 : xpr.cols())
{}
/** Dynamic-size constructor
@ -497,3 +534,4 @@ public:
} // end namespace Eigen
#endif // EIGEN_SPARSE_BLOCK_H
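
A sketch of the random access these overloads enable on inner-vector blocks (the helper name is invented; A is assumed column-major, the default, with at least one row and three columns):

#include <Eigen/SparseCore>

void blockAccess(Eigen::SparseMatrix<double>& A) {
  // innerVectors(1, 2) views columns 1 and 2; coeff(0, 1) inside the block reads A(0, 2).
  double v = A.innerVectors(1, 2).coeff(0, 1);
  // coeffRef() goes through the non-const specialization and writes back into A,
  // inserting the entry if it is not structurally present.
  A.innerVectors(1, 2).coeffRef(0, 1) = v + 1.0;
}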

View File

@ -314,10 +314,10 @@ SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& othe
template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
EIGEN_STRONG_INLINE const typename SparseMatrixBase<Derived>::template CwiseProductDenseReturnType<OtherDerived>::Type
SparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) const
{
return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(derived(), other.derived());
return typename CwiseProductDenseReturnType<OtherDerived>::Type(derived(), other.derived());
}
} // end namespace Eigen
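
A short usage sketch of the sparse-dense cwiseProduct whose return type changes above (the helper name is invented; S and D are assumed to have matching dimensions):

#include <Eigen/Dense>
#include <Eigen/SparseCore>

Eigen::SparseMatrix<double> maskedScale(const Eigen::SparseMatrix<double>& S,
                                        const Eigen::MatrixXd& D) {
  return S.cwiseProduct(D);   // only the nonzeros of S are visited; the result stays sparse
}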

Some files were not shown because too many files have changed in this diff.