diff --git a/.github/scripts/python.sh b/.github/scripts/python.sh
index 5cc6ada24..38359e87d 100644
--- a/.github/scripts/python.sh
+++ b/.github/scripts/python.sh
@@ -47,8 +47,6 @@ case $WRAPPER in
     ;;
 esac
 
-git submodule update --init --recursive
-
 mkdir $GITHUB_WORKSPACE/build
 cd $GITHUB_WORKSPACE/build
 
@@ -82,9 +80,9 @@ case $WRAPPER in
     $PYTHON -m unittest discover
     ;;
   "pybind")
-    cd $GITHUB_WORKSPACE/python
+    cd $GITHUB_WORKSPACE/build/python
     $PYTHON setup.py install --user --prefix=
-    cd $GITHUB_WORKSPACE/wrap/python/gtsam_py/tests
+    cd $GITHUB_WORKSPACE/python/gtsam/tests
     $PYTHON -m unittest discover
     ;;
   *)
diff --git a/.github/scripts/unix.sh b/.github/scripts/unix.sh
index aa6e49650..f04100d0a 100644
--- a/.github/scripts/unix.sh
+++ b/.github/scripts/unix.sh
@@ -47,7 +47,6 @@ function configure()
   BUILD_DIR=$GITHUB_WORKSPACE/build
 
   #env
-  git submodule update --init --recursive
   rm -fr $BUILD_DIR || true
   mkdir $BUILD_DIR && cd $BUILD_DIR
 
@@ -64,11 +63,11 @@ function configure()
     -DGTSAM_BUILD_TESTS=${GTSAM_BUILD_TESTS:-OFF} \
     -DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \
     -DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
-    -DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \
     -DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \
-    -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V41:-OFF} \
+    -DGTSAM_ALLOW_DEPRECATED_SINCE_V41=${GTSAM_ALLOW_DEPRECATED_SINCE_V41:-OFF} \
+    -DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \
+    -DGTSAM_TYPEDEF_POINTS_TO_VECTOR=${GTSAM_TYPEDEF_POINTS_TO_VECTOR:-OFF} \
     -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
-    -DCMAKE_VERBOSE_MAKEFILE=ON \
     -DBOOST_ROOT=$BOOST_ROOT \
     -DBoost_NO_SYSTEM_PATHS=ON \
     -DBoost_ARCHITECTURE=-x64
diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml
index 911bec59c..afe328c3b 100644
--- a/.github/workflows/build-linux.yml
+++ b/.github/workflows/build-linux.yml
@@ -72,7 +72,7 @@ jobs:
         if: runner.os == 'Linux'
         run: |
           echo "BOOST_ROOT = $BOOST_ROOT"
-      - name: Build (Linux)
+      - name: Build and Test (Linux)
         if: runner.os == 'Linux'
         run: |
           bash .github/scripts/unix.sh -t
\ No newline at end of file
diff --git a/.github/workflows/build-macos.yml b/.github/workflows/build-macos.yml
index 55d9071ef..363cd690f 100644
--- a/.github/workflows/build-macos.yml
+++ b/.github/workflows/build-macos.yml
@@ -45,7 +45,7 @@ jobs:
             echo "::set-env name=CC::clang"
            echo "::set-env name=CXX::clang++"
           fi
-      - name: Build (macOS)
+      - name: Build and Test (macOS)
         if: runner.os == 'macOS'
         run: |
           bash .github/scripts/unix.sh -t
\ No newline at end of file
diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml
index 0b4a7f12f..7a09c13d1 100644
--- a/.github/workflows/build-python.yml
+++ b/.github/workflows/build-python.yml
@@ -20,24 +20,25 @@ jobs:
         # See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
name: [ ubuntu-18.04-gcc-5, - ubuntu-18.04-gcc-9, + # ubuntu-18.04-gcc-9, # TODO Disabled for now because of timeouts ubuntu-18.04-clang-9, macOS-10.15-xcode-11.3.1, ] build_type: [Debug, Release] python_version: [3] - wrapper: [cython] + wrapper: [pybind] include: - name: ubuntu-18.04-gcc-5 os: ubuntu-18.04 compiler: gcc version: "5" - - name: ubuntu-18.04-gcc-9 - os: ubuntu-18.04 - compiler: gcc - version: "9" + # TODO Disabled for now because of timeouts + # - name: ubuntu-18.04-gcc-9 + # os: ubuntu-18.04 + # compiler: gcc + # version: "9" - name: ubuntu-18.04-clang-9 os: ubuntu-18.04 diff --git a/.github/workflows/build-special.yml b/.github/workflows/build-special.yml new file mode 100644 index 000000000..21984311b --- /dev/null +++ b/.github/workflows/build-special.yml @@ -0,0 +1,112 @@ +name: Special Cases CI + +on: [pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + GTSAM_BUILD_UNSTABLE: ON + + strategy: + fail-fast: false + + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. + name: + [ + ubuntu-gcc-deprecated, + ubuntu-gcc-quaternions, + ubuntu-gcc-points-vector, + ] + + build_type: [Debug, Release] + + include: + - name: ubuntu-gcc-deprecated + os: ubuntu-18.04 + compiler: gcc + version: "9" + flag: deprecated + + - name: ubuntu-gcc-quaternions + os: ubuntu-18.04 + compiler: gcc + version: "9" + flag: quaternions + + - name: ubuntu-gcc-points-vector + os: ubuntu-18.04 + compiler: gcc + version: "9" + flag: points-vector + + steps: + - name: Checkout + uses: actions/checkout@master + + - name: Install (Linux) + if: runner.os == 'Linux' + run: | + # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository. 
+ if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" + fi + sudo apt-get -y update + + sudo apt install cmake build-essential pkg-config libpython-dev python-numpy + + echo "::set-env name=BOOST_ROOT::$(echo $BOOST_ROOT_1_69_0)" + echo "::set-env name=LD_LIBRARY_PATH::$(echo $BOOST_ROOT_1_69_0/lib)" + + if [ "${{ matrix.compiler }}" = "gcc" ]; then + sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib + echo "::set-env name=CC::gcc-${{ matrix.version }}" + echo "::set-env name=CXX::g++-${{ matrix.version }}" + else + sudo apt-get install -y clang-${{ matrix.version }} g++-multilib + echo "::set-env name=CC::clang-${{ matrix.version }}" + echo "::set-env name=CXX::clang++-${{ matrix.version }}" + fi + + - name: Install (macOS) + if: runner.os == 'macOS' + run: | + brew install cmake ninja boost + if [ "${{ matrix.compiler }}" = "gcc" ]; then + brew install gcc@${{ matrix.version }} + echo "::set-env name=CC::gcc-${{ matrix.version }}" + echo "::set-env name=CXX::g++-${{ matrix.version }}" + else + sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app + echo "::set-env name=CC::clang" + echo "::set-env name=CXX::clang++" + fi + + - name: Set Allow Deprecated Flag + if: matrix.flag == 'deprecated' + env: + GTSAM_ALLOW_DEPRECATED_SINCE_V41: ON + run: echo "Allow deprecated since version 4.1" + + - name: Set Use Quaternions Flag + if: matrix.flag == 'quaternions' + env: + GTSAM_USE_QUATERNIONS: ON + run: echo "Use Quaternions for rotations" + + - name: Set Typedef Points to Vector Flag + if: matrix.flag == 'points-vector' + env: + GTSAM_TYPEDEF_POINTS_TO_VECTOR: ON + run: echo "Typedef Points to Vector" + + - name: Build & Test + run: | + bash .github/scripts/unix.sh -t diff --git a/.gitignore b/.gitignore index 1d89cac25..c2d6ce60f 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,4 @@ cython/gtsam_wrapper.pxd /CMakeSettings.json # for QtCreator: CMakeLists.txt.user* +xcode/ diff --git a/CMakeLists.txt b/CMakeLists.txt index d2b9bc75e..c2918f1cb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -65,18 +65,20 @@ add_custom_target(uninstall # Configurable Options if(GTSAM_UNSTABLE_AVAILABLE) option(GTSAM_BUILD_UNSTABLE "Enable/Disable libgtsam_unstable" ON) + option(GTSAM_UNSTABLE_BUILD_PYTHON "Enable/Disable Python wrapper for libgtsam_unstable" ON) + option(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX "Enable/Disable MATLAB wrapper for libgtsam_unstable" OFF) endif() option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON) option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF) -option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON) -option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON) +option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON) +option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." 
ON) option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF) option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON) option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF) option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF) option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON) +option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF) option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON) -option(GTSAM_TYPEDEF_POINTS_TO_VECTORS "Typedef Point2 and Point3 to Eigen::Vector equivalents" OFF) option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON) option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON) if(NOT MSVC AND NOT XCODE_VERSION) @@ -99,37 +101,40 @@ endif() # Options relating to MATLAB wrapper # TODO: Check for matlab mex binary before handling building of binaries option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF) -option(GTSAM_INSTALL_CYTHON_TOOLBOX "Enable/Disable installation of Cython toolbox" OFF) -option(GTSAM_BUILD_WRAP "Enable/Disable building of matlab/cython wrap utility (necessary for matlab/cython interface)" ON) -set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of python to build the cython wrapper for (or Default)") +set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of Python to build the wrappers against.") # Check / set dependent variables for MATLAB wrapper -if((GTSAM_INSTALL_MATLAB_TOOLBOX OR GTSAM_INSTALL_CYTHON_TOOLBOX) AND NOT GTSAM_BUILD_WRAP) - message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX or GTSAM_INSTALL_CYTHON_TOOLBOX is enabled, please also enable GTSAM_BUILD_WRAP") -endif() -if((GTSAM_INSTALL_MATLAB_TOOLBOX OR GTSAM_INSTALL_CYTHON_TOOLBOX) AND GTSAM_BUILD_TYPE_POSTFIXES) - set(CURRENT_POSTFIX ${CMAKE_${CMAKE_BUILD_TYPE_UPPER}_POSTFIX}) -endif() -if(GTSAM_INSTALL_WRAP AND NOT GTSAM_BUILD_WRAP) - message(FATAL_ERROR "GTSAM_INSTALL_WRAP is enabled, please also enable GTSAM_BUILD_WRAP") +if(GTSAM_INSTALL_MATLAB_TOOLBOX AND GTSAM_BUILD_TYPE_POSTFIXES) + set(CURRENT_POSTFIX ${CMAKE_${CMAKE_BUILD_TYPE_UPPER}_POSTFIX}) endif() if(GTSAM_INSTALL_MATLAB_TOOLBOX AND NOT BUILD_SHARED_LIBS) - message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and BUILD_SHARED_LIBS=OFF. The MATLAB wrapper cannot be compiled with a static GTSAM library because mex modules are themselves shared libraries. If you want a self-contained mex module, enable GTSAM_MEX_BUILD_STATIC_MODULE instead of BUILD_SHARED_LIBS=OFF.") + message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and BUILD_SHARED_LIBS=OFF. The MATLAB wrapper cannot be compiled with a static GTSAM library because mex modules are themselves shared libraries. If you want a self-contained mex module, enable GTSAM_MEX_BUILD_STATIC_MODULE instead of BUILD_SHARED_LIBS=OFF.") endif() -if(GTSAM_INSTALL_MATLAB_TOOLBOX AND GTSAM_TYPEDEF_POINTS_TO_VECTORS) - message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and GTSAM_TYPEDEF_POINTS_TO_VECTORS are both enabled. For now, the MATLAB toolbox cannot deal with this yet. 
Please turn one of the two options off.") -endif() +if(GTSAM_BUILD_PYTHON) + if(GTSAM_UNSTABLE_BUILD_PYTHON) + if (NOT GTSAM_BUILD_UNSTABLE) + message(WARNING "GTSAM_UNSTABLE_BUILD_PYTHON requires the unstable module to be enabled.") + set(GTSAM_UNSTABLE_BUILD_PYTHON OFF) + endif() + endif() -if(GTSAM_INSTALL_CYTHON_TOOLBOX AND GTSAM_TYPEDEF_POINTS_TO_VECTORS) - message(FATAL_ERROR "GTSAM_INSTALL_CYTHON_TOOLBOX and GTSAM_TYPEDEF_POINTS_TO_VECTORS are both enabled. For now, the CYTHON toolbox cannot deal with this yet. Please turn one of the two options off.") + set(GTSAM_PY_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/python") endif() # Flags for choosing default packaging tools set(CPACK_SOURCE_GENERATOR "TGZ" CACHE STRING "CPack Default Source Generator") set(CPACK_GENERATOR "TGZ" CACHE STRING "CPack Default Binary Generator") +if (CMAKE_GENERATOR STREQUAL "Ninja" AND + ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) OR + (CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.5))) + # Force colored warnings in Ninja's output, if the compiler has -fdiagnostics-color support. + # Rationale in https://github.com/ninja-build/ninja/issues/814 + add_compile_options(-fdiagnostics-color=always) +endif() + ############################################################################### # Find boost @@ -138,18 +143,18 @@ set(CPACK_GENERATOR "TGZ" CACHE STRING "CPack Default Binary Generator") # Boost_NO_SYSTEM_PATHS: set to true to keep the find script from ignoring BOOST_ROOT if(MSVC) - # By default, boost only builds static libraries on windows - set(Boost_USE_STATIC_LIBS ON) # only find static libs - # If we ever reset above on windows and, ... - # If we use Boost shared libs, disable auto linking. - # Some libraries, at least Boost Program Options, rely on this to export DLL symbols. - if(NOT Boost_USE_STATIC_LIBS) - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_ALL_NO_LIB BOOST_ALL_DYN_LINK) - endif() - # Virtual memory range for PCH exceeded on VS2015 - if(MSVC_VERSION LESS 1910) # older than VS2017 - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Zm295) - endif() + # By default, boost only builds static libraries on windows + set(Boost_USE_STATIC_LIBS ON) # only find static libs + # If we ever reset above on windows and, ... + # If we use Boost shared libs, disable auto linking. + # Some libraries, at least Boost Program Options, rely on this to export DLL symbols. + if(NOT Boost_USE_STATIC_LIBS) + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_ALL_NO_LIB BOOST_ALL_DYN_LINK) + endif() + # Virtual memory range for PCH exceeded on VS2015 + if(MSVC_VERSION LESS 1910) # older than VS2017 + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Zm295) + endif() endif() # If building DLLs in MSVC, we need to avoid EIGEN_STATIC_ASSERT() @@ -157,7 +162,7 @@ endif() # See: https://bitbucket.org/gtborg/gtsam/issues/417/fail-to-build-on-msvc-2017 # if(MSVC AND BUILD_SHARED_LIBS) - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) endif() # Store these in variables so they are automatically replicated in GTSAMConfig.cmake and such. 
@@ -227,16 +232,16 @@ find_package(GooglePerfTools) ############################################################################### # Support ccache, if installed if(NOT MSVC AND NOT XCODE_VERSION) - find_program(CCACHE_FOUND ccache) - if(CCACHE_FOUND) - if(GTSAM_BUILD_WITH_CCACHE) - set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) - set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache) - else() - set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "") - set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "") - endif() - endif(CCACHE_FOUND) + find_program(CCACHE_FOUND ccache) + if(CCACHE_FOUND) + if(GTSAM_BUILD_WITH_CCACHE) + set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) + set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache) + else() + set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "") + set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "") + endif() + endif(CCACHE_FOUND) endif() ############################################################################### @@ -280,74 +285,74 @@ option(GTSAM_WITH_EIGEN_UNSUPPORTED "Install Eigen's unsupported modules" OFF) # Switch for using system Eigen or GTSAM-bundled Eigen if(GTSAM_USE_SYSTEM_EIGEN) - find_package(Eigen3 REQUIRED) + find_package(Eigen3 REQUIRED) - # Use generic Eigen include paths e.g. - set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}") + # Use generic Eigen include paths e.g. + set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}") - # check if MKL is also enabled - can have one or the other, but not both! - # Note: Eigen >= v3.2.5 includes our patches - if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_LESS 3.2.5)) - message(FATAL_ERROR "MKL requires at least Eigen 3.2.5, and your system appears to have an older version. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, or disable GTSAM_WITH_EIGEN_MKL") - endif() + # check if MKL is also enabled - can have one or the other, but not both! + # Note: Eigen >= v3.2.5 includes our patches + if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_LESS 3.2.5)) + message(FATAL_ERROR "MKL requires at least Eigen 3.2.5, and your system appears to have an older version. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, or disable GTSAM_WITH_EIGEN_MKL") + endif() - # Check for Eigen version which doesn't work with MKL - # See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527 for details. - if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_EQUAL 3.3.4)) - message(FATAL_ERROR "MKL does not work with Eigen 3.3.4 because of a bug in Eigen. See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, disable GTSAM_WITH_EIGEN_MKL, or upgrade/patch your installation of Eigen.") - endif() + # Check for Eigen version which doesn't work with MKL + # See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527 for details. + if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_EQUAL 3.3.4)) + message(FATAL_ERROR "MKL does not work with Eigen 3.3.4 because of a bug in Eigen. See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, disable GTSAM_WITH_EIGEN_MKL, or upgrade/patch your installation of Eigen.") + endif() - # The actual include directory (for BUILD cmake target interface): - set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${EIGEN3_INCLUDE_DIR}") + # The actual include directory (for BUILD cmake target interface): + set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${EIGEN3_INCLUDE_DIR}") else() - # Use bundled Eigen include path. 
- # Clear any variables set by FindEigen3 - if(EIGEN3_INCLUDE_DIR) - set(EIGEN3_INCLUDE_DIR NOTFOUND CACHE STRING "" FORCE) - endif() + # Use bundled Eigen include path. + # Clear any variables set by FindEigen3 + if(EIGEN3_INCLUDE_DIR) + set(EIGEN3_INCLUDE_DIR NOTFOUND CACHE STRING "" FORCE) + endif() - # set full path to be used by external projects - # this will be added to GTSAM_INCLUDE_DIR by gtsam_extra.cmake.in - set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "include/gtsam/3rdparty/Eigen/") + # set full path to be used by external projects + # this will be added to GTSAM_INCLUDE_DIR by gtsam_extra.cmake.in + set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "include/gtsam/3rdparty/Eigen/") - # The actual include directory (for BUILD cmake target interface): - set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${CMAKE_SOURCE_DIR}/gtsam/3rdparty/Eigen/") + # The actual include directory (for BUILD cmake target interface): + set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${CMAKE_SOURCE_DIR}/gtsam/3rdparty/Eigen/") endif() # Detect Eigen version: set(EIGEN_VER_H "${GTSAM_EIGEN_INCLUDE_FOR_BUILD}/Eigen/src/Core/util/Macros.h") if (EXISTS ${EIGEN_VER_H}) - file(READ "${EIGEN_VER_H}" STR_EIGEN_VERSION) + file(READ "${EIGEN_VER_H}" STR_EIGEN_VERSION) - # Extract the Eigen version from the Macros.h file, lines "#define EIGEN_WORLD_VERSION XX", etc... + # Extract the Eigen version from the Macros.h file, lines "#define EIGEN_WORLD_VERSION XX", etc... - string(REGEX MATCH "EIGEN_WORLD_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${STR_EIGEN_VERSION}") - string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${GTSAM_EIGEN_VERSION_WORLD}") + string(REGEX MATCH "EIGEN_WORLD_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${STR_EIGEN_VERSION}") + string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${GTSAM_EIGEN_VERSION_WORLD}") - string(REGEX MATCH "EIGEN_MAJOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${STR_EIGEN_VERSION}") - string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${GTSAM_EIGEN_VERSION_MAJOR}") + string(REGEX MATCH "EIGEN_MAJOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${STR_EIGEN_VERSION}") + string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${GTSAM_EIGEN_VERSION_MAJOR}") - string(REGEX MATCH "EIGEN_MINOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${STR_EIGEN_VERSION}") - string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${GTSAM_EIGEN_VERSION_MINOR}") + string(REGEX MATCH "EIGEN_MINOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${STR_EIGEN_VERSION}") + string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${GTSAM_EIGEN_VERSION_MINOR}") - set(GTSAM_EIGEN_VERSION "${GTSAM_EIGEN_VERSION_WORLD}.${GTSAM_EIGEN_VERSION_MAJOR}.${GTSAM_EIGEN_VERSION_MINOR}") + set(GTSAM_EIGEN_VERSION "${GTSAM_EIGEN_VERSION_WORLD}.${GTSAM_EIGEN_VERSION_MAJOR}.${GTSAM_EIGEN_VERSION_MINOR}") - message(STATUS "Found Eigen version: ${GTSAM_EIGEN_VERSION}") + message(STATUS "Found Eigen version: ${GTSAM_EIGEN_VERSION}") else() - message(WARNING "Cannot determine Eigen version, missing file: `${EIGEN_VER_H}`") + message(WARNING "Cannot determine Eigen version, missing file: `${EIGEN_VER_H}`") endif () if (MSVC) - if (BUILD_SHARED_LIBS) - # mute eigen static assert to avoid errors in shared lib - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) - endif() - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE "/wd4244") # Disable loss of precision which is thrown all over our Eigen + if (BUILD_SHARED_LIBS) + # mute eigen static assert to avoid errors in shared lib + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC 
EIGEN_NO_STATIC_ASSERT) + endif() + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE "/wd4244") # Disable loss of precision which is thrown all over our Eigen endif() if (APPLE AND BUILD_SHARED_LIBS) - # Set the default install directory on macOS - set(CMAKE_INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib") + # Set the default install directory on macOS + set(CMAKE_INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib") endif() ############################################################################### @@ -356,42 +361,42 @@ endif() # Build list of possible allocators set(possible_allocators "") if(GTSAM_USE_TBB) - list(APPEND possible_allocators TBB) - set(preferred_allocator TBB) + list(APPEND possible_allocators TBB) + set(preferred_allocator TBB) else() - list(APPEND possible_allocators BoostPool STL) - set(preferred_allocator STL) + list(APPEND possible_allocators BoostPool STL) + set(preferred_allocator STL) endif() if(GOOGLE_PERFTOOLS_FOUND) - list(APPEND possible_allocators tcmalloc) + list(APPEND possible_allocators tcmalloc) endif() # Check if current allocator choice is valid and set cache option list(FIND possible_allocators "${GTSAM_DEFAULT_ALLOCATOR}" allocator_valid) if(allocator_valid EQUAL -1) - set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator" FORCE) + set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator" FORCE) else() - set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator") + set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator") endif() set_property(CACHE GTSAM_DEFAULT_ALLOCATOR PROPERTY STRINGS ${possible_allocators}) mark_as_advanced(GTSAM_DEFAULT_ALLOCATOR) # Define compile flags depending on allocator if("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "BoostPool") - set(GTSAM_ALLOCATOR_BOOSTPOOL 1) + set(GTSAM_ALLOCATOR_BOOSTPOOL 1) elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "STL") - set(GTSAM_ALLOCATOR_STL 1) + set(GTSAM_ALLOCATOR_STL 1) elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "TBB") - set(GTSAM_ALLOCATOR_TBB 1) + set(GTSAM_ALLOCATOR_TBB 1) elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "tcmalloc") - set(GTSAM_ALLOCATOR_STL 1) # tcmalloc replaces malloc, so to use it we use the STL allocator - list(APPEND GTSAM_ADDITIONAL_LIBRARIES "tcmalloc") + set(GTSAM_ALLOCATOR_STL 1) # tcmalloc replaces malloc, so to use it we use the STL allocator + list(APPEND GTSAM_ADDITIONAL_LIBRARIES "tcmalloc") endif() if(MSVC) - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE _CRT_SECURE_NO_WARNINGS _SCL_SECURE_NO_WARNINGS) - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /wd4251 /wd4275 /wd4251 /wd4661 /wd4344 /wd4503) # Disable non-DLL-exported base class and other warnings - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /bigobj) # Allow large object files for template-based code + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE _CRT_SECURE_NO_WARNINGS _SCL_SECURE_NO_WARNINGS) + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /wd4251 /wd4275 /wd4251 /wd4661 /wd4344 /wd4503) # Disable non-DLL-exported base class and other warnings + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /bigobj) # Allow large object files for template-based code endif() # GCC 4.8+ complains about local typedefs which we use for shared_ptr etc. 
@@ -419,14 +424,11 @@ endif() # Build CppUnitLite add_subdirectory(CppUnitLite) -# Build wrap -if (GTSAM_BUILD_WRAP) - add_subdirectory(wrap) - # suppress warning of cython line being too long - if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-misleading-indentation") - endif() -endif(GTSAM_BUILD_WRAP) +# This is the new wrapper +if(GTSAM_BUILD_PYTHON) + list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/wrap/cmake") + add_subdirectory(python) +endif() # Build GTSAM library add_subdirectory(gtsam) @@ -447,23 +449,9 @@ endif() # Matlab toolbox if (GTSAM_INSTALL_MATLAB_TOOLBOX) - add_subdirectory(matlab) + add_subdirectory(matlab) endif() -# Cython wrap -if (GTSAM_INSTALL_CYTHON_TOOLBOX) - set(GTSAM_INSTALL_CYTHON_TOOLBOX 1) - # Set up cache options - # Cython install path appended with Build type (e.g. cython, cythonDebug, etc). - # This does not override custom values set from the command line - set(GTSAM_CYTHON_INSTALL_PATH "${PROJECT_BINARY_DIR}/cython${GTSAM_BUILD_TAG}" CACHE PATH "Cython toolbox destination, blank defaults to PROJECT_BINARY_DIR/cython") - set(GTSAM_EIGENCY_INSTALL_PATH ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency) - add_subdirectory(cython ${GTSAM_CYTHON_INSTALL_PATH}) -else() - set(GTSAM_INSTALL_CYTHON_TOOLBOX 0) # This will go into config.h -endif() - - # Install config and export files GtsamMakeConfigFile(GTSAM "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in") export(TARGETS ${GTSAM_EXPORTED_TARGETS} FILE GTSAM-exports.cmake) @@ -524,7 +512,9 @@ endif() print_enabled_config(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries") print_enabled_config(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name") if(GTSAM_UNSTABLE_AVAILABLE) - print_enabled_config(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable") + print_enabled_config(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ") + print_enabled_config(${GTSAM_UNSTABLE_BUILD_PYTHON} "Build GTSAM unstable Python ") + print_enabled_config(${GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX} "Build MATLAB Toolbox for unstable") endif() if(NOT MSVC AND NOT XCODE_VERSION) @@ -572,13 +562,13 @@ else() endif() if(NOT MSVC AND NOT XCODE_VERSION) - if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE) + if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE) print_config("Build with ccache" "Yes") - elseif(CCACHE_FOUND) + elseif(CCACHE_FOUND) print_config("Build with ccache" "ccache found but GTSAM_BUILD_WITH_CCACHE is disabled") - else() + else() print_config("Build with ccache" "No") - endif() + endif() endif() message(STATUS "Packaging flags") @@ -591,10 +581,8 @@ print_enabled_config(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency c print_enabled_config(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ") print_enabled_config(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ") print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V41} "Allow features deprecated in GTSAM 4.1") -print_enabled_config(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ") print_enabled_config(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ") print_enabled_config(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration") -print_enabled_config(${GTSAM_BUILD_WRAP} "Build Wrap ") message(STATUS "MATLAB toolbox flags") print_enabled_config(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ") @@ -602,23 +590,23 @@ if (${GTSAM_INSTALL_MATLAB_TOOLBOX}) print_config("MATLAB root" "${MATLAB_ROOT}") print_config("MEX binary" "${MEX_COMMAND}") endif() - -message(STATUS "Cython 
toolbox flags ")
-print_enabled_config(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ")
-if(GTSAM_INSTALL_CYTHON_TOOLBOX)
-    print_config("Python version" "${GTSAM_PYTHON_VERSION}")
+message(STATUS "Python toolbox flags ")
+print_enabled_config(${GTSAM_BUILD_PYTHON} "Build Python module with pybind ")
+if(GTSAM_BUILD_PYTHON)
+    print_config("Python version" ${GTSAM_PYTHON_VERSION})
 endif()
+
 message(STATUS "===============================================================")
 
 # Print warnings at the end
 if(GTSAM_WITH_TBB AND NOT TBB_FOUND)
-    message(WARNING "TBB 4.4 or newer was not found - this is ok, but note that GTSAM parallelization will be disabled. Set GTSAM_WITH_TBB to 'Off' to avoid this warning.")
+  message(WARNING "TBB 4.4 or newer was not found - this is ok, but note that GTSAM parallelization will be disabled. Set GTSAM_WITH_TBB to 'Off' to avoid this warning.")
 endif()
 if(GTSAM_WITH_EIGEN_MKL AND NOT MKL_FOUND)
-    message(WARNING "MKL was not found - this is ok, but note that MKL will be disabled. Set GTSAM_WITH_EIGEN_MKL to 'Off' to disable this warning. See INSTALL.md for notes on performance.")
+  message(WARNING "MKL was not found - this is ok, but note that MKL will be disabled. Set GTSAM_WITH_EIGEN_MKL to 'Off' to disable this warning. See INSTALL.md for notes on performance.")
 endif()
 if(GTSAM_WITH_EIGEN_MKL_OPENMP AND NOT OPENMP_FOUND AND MKL_FOUND)
-    message(WARNING "Your compiler does not support OpenMP. Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning. See INSTALL.md for notes on performance.")
+  message(WARNING "Your compiler does not support OpenMP. Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning. See INSTALL.md for notes on performance.")
 endif()
 
 # Include CPack *after* all flags
diff --git a/README.md b/README.md
index 015d65e3d..99903f0b9 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,10 @@
 # README - Georgia Tech Smoothing and Mapping Library
 
-**As of August 1, develop is officially in "Pre 4.1" mode, and features deprecated in 4.0 were removed. Use the last 4.0.3 release if you need those features. However, most are easily converted and can be tracked down (in 4.0.3) by disabling the cmake flag GTSAM_ALLOW_DEPRECATED_SINCE_V4**
+**Important Note**
+
+As of August 1 2020, the `develop` branch is officially in "Pre 4.1" mode, and features deprecated in 4.0 have been removed. Please use the last [4.0.3 release](https://github.com/borglab/gtsam/releases/tag/4.0.3) if you need those features.
+
+However, most are easily converted and can be tracked down (in 4.0.3) by disabling the cmake flag `GTSAM_ALLOW_DEPRECATED_SINCE_V4`.
 
 ## What is GTSAM?
 
@@ -9,13 +13,14 @@ mapping (SAM) in robotics and vision, using Factor Graphs and Bayes Networks
 as the underlying computing paradigm rather than sparse matrices.
 
-| Platform | Build Status |
-|:---------:|:-------------:|
-| gcc/clang | [![Build Status](https://travis-ci.com/borglab/gtsam.svg?branch=develop)](https://travis-ci.com/borglab/gtsam/) |
-| MSVC | [![Build status](https://ci.appveyor.com/api/projects/status/3enllitj52jsxwfg/branch/develop?svg=true)](https://ci.appveyor.com/project/dellaert/gtsam) |
+| Platform | Compiler | Build Status |
+|:------------:|:---------:|:-------------:|
+| Ubuntu 18.04 | gcc/clang | ![Linux CI](https://github.com/borglab/gtsam/workflows/Linux%20CI/badge.svg) |
+| macOS | clang | ![macOS CI](https://github.com/borglab/gtsam/workflows/macOS%20CI/badge.svg) |
+| Windows | MSVC | ![Windows CI](https://github.com/borglab/gtsam/workflows/Windows%20CI/badge.svg) |
 
-On top of the C++ library, GTSAM includes [wrappers for MATLAB & Python](##Wrappers).
+On top of the C++ library, GTSAM includes [wrappers for MATLAB & Python](#wrappers).
 
 ## Quickstart
 
diff --git a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt
index d612e2fae..9d9ecd48b 100644
--- a/cmake/CMakeLists.txt
+++ b/cmake/CMakeLists.txt
@@ -17,8 +17,6 @@ install(FILES
     GtsamBuildTypes.cmake
     GtsamMakeConfigFile.cmake
     GtsamMatlabWrap.cmake
-    GtsamPythonWrap.cmake
-    GtsamCythonWrap.cmake
     GtsamTesting.cmake
     GtsamPrinting.cmake
     FindCython.cmake
diff --git a/cmake/GtsamCythonWrap.cmake b/cmake/GtsamCythonWrap.cmake
deleted file mode 100644
index c8f876895..000000000
--- a/cmake/GtsamCythonWrap.cmake
+++ /dev/null
@@ -1,204 +0,0 @@
-# Check Cython version, need to be >=0.25.2
-# Unset these cached variables to avoid surprises when the python/cython
-# in the current environment are different from the cached!
-unset(PYTHON_EXECUTABLE CACHE)
-unset(CYTHON_EXECUTABLE CACHE)
-unset(PYTHON_INCLUDE_DIR CACHE)
-unset(PYTHON_MAJOR_VERSION CACHE)
-unset(PYTHON_LIBRARY CACHE)
-
-# Allow override from command line
-if(NOT DEFINED GTSAM_USE_CUSTOM_PYTHON_LIBRARY)
-  if(GTSAM_PYTHON_VERSION STREQUAL "Default")
-    find_package(PythonInterp REQUIRED)
-    find_package(PythonLibs REQUIRED)
-  else()
-    find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT REQUIRED)
-    find_package(PythonLibs ${GTSAM_PYTHON_VERSION} EXACT REQUIRED)
-  endif()
-endif()
-find_package(Cython 0.25.2 REQUIRED)
-
-execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
-  "from __future__ import print_function;import sys;print(sys.version[0], end='')"
-  OUTPUT_VARIABLE PYTHON_MAJOR_VERSION
-)
-
-# User-friendly Cython wrapping and installing function.
-# Builds a Cython module from the provided interface_header.
-# For example, for the interface header gtsam.h,
-# this will build the wrap module 'gtsam'.
-#
-# Arguments:
-#
-# interface_header: The relative path to the wrapper interface definition file.
-# extra_imports: extra header to import in the Cython pxd file.
-# For example, to use Cython gtsam.pxd in your own module, -# use "from gtsam cimport *" -# install_path: destination to install the library -# libs: libraries to link with -# dependencies: Dependencies which need to be built before the wrapper -function(wrap_and_install_library_cython interface_header extra_imports install_path libs dependencies) - # Paths for generated files - get_filename_component(module_name "${interface_header}" NAME_WE) - set(generated_files_path "${install_path}") - wrap_library_cython("${interface_header}" "${generated_files_path}" "${extra_imports}" "${libs}" "${dependencies}") -endfunction() - -function(set_up_required_cython_packages) - # Set up building of cython module - include_directories(${PYTHON_INCLUDE_DIRS}) - find_package(NumPy REQUIRED) - include_directories(${NUMPY_INCLUDE_DIRS}) -endfunction() - - -# Convert pyx to cpp by executing cython -# This is the first step to compile cython from the command line -# as described at: http://cython.readthedocs.io/en/latest/src/reference/compilation.html -# -# Arguments: -# - target: The specified target for this step -# - pyx_file: The input pyx_file in full *absolute* path -# - generated_cpp: The output cpp file in full absolute path -# - include_dirs: Directories to include when executing cython -function(pyx_to_cpp target pyx_file generated_cpp include_dirs) - foreach(dir ${include_dirs}) - set(includes_for_cython ${includes_for_cython} -I ${dir}) - endforeach() - - add_custom_command( - OUTPUT ${generated_cpp} - COMMAND - ${CYTHON_EXECUTABLE} -X boundscheck=False -v --fast-fail --cplus -${PYTHON_MAJOR_VERSION} ${includes_for_cython} ${pyx_file} -o ${generated_cpp} - VERBATIM) - add_custom_target(${target} ALL DEPENDS ${generated_cpp}) -endfunction() - -# Build the cpp file generated by converting pyx using cython -# This is the second step to compile cython from the command line -# as described at: http://cython.readthedocs.io/en/latest/src/reference/compilation.html -# -# Arguments: -# - target: The specified target for this step -# - cpp_file: The input cpp_file in full *absolute* path -# - output_lib_we: The output lib filename only (without extension) -# - output_dir: The output directory -function(build_cythonized_cpp target cpp_file output_lib_we output_dir) - add_library(${target} MODULE ${cpp_file}) - - if(WIN32) - # Use .pyd extension instead of .dll on Windows - set_target_properties(${target} PROPERTIES SUFFIX ".pyd") - - # Add full path to the Python library - target_link_libraries(${target} ${PYTHON_LIBRARIES}) - endif() - - if(APPLE) - set(link_flags "-undefined dynamic_lookup") - endif() - set_target_properties(${target} - PROPERTIES COMPILE_FLAGS "-w" - LINK_FLAGS "${link_flags}" - OUTPUT_NAME ${output_lib_we} - PREFIX "" - ${CMAKE_BUILD_TYPE_UPPER}_POSTFIX "" - LIBRARY_OUTPUT_DIRECTORY ${output_dir}) -endfunction() - -# Cythonize a pyx from the command line as described at -# http://cython.readthedocs.io/en/latest/src/reference/compilation.html -# Arguments: -# - target: The specified target -# - pyx_file: The input pyx_file in full *absolute* path -# - output_lib_we: The output lib filename only (without extension) -# - output_dir: The output directory -# - include_dirs: Directories to include when executing cython -# - libs: Libraries to link with -# - interface_header: For dependency. 
Any update in interface header will re-trigger cythonize -function(cythonize target pyx_file output_lib_we output_dir include_dirs libs interface_header dependencies) - get_filename_component(pyx_path "${pyx_file}" DIRECTORY) - get_filename_component(pyx_name "${pyx_file}" NAME_WE) - set(generated_cpp "${output_dir}/${pyx_name}.cpp") - - set_up_required_cython_packages() - pyx_to_cpp(${target}_pyx2cpp ${pyx_file} ${generated_cpp} "${include_dirs}") - - # Late dependency injection, to make sure this gets called whenever the interface header is updated - # See: https://stackoverflow.com/questions/40032593/cmake-does-not-rebuild-dependent-after-prerequisite-changes - add_custom_command(OUTPUT ${generated_cpp} DEPENDS ${interface_header} ${pyx_file} APPEND) - if (NOT "${dependencies}" STREQUAL "") - add_dependencies(${target}_pyx2cpp "${dependencies}") - endif() - - build_cythonized_cpp(${target} ${generated_cpp} ${output_lib_we} ${output_dir}) - if (NOT "${libs}" STREQUAL "") - target_link_libraries(${target} "${libs}") - endif() - add_dependencies(${target} ${target}_pyx2cpp) - - if(TARGET ${python_install_target}) - add_dependencies(${python_install_target} ${target}) - endif() -endfunction() - -# Internal function that wraps a library and compiles the wrapper -function(wrap_library_cython interface_header generated_files_path extra_imports libs dependencies) - # Wrap codegen interface - # Extract module path and name from interface header file name - # wrap requires interfacePath to be *absolute* - get_filename_component(interface_header "${interface_header}" ABSOLUTE) - get_filename_component(module_path "${interface_header}" PATH) - get_filename_component(module_name "${interface_header}" NAME_WE) - - # Wrap module to Cython pyx - message(STATUS "Cython wrapper generating ${generated_files_path}/${module_name}.pyx") - set(generated_pyx "${generated_files_path}/${module_name}.pyx") - if(NOT EXISTS ${generated_files_path}) - file(MAKE_DIRECTORY "${generated_files_path}") - endif() - - add_custom_command( - OUTPUT ${generated_pyx} - DEPENDS ${interface_header} wrap - COMMAND - wrap --cython ${module_path} ${module_name} ${generated_files_path} "${extra_imports}" - VERBATIM - WORKING_DIRECTORY ${generated_files_path}/../) - add_custom_target(cython_wrap_${module_name}_pyx ALL DEPENDS ${generated_pyx}) - if(NOT "${dependencies}" STREQUAL "") - add_dependencies(cython_wrap_${module_name}_pyx ${dependencies}) - endif() - - message(STATUS "Cythonize and build ${module_name}.pyx") - get_property(include_dirs DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY INCLUDE_DIRECTORIES) - cythonize(cythonize_${module_name} ${generated_pyx} ${module_name} - ${generated_files_path} "${include_dirs}" "${libs}" ${interface_header} cython_wrap_${module_name}_pyx) - - # distclean - add_custom_target(wrap_${module_name}_cython_distclean - COMMAND cmake -E remove_directory ${generated_files_path}) -endfunction() - -# Helper function to install Cython scripts and handle multiple build types where the scripts -# should be installed to all build type toolboxes -# -# Arguments: -# source_directory: The source directory to be installed. "The last component of each directory -# name is appended to the destination directory but a trailing slash may be -# used to avoid this because it leaves the last component empty." -# (https://cmake.org/cmake/help/v3.3/command/install.html?highlight=install#installing-directories) -# dest_directory: The destination directory to install to. 
-# patterns: list of file patterns to install -function(install_cython_scripts source_directory dest_directory patterns) - set(patterns_args "") - set(exclude_patterns "") - - foreach(pattern ${patterns}) - list(APPEND patterns_args PATTERN "${pattern}") - endforeach() - - file(COPY "${source_directory}" DESTINATION "${dest_directory}" - FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE) -endfunction() diff --git a/cmake/GtsamMatlabWrap.cmake b/cmake/GtsamMatlabWrap.cmake index 5fc829bf2..111114a7b 100644 --- a/cmake/GtsamMatlabWrap.cmake +++ b/cmake/GtsamMatlabWrap.cmake @@ -23,6 +23,11 @@ else() file(GLOB matlab_bin_directories "/usr/local/MATLAB/*/bin") set(mex_program_name "mex") endif() + +if(GTSAM_CUSTOM_MATLAB_PATH) + set(matlab_bin_directories ${GTSAM_CUSTOM_MATLAB_PATH}) +endif() + # Run find_program explicitly putting $PATH after our predefined program # directories using 'ENV PATH' and 'NO_SYSTEM_ENVIRONMENT_PATH' - this prevents # finding the LaTeX mex program (totally unrelated to MATLAB Mex) when LaTeX is @@ -209,15 +214,34 @@ function(wrap_library_internal interfaceHeader linkLibraries extraIncludeDirs ex # Set up generation of module source file file(MAKE_DIRECTORY "${generated_files_path}") + + if(GTSAM_PYTHON_VERSION STREQUAL "Default") + find_package(PythonInterp REQUIRED) + find_package(PythonLibs REQUIRED) + else() + find_package(PythonInterp + ${GTSAM_PYTHON_VERSION} + EXACT + REQUIRED) + find_package(PythonLibs + ${GTSAM_PYTHON_VERSION} + EXACT + REQUIRED) + endif() + + set(_ignore gtsam::Point2 + gtsam::Point3) add_custom_command( OUTPUT ${generated_cpp_file} - DEPENDS ${interfaceHeader} wrap ${module_library_target} ${otherLibraryTargets} ${otherSourcesAndObjects} - COMMAND - wrap --matlab - ${modulePath} - ${moduleName} - ${generated_files_path} - ${matlab_h_path} + DEPENDS ${interfaceHeader} ${module_library_target} ${otherLibraryTargets} ${otherSourcesAndObjects} + COMMAND + ${PYTHON_EXECUTABLE} + ${CMAKE_SOURCE_DIR}/wrap/matlab_wrapper.py + --src ${interfaceHeader} + --module_name ${moduleName} + --out ${generated_files_path} + --top_module_namespaces ${moduleName} + --ignore ${_ignore} VERBATIM WORKING_DIRECTORY ${generated_files_path}) diff --git a/cmake/GtsamPythonWrap.cmake b/cmake/GtsamPythonWrap.cmake deleted file mode 100644 index 714e37488..000000000 --- a/cmake/GtsamPythonWrap.cmake +++ /dev/null @@ -1,102 +0,0 @@ -#Setup cache options -set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "Target python version for GTSAM python module. Use 'Default' to chose the default version") -set(GTSAM_BUILD_PYTHON_FLAGS "" CACHE STRING "Extra flags for running Matlab PYTHON compilation") -set(GTSAM_PYTHON_INSTALL_PATH "" CACHE PATH "Python toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/borg/python") -if(NOT GTSAM_PYTHON_INSTALL_PATH) - set(GTSAM_PYTHON_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/borg/python") -endif() - -#Author: Paul Furgale Modified by Andrew Melim -function(wrap_python TARGET_NAME PYTHON_MODULE_DIRECTORY) - # # Boost - # find_package(Boost COMPONENTS python filesystem system REQUIRED) - # include_directories(${Boost_INCLUDE_DIRS}) - - # # Find Python - # FIND_PACKAGE(PythonLibs 2.7 REQUIRED) - # INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIRS}) - - IF(APPLE) - # The apple framework headers don't include the numpy headers for some reason. 
- GET_FILENAME_COMPONENT(REAL_PYTHON_INCLUDE ${PYTHON_INCLUDE_DIRS} REALPATH) - IF( ${REAL_PYTHON_INCLUDE} MATCHES Python.framework) - message("Trying to find extra headers for numpy from ${REAL_PYTHON_INCLUDE}.") - message("Looking in ${REAL_PYTHON_INCLUDE}/../../Extras/lib/python/numpy/core/include/numpy") - FIND_PATH(NUMPY_INCLUDE_DIR arrayobject.h - ${REAL_PYTHON_INCLUDE}/../../Extras/lib/python/numpy/core/include/numpy - ${REAL_PYTHON_INCLUDE}/numpy - ) - IF(${NUMPY_INCLUDE_DIR} MATCHES NOTFOUND) - message("Unable to find numpy include directories: ${NUMPY_INCLUDE_DIR}") - ELSE() - message("Found headers at ${NUMPY_INCLUDE_DIR}") - INCLUDE_DIRECTORIES(${NUMPY_INCLUDE_DIR}) - INCLUDE_DIRECTORIES(${NUMPY_INCLUDE_DIR}/..) - ENDIF() - ENDIF() - ENDIF(APPLE) - - if(MSVC) - add_library(${moduleName}_python MODULE ${ARGN}) - set_target_properties(${moduleName}_python PROPERTIES - OUTPUT_NAME ${moduleName}_python - CLEAN_DIRECT_OUTPUT 1 - VERSION 1 - SOVERSION 0 - SUFFIX ".pyd") - target_link_libraries(${moduleName}_python ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARY} ${gtsamLib}) #temp - - set(PYLIB_OUTPUT_FILE $) - message(${PYLIB_OUTPUT_FILE}) - get_filename_component(PYLIB_OUTPUT_NAME ${PYLIB_OUTPUT_FILE} NAME_WE) - set(PYLIB_SO_NAME ${PYLIB_OUTPUT_NAME}.pyd) - - ELSE() - # Create a shared library - add_library(${moduleName}_python SHARED ${generated_cpp_file}) - - set_target_properties(${moduleName}_python PROPERTIES - OUTPUT_NAME ${moduleName}_python - CLEAN_DIRECT_OUTPUT 1) - target_link_libraries(${moduleName}_python ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARY} ${gtsamLib}) #temp - # On OSX and Linux, the python library must end in the extension .so. Build this - # filename here. - get_property(PYLIB_OUTPUT_FILE TARGET ${moduleName}_python PROPERTY LOCATION) - set(PYLIB_OUTPUT_FILE $) - message(${PYLIB_OUTPUT_FILE}) - get_filename_component(PYLIB_OUTPUT_NAME ${PYLIB_OUTPUT_FILE} NAME_WE) - set(PYLIB_SO_NAME lib${moduleName}_python.so) - ENDIF(MSVC) - - # Installs the library in the gtsam folder, which is used by setup.py to create the gtsam package - set(PYTHON_MODULE_DIRECTORY ${CMAKE_SOURCE_DIR}/python/gtsam) - # Cause the library to be output in the correct directory. - add_custom_command(TARGET ${moduleName}_python - POST_BUILD - COMMAND cp -v ${PYLIB_OUTPUT_FILE} ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME} - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} - COMMENT "Copying library files to python directory" ) - - # Cause the library to be output in the correct directory. 
- add_custom_command(TARGET ${TARGET_NAME} - POST_BUILD - COMMAND cp -v ${PYLIB_OUTPUT_FILE} ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME} - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} - COMMENT "Copying library files to python directory" ) - - get_directory_property(AMCF ADDITIONAL_MAKE_CLEAN_FILES) - list(APPEND AMCF ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME}) - set_directory_properties(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES "${AMCF}") -endfunction(wrap_python) - -# Macro to get list of subdirectories -macro(SUBDIRLIST result curdir) - file(GLOB children RELATIVE ${curdir} ${curdir}/*) - set(dirlist "") - foreach(child ${children}) - if(IS_DIRECTORY ${curdir}/${child}) - list(APPEND dirlist ${child}) - endif() - endforeach() - set(${result} ${dirlist}) -endmacro() diff --git a/cmake/dllexport.h.in b/cmake/dllexport.h.in index 9a0a344b7..7d757edea 100644 --- a/cmake/dllexport.h.in +++ b/cmake/dllexport.h.in @@ -47,9 +47,14 @@ # endif # endif #else +#ifdef __APPLE__ +# define @library_name@_EXPORT __attribute__((visibility("default"))) +# define @library_name@_EXTERN_EXPORT extern +#else # define @library_name@_EXPORT # define @library_name@_EXTERN_EXPORT extern #endif +#endif #undef BUILD_SHARED_LIBS diff --git a/cython/CMakeLists.txt b/cython/CMakeLists.txt deleted file mode 100644 index 221025575..000000000 --- a/cython/CMakeLists.txt +++ /dev/null @@ -1,57 +0,0 @@ -# Install cython components -include(GtsamCythonWrap) - -# Create the cython toolbox for the gtsam library -if (GTSAM_INSTALL_CYTHON_TOOLBOX) - # Add the new make target command - set(python_install_target python-install) - add_custom_target(${python_install_target} - COMMAND ${PYTHON_EXECUTABLE} ${GTSAM_CYTHON_INSTALL_PATH}/setup.py install - WORKING_DIRECTORY ${GTSAM_CYTHON_INSTALL_PATH}) - - # build and include the eigency version of eigency - add_subdirectory(gtsam_eigency) - include_directories(${GTSAM_EIGENCY_INSTALL_PATH}) - - # Fix for error "C1128: number of sections exceeded object file format limit" - if(MSVC) - add_compile_options(/bigobj) - endif() - - # First set up all the package related files. - # This also ensures the below wrap operations work correctly. 
- set(CYTHON_INSTALL_REQUIREMENTS_FILE "${PROJECT_SOURCE_DIR}/cython/requirements.txt") - - # Install the custom-generated __init__.py - # This makes the cython (sub-)directories into python packages, so gtsam can be found while wrapping gtsam_unstable - configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam/__init__.py ${GTSAM_CYTHON_INSTALL_PATH}/gtsam/__init__.py COPYONLY) - configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam_unstable/__init__.py ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_unstable/__init__.py COPYONLY) - configure_file(${PROJECT_SOURCE_DIR}/cython/setup.py.in ${GTSAM_CYTHON_INSTALL_PATH}/setup.py) - - # Wrap gtsam - add_custom_target(gtsam_header DEPENDS "../gtsam.h") - wrap_and_install_library_cython("../gtsam.h" # interface_header - "" # extra imports - "${GTSAM_CYTHON_INSTALL_PATH}/gtsam" # install path - gtsam # library to link with - "wrap;cythonize_eigency;gtsam;gtsam_header" # dependencies which need to be built before wrapping - ) - add_dependencies(${python_install_target} gtsam gtsam_header) - - # Wrap gtsam_unstable - if(GTSAM_BUILD_UNSTABLE) - add_custom_target(gtsam_unstable_header DEPENDS "../gtsam_unstable/gtsam_unstable.h") - wrap_and_install_library_cython("../gtsam_unstable/gtsam_unstable.h" # interface_header - "from gtsam.gtsam cimport *" # extra imports - "${GTSAM_CYTHON_INSTALL_PATH}/gtsam_unstable" # install path - gtsam_unstable # library to link with - "gtsam_unstable;gtsam_unstable_header;cythonize_gtsam" # dependencies to be built before wrapping - ) - add_dependencies(${python_install_target} gtsam_unstable gtsam_unstable_header) - endif() - - # install scripts and tests - install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py") - install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam_unstable" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py") - -endif () diff --git a/cython/README.md b/cython/README.md deleted file mode 100644 index f69b7a5a6..000000000 --- a/cython/README.md +++ /dev/null @@ -1,147 +0,0 @@ -# Python Wrapper - -This is the Python wrapper around the GTSAM C++ library. We use Cython to generate the bindings to the underlying C++ code. - -## Requirements - -- If you want to build the GTSAM python library for a specific python version (eg 3.6), - use the `-DGTSAM_PYTHON_VERSION=3.6` option when running `cmake` otherwise the default interpreter will be used. -- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment), - then the environment should be active while building GTSAM. -- This wrapper needs `Cython(>=0.25.2)`, `backports_abc(>=0.5)`, and `numpy(>=1.11.0)`. These can be installed as follows: - - ```bash - pip install -r /cython/requirements.txt - ``` - -- For compatibility with GTSAM's Eigen version, it contains its own cloned version of [Eigency](https://github.com/wouterboomsma/eigency.git), - named `gtsam_eigency`, to interface between C++'s Eigen and Python's numpy. - -## Install - -- Run cmake with the `GTSAM_INSTALL_CYTHON_TOOLBOX` cmake flag enabled to configure building the wrapper. The wrapped module will be built and copied to the directory defined by `GTSAM_CYTHON_INSTALL_PATH`, which is by default `/cython` in Release mode and `/cython` for other modes. - -- Build GTSAM and the wrapper with `make`. - -- To install, simply run `make python-install`. - - The same command can be used to install into a virtual environment if it is active. 
- - **NOTE**: if you don't want GTSAM to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install GTSAM to a subdirectory of the build directory. - -- You can also directly run `make python-install` without running `make`, and it will compile all the dependencies accordingly. - -## Unit Tests - -The Cython toolbox also has a small set of unit tests located in the -test directory. To run them: - - ```bash - cd - python -m unittest discover - ``` - -## Utils - -TODO - -## Examples - -TODO - -## Writing Your Own Scripts - -See the tests for examples. - -### Some Important Notes: - -- Vector/Matrix: - - - GTSAM expects double-precision floating point vectors and matrices. - Hence, you should pass numpy matrices with `dtype=float`, or `float64`. - - Also, GTSAM expects _column-major_ matrices, unlike the default storage - scheme in numpy. Hence, you should pass column-major matrices to GTSAM using - the flag order='F'. And you always get column-major matrices back. - For more details, see [this link](https://github.com/wouterboomsma/eigency#storage-layout---why-arrays-are-sometimes-transposed). - - Passing row-major matrices of different dtype, e.g. `int`, will also work - as the wrapper converts them to column-major and dtype float for you, - using numpy.array.astype(float, order='F', copy=False). - However, this will result a copy if your matrix is not in the expected type - and storage order. - -- Inner namespace: Classes in inner namespace will be prefixed by \_ in Python. - - Examples: `noiseModel_Gaussian`, `noiseModel_mEstimator_Tukey` - -- Casting from a base class to a derive class must be done explicitly. - - Examples: - - ```python - noiseBase = factor.noiseModel() - noiseGaussian = dynamic_cast_noiseModel_Gaussian_noiseModel_Base(noiseBase) - ``` - -## Wrapping Custom GTSAM-based Project - -Please refer to the template project and the corresponding tutorial available [here](https://github.com/borglab/GTSAM-project-python). - -## KNOWN ISSUES - -- Doesn't work with python3 installed from homebrew - - size-related issue: can only wrap up to a certain number of classes: up to mEstimator! - - Guess: 64 vs 32b? disutils Compiler flags? -- Bug with Cython 0.24: instantiated factor classes return FastVector for keys(), which can't be casted to FastVector - - Upgrading to 0.25 solves the problem -- Need default constructor and default copy constructor for almost every classes... :( - - support these constructors by default and declare "delete" for special classes? - -### TODO - -- [ ] allow duplication of parent' functions in child classes. Not allowed for now due to conflicts in Cython. -- [ ] a common header for boost shared_ptr? (Or wait until everything is switched to std::shared_ptr in GTSAM?) -- [ ] inner namespaces ==> inner packages? -- [ ] Wrap fixed-size Matrices/Vectors? - -### Completed/Cancelled: - -- [x] Fix Python tests: don't use " import \* ": Bad style!!! (18-03-17 19:50) -- [x] Unit tests for cython wrappers @done (18-03-17 18:45) -- simply compare generated files -- [x] Wrap unstable @done (18-03-17 15:30) -- [x] Unify cython/GTSAM.h and the original GTSAM.h @done (18-03-17 15:30) -- [x] 18-03-17: manage to unify the two versions by removing std container stubs from the matlab version,and keeping KeyList/KeyVector/KeySet as in the matlab version. Probably Cython 0.25 fixes the casting problem. 
-- [x] 06-03-17: manage to remove the requirements for default and copy constructors -- [ ] 25-11-16: Try to unify but failed. Main reasons are: Key/size_t, std containers, KeyVector/KeyList/KeySet. Matlab doesn't need to know about Key, but I can't make Cython to ignore Key as it couldn't cast KeyVector, i.e. FastVector, to FastVector. -- [ ] Marginal and JointMarginal: revert changes @failed (17-03-17 11:00) -- Cython does need a default constructor! It produces cpp code like this: `GTSAM::JointMarginal __pyx_t_1;` Users don't have to wrap this constructor, however. -- [x] Convert input numpy Matrix/Vector to float dtype and storage order 'F' automatically, cannot crash! @done (15-03-17 13:00) -- [x] Remove requirements.txt - Frank: don't bother with only 2 packages and a special case for eigency! @done (08-03-17 10:30) -- [x] CMake install script @done (25-11-16 02:30) -- [ ] [REFACTOR] better name for uninstantiateClass: very vague!! @cancelled (25-11-16 02:30) -- lazy -- [ ] forward declaration? @cancelled (23-11-16 13:00) - nothing to do, seem to work? -- [x] wrap VariableIndex: why is it in inference? If need to, shouldn't have constructors to specific FactorGraphs @done (23-11-16 13:00) -- [x] Global functions @done (22-11-16 21:00) -- [x] [REFACTOR] typesEqual --> isSameSignature @done (22-11-16 21:00) -- [x] Proper overloads (constructors, static methods, methods) @done (20-11-16 21:00) -- [x] Allow overloading methods. The current solution is annoying!!! @done (20-11-16 21:00) -- [x] Casting from parent and grandparents @done (16-11-16 17:00) -- [x] Allow overloading constructors. The current solution is annoying!!! @done (16-11-16 17:00) -- [x] Support "print obj" @done (16-11-16 17:00) -- [x] methods for FastVector: at, [], ... @done (16-11-16 17:00) -- [x] Cython: Key and size_t: traits doesn't exist @done (16-09-12 18:34) -- [x] KeyVector, KeyList, KeySet... @done (16-09-13 17:19) -- [x] [Nice to have] parse typedef @done (16-09-13 17:19) -- [x] ctypedef at correct places @done (16-09-12 18:34) -- [x] expand template variable type in constructor/static methods? @done (16-09-12 18:34) -- [x] NonlinearOptimizer: copy constructor deleted!!! @done (16-09-13 17:20) -- [x] Value: no default constructor @done (16-09-13 17:20) -- [x] ctypedef PriorFactor[Vector] PriorFactorVector @done (16-09-19 12:25) -- [x] Delete duplicate methods in derived class @done (16-09-12 13:38) -- [x] Fix return properly @done (16-09-11 17:14) -- [x] handle pair @done (16-09-11 17:14) -- [x] Eigency: ambiguous call: A(const T&) A(const Vector& v) and Eigency A(Map[Vector]& v) @done (16-09-11 07:59) -- [x] Eigency: Constructor: ambiguous construct from Vector/Matrix @done (16-09-11 07:59) -- [x] Eigency: Fix method template of Vector/Matrix: template argument is [Vector] while arugment is Map[Vector] @done (16-09-11 08:22) -- [x] Robust noise: copy assignment operator is deleted because of shared_ptr of the abstract Base class @done (16-09-10 09:05) -- [ ] Cython: Constructor: generate default constructor? (hack: if it's serializable?) @cancelled (16-09-13 17:20) -- [ ] Eigency: Map[] to Block @created(16-09-10 07:59) @cancelled (16-09-11 08:28) - -- inference before symbolic/linear -- what's the purpose of "virtual" ?? 
diff --git a/cython/gtsam/__init__.py b/cython/gtsam/__init__.py deleted file mode 100644 index d40ee4502..000000000 --- a/cython/gtsam/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -from .gtsam import * - -try: - import gtsam_unstable - - - def _deprecated_wrapper(item, name): - def wrapper(*args, **kwargs): - from warnings import warn - message = ('importing the unstable item "{}" directly from gtsam is deprecated. '.format(name) + - 'Please import it from gtsam_unstable.') - warn(message) - return item(*args, **kwargs) - return wrapper - - - for name in dir(gtsam_unstable): - if not name.startswith('__'): - item = getattr(gtsam_unstable, name) - if callable(item): - item = _deprecated_wrapper(item, name) - globals()[name] = item - -except ImportError: - pass - diff --git a/cython/gtsam_eigency/CMakeLists.txt b/cython/gtsam_eigency/CMakeLists.txt deleted file mode 100644 index a0cf0fbde..000000000 --- a/cython/gtsam_eigency/CMakeLists.txt +++ /dev/null @@ -1,42 +0,0 @@ -include(GtsamCythonWrap) - -# Copy eigency's sources to the build folder -# so that the cython-generated header "conversions_api.h" can be found when cythonizing eigency's core -# and eigency's cython pxd headers can be found when cythonizing gtsam -file(COPY "." DESTINATION ".") -set(OUTPUT_DIR "${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency") -set(EIGENCY_INCLUDE_DIR ${OUTPUT_DIR}) - -# This is to make the build/cython/gtsam_eigency folder a python package -configure_file(__init__.py.in ${OUTPUT_DIR}/__init__.py) - -# include eigency headers -include_directories(${EIGENCY_INCLUDE_DIR}) - -# Cythonize and build eigency -message(STATUS "Cythonize and build eigency") -# Important trick: use "../gtsam_eigency/conversions.pyx" to let cython know that the conversions module is -# a part of the gtsam_eigency package and generate the function call import_gtsam_eigency__conversions() -# in conversions_api.h correctly! -cythonize(cythonize_eigency_conversions "../gtsam_eigency/conversions.pyx" "conversions" - "${OUTPUT_DIR}" "${EIGENCY_INCLUDE_DIR}" "" "" "") -cythonize(cythonize_eigency_core "../gtsam_eigency/core.pyx" "core" - ${OUTPUT_DIR} "${EIGENCY_INCLUDE_DIR}" "" "" "") - -# Include Eigen headers: -target_include_directories(cythonize_eigency_conversions PUBLIC - $ - $ -) -target_include_directories(cythonize_eigency_core PUBLIC - $ - $ -) - -add_dependencies(cythonize_eigency_core cythonize_eigency_conversions) -add_custom_target(cythonize_eigency) -add_dependencies(cythonize_eigency cythonize_eigency_conversions cythonize_eigency_core) - -if(TARGET ${python_install_target}) - add_dependencies(${python_install_target} cythonize_eigency) -endif() diff --git a/cython/gtsam_eigency/LICENSE.txt b/cython/gtsam_eigency/LICENSE.txt deleted file mode 100644 index 71743c864..000000000 --- a/cython/gtsam_eigency/LICENSE.txt +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2016 Wouter Boomsma - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/cython/gtsam_eigency/__init__.py.in b/cython/gtsam_eigency/__init__.py.in deleted file mode 100644 index a59d51eab..000000000 --- a/cython/gtsam_eigency/__init__.py.in +++ /dev/null @@ -1,13 +0,0 @@ -import os -import numpy as np - -__eigen_dir__ = "${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}" - -def get_includes(include_eigen=True): - root = os.path.dirname(__file__) - parent = os.path.join(root, "..") - path = [root, parent, np.get_include()] - if include_eigen: - path.append(os.path.join(root, __eigen_dir__)) - return path - diff --git a/cython/gtsam_eigency/conversions.pxd b/cython/gtsam_eigency/conversions.pxd deleted file mode 100644 index f4445e585..000000000 --- a/cython/gtsam_eigency/conversions.pxd +++ /dev/null @@ -1,62 +0,0 @@ -cimport numpy as np - -cdef api np.ndarray[double, ndim=2] ndarray_double_C(double *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[double, ndim=2] ndarray_double_F(double *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[double, ndim=2] ndarray_copy_double_C(const double *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[double, ndim=2] ndarray_copy_double_F(const double *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[float, ndim=2] ndarray_float_C(float *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[float, ndim=2] ndarray_float_F(float *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[float, ndim=2] ndarray_copy_float_C(const float *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[float, ndim=2] ndarray_copy_float_F(const float *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[long, ndim=2] ndarray_long_C(long *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[long, ndim=2] ndarray_long_F(long *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[long, ndim=2] ndarray_copy_long_C(const long *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[long, ndim=2] ndarray_copy_long_F(const long *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[unsigned long, ndim=2] ndarray_ulong_C(unsigned long *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned long, ndim=2] ndarray_ulong_F(unsigned long *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_C(const unsigned long *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_F(const unsigned long *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[int, ndim=2] ndarray_int_C(int *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[int, 
ndim=2] ndarray_int_F(int *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[int, ndim=2] ndarray_copy_int_C(const int *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[int, ndim=2] ndarray_copy_int_F(const int *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[unsigned int, ndim=2] ndarray_uint_C(unsigned int *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned int, ndim=2] ndarray_uint_F(unsigned int *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_C(const unsigned int *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_F(const unsigned int *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[short, ndim=2] ndarray_short_C(short *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[short, ndim=2] ndarray_short_F(short *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[short, ndim=2] ndarray_copy_short_C(const short *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[short, ndim=2] ndarray_copy_short_F(const short *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[unsigned short, ndim=2] ndarray_ushort_C(unsigned short *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned short, ndim=2] ndarray_ushort_F(unsigned short *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_C(const unsigned short *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_F(const unsigned short *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[signed char, ndim=2] ndarray_schar_C(signed char *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[signed char, ndim=2] ndarray_schar_F(signed char *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[signed char, ndim=2] ndarray_copy_schar_C(const signed char *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[signed char, ndim=2] ndarray_copy_schar_F(const signed char *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[unsigned char, ndim=2] ndarray_uchar_C(unsigned char *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned char, ndim=2] ndarray_uchar_F(unsigned char *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_C(const unsigned char *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_F(const unsigned char *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_C(np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_F(np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api 
np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_C(const np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_F(const np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_C(np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_F(np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_C(const np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_F(const np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride) - diff --git a/cython/gtsam_eigency/conversions.pyx b/cython/gtsam_eigency/conversions.pyx deleted file mode 100644 index 55c9ae0cd..000000000 --- a/cython/gtsam_eigency/conversions.pyx +++ /dev/null @@ -1,327 +0,0 @@ -cimport cython -import numpy as np -from numpy.lib.stride_tricks import as_strided - -@cython.boundscheck(False) -cdef np.ndarray[double, ndim=2] ndarray_double_C(double *data, long rows, long cols, long row_stride, long col_stride): - cdef double[:,:] mem_view = data - dtype = 'double' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[double, ndim=2] ndarray_double_F(double *data, long rows, long cols, long row_stride, long col_stride): - cdef double[::1,:] mem_view = data - dtype = 'double' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[double, ndim=2] ndarray_copy_double_C(const double *data, long rows, long cols, long row_stride, long col_stride): - cdef double[:,:] mem_view = data - dtype = 'double' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[double, ndim=2] ndarray_copy_double_F(const double *data, long rows, long cols, long row_stride, long col_stride): - cdef double[::1,:] mem_view = data - dtype = 'double' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[float, ndim=2] ndarray_float_C(float *data, long rows, long cols, long row_stride, long col_stride): - cdef float[:,:] mem_view = data - dtype = 'float' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[float, ndim=2] ndarray_float_F(float *data, long rows, long cols, long row_stride, long col_stride): - cdef float[::1,:] mem_view = data - dtype = 'float' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef 
np.ndarray[float, ndim=2] ndarray_copy_float_C(const float *data, long rows, long cols, long row_stride, long col_stride): - cdef float[:,:] mem_view = data - dtype = 'float' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[float, ndim=2] ndarray_copy_float_F(const float *data, long rows, long cols, long row_stride, long col_stride): - cdef float[::1,:] mem_view = data - dtype = 'float' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[long, ndim=2] ndarray_long_C(long *data, long rows, long cols, long row_stride, long col_stride): - cdef long[:,:] mem_view = data - dtype = 'int_' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[long, ndim=2] ndarray_long_F(long *data, long rows, long cols, long row_stride, long col_stride): - cdef long[::1,:] mem_view = data - dtype = 'int_' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[long, ndim=2] ndarray_copy_long_C(const long *data, long rows, long cols, long row_stride, long col_stride): - cdef long[:,:] mem_view = data - dtype = 'int_' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[long, ndim=2] ndarray_copy_long_F(const long *data, long rows, long cols, long row_stride, long col_stride): - cdef long[::1,:] mem_view = data - dtype = 'int_' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[unsigned long, ndim=2] ndarray_ulong_C(unsigned long *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned long[:,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[unsigned long, ndim=2] ndarray_ulong_F(unsigned long *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned long[::1,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_C(const unsigned long *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned long[:,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_F(const unsigned long *data, long rows, long cols, long 
row_stride, long col_stride): - cdef unsigned long[::1,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[int, ndim=2] ndarray_int_C(int *data, long rows, long cols, long row_stride, long col_stride): - cdef int[:,:] mem_view = data - dtype = 'int' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[int, ndim=2] ndarray_int_F(int *data, long rows, long cols, long row_stride, long col_stride): - cdef int[::1,:] mem_view = data - dtype = 'int' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[int, ndim=2] ndarray_copy_int_C(const int *data, long rows, long cols, long row_stride, long col_stride): - cdef int[:,:] mem_view = data - dtype = 'int' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[int, ndim=2] ndarray_copy_int_F(const int *data, long rows, long cols, long row_stride, long col_stride): - cdef int[::1,:] mem_view = data - dtype = 'int' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[unsigned int, ndim=2] ndarray_uint_C(unsigned int *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned int[:,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[unsigned int, ndim=2] ndarray_uint_F(unsigned int *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned int[::1,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_C(const unsigned int *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned int[:,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_F(const unsigned int *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned int[::1,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[short, ndim=2] ndarray_short_C(short *data, long rows, long cols, long row_stride, long col_stride): - cdef short[:,:] mem_view = data - dtype = 'short' - cdef int itemsize = np.dtype(dtype).itemsize 
- return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[short, ndim=2] ndarray_short_F(short *data, long rows, long cols, long row_stride, long col_stride): - cdef short[::1,:] mem_view = data - dtype = 'short' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[short, ndim=2] ndarray_copy_short_C(const short *data, long rows, long cols, long row_stride, long col_stride): - cdef short[:,:] mem_view = data - dtype = 'short' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[short, ndim=2] ndarray_copy_short_F(const short *data, long rows, long cols, long row_stride, long col_stride): - cdef short[::1,:] mem_view = data - dtype = 'short' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[unsigned short, ndim=2] ndarray_ushort_C(unsigned short *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned short[:,:] mem_view = data - dtype = 'ushort' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[unsigned short, ndim=2] ndarray_ushort_F(unsigned short *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned short[::1,:] mem_view = data - dtype = 'ushort' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_C(const unsigned short *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned short[:,:] mem_view = data - dtype = 'ushort' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_F(const unsigned short *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned short[::1,:] mem_view = data - dtype = 'ushort' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[signed char, ndim=2] ndarray_schar_C(signed char *data, long rows, long cols, long row_stride, long col_stride): - cdef signed char[:,:] mem_view = data - dtype = 'int8' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[signed char, ndim=2] ndarray_schar_F(signed char *data, long rows, long cols, long row_stride, long col_stride): - cdef signed char[::1,:] mem_view = data - dtype = 'int8' - cdef int itemsize = np.dtype(dtype).itemsize - return 
as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[signed char, ndim=2] ndarray_copy_schar_C(const signed char *data, long rows, long cols, long row_stride, long col_stride): - cdef signed char[:,:] mem_view = data - dtype = 'int8' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[signed char, ndim=2] ndarray_copy_schar_F(const signed char *data, long rows, long cols, long row_stride, long col_stride): - cdef signed char[::1,:] mem_view = data - dtype = 'int8' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[unsigned char, ndim=2] ndarray_uchar_C(unsigned char *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned char[:,:] mem_view = data - dtype = 'uint8' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[unsigned char, ndim=2] ndarray_uchar_F(unsigned char *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned char[::1,:] mem_view = data - dtype = 'uint8' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_C(const unsigned char *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned char[:,:] mem_view = data - dtype = 'uint8' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_F(const unsigned char *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned char[::1,:] mem_view = data - dtype = 'uint8' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_C(np.complex128_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex128_t[:,:] mem_view = data - dtype = 'complex128' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_F(np.complex128_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex128_t[::1,:] mem_view = data - dtype = 'complex128' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_C(const np.complex128_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex128_t[:,:] 
mem_view = data - dtype = 'complex128' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_F(const np.complex128_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex128_t[::1,:] mem_view = data - dtype = 'complex128' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_C(np.complex64_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex64_t[:,:] mem_view = data - dtype = 'complex64' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_F(np.complex64_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex64_t[::1,:] mem_view = data - dtype = 'complex64' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_C(const np.complex64_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex64_t[:,:] mem_view = data - dtype = 'complex64' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_F(const np.complex64_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex64_t[::1,:] mem_view = data - dtype = 'complex64' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - diff --git a/cython/gtsam_eigency/core.pxd b/cython/gtsam_eigency/core.pxd deleted file mode 100644 index 9a84c3c16..000000000 --- a/cython/gtsam_eigency/core.pxd +++ /dev/null @@ -1,917 +0,0 @@ -cimport cython -cimport numpy as np - -ctypedef signed char schar; -ctypedef unsigned char uchar; - -ctypedef fused dtype: - uchar - schar - short - int - long - float - double - -ctypedef fused DenseType: - Matrix - Array - -ctypedef fused Rows: - _1 - _2 - _3 - _4 - _5 - _6 - _7 - _8 - _9 - _10 - _11 - _12 - _13 - _14 - _15 - _16 - _17 - _18 - _19 - _20 - _21 - _22 - _23 - _24 - _25 - _26 - _27 - _28 - _29 - _30 - _31 - _32 - Dynamic - -ctypedef Rows Cols -ctypedef Rows StrideOuter -ctypedef Rows StrideInner - -ctypedef fused DenseTypeShort: - Vector1i - Vector2i - Vector3i - Vector4i - VectorXi - RowVector1i - RowVector2i - RowVector3i - RowVector4i - RowVectorXi - Matrix1i - Matrix2i - Matrix3i - Matrix4i - MatrixXi - Vector1f - Vector2f - Vector3f - Vector4f - VectorXf - RowVector1f - RowVector2f - RowVector3f - RowVector4f - RowVectorXf - Matrix1f - Matrix2f - Matrix3f - Matrix4f - MatrixXf - Vector1d - Vector2d - Vector3d - Vector4d - VectorXd - RowVector1d - RowVector2d - RowVector3d - RowVector4d - RowVectorXd - Matrix1d - Matrix2d 
- Matrix3d - Matrix4d - MatrixXd - Vector1cf - Vector2cf - Vector3cf - Vector4cf - VectorXcf - RowVector1cf - RowVector2cf - RowVector3cf - RowVector4cf - RowVectorXcf - Matrix1cf - Matrix2cf - Matrix3cf - Matrix4cf - MatrixXcf - Vector1cd - Vector2cd - Vector3cd - Vector4cd - VectorXcd - RowVector1cd - RowVector2cd - RowVector3cd - RowVector4cd - RowVectorXcd - Matrix1cd - Matrix2cd - Matrix3cd - Matrix4cd - MatrixXcd - Array22i - Array23i - Array24i - Array2Xi - Array32i - Array33i - Array34i - Array3Xi - Array42i - Array43i - Array44i - Array4Xi - ArrayX2i - ArrayX3i - ArrayX4i - ArrayXXi - Array2i - Array3i - Array4i - ArrayXi - Array22f - Array23f - Array24f - Array2Xf - Array32f - Array33f - Array34f - Array3Xf - Array42f - Array43f - Array44f - Array4Xf - ArrayX2f - ArrayX3f - ArrayX4f - ArrayXXf - Array2f - Array3f - Array4f - ArrayXf - Array22d - Array23d - Array24d - Array2Xd - Array32d - Array33d - Array34d - Array3Xd - Array42d - Array43d - Array44d - Array4Xd - ArrayX2d - ArrayX3d - ArrayX4d - ArrayXXd - Array2d - Array3d - Array4d - ArrayXd - Array22cf - Array23cf - Array24cf - Array2Xcf - Array32cf - Array33cf - Array34cf - Array3Xcf - Array42cf - Array43cf - Array44cf - Array4Xcf - ArrayX2cf - ArrayX3cf - ArrayX4cf - ArrayXXcf - Array2cf - Array3cf - Array4cf - ArrayXcf - Array22cd - Array23cd - Array24cd - Array2Xcd - Array32cd - Array33cd - Array34cd - Array3Xcd - Array42cd - Array43cd - Array44cd - Array4Xcd - ArrayX2cd - ArrayX3cd - ArrayX4cd - ArrayXXcd - Array2cd - Array3cd - Array4cd - ArrayXcd - -ctypedef fused StorageOrder: - RowMajor - ColMajor - -ctypedef fused MapOptions: - Aligned - Unaligned - -cdef extern from "eigency_cpp.h" namespace "eigency": - - cdef cppclass _1 "1": - pass - - cdef cppclass _2 "2": - pass - - cdef cppclass _3 "3": - pass - - cdef cppclass _4 "4": - pass - - cdef cppclass _5 "5": - pass - - cdef cppclass _6 "6": - pass - - cdef cppclass _7 "7": - pass - - cdef cppclass _8 "8": - pass - - cdef cppclass _9 "9": - pass - - cdef cppclass _10 "10": - pass - - cdef cppclass _11 "11": - pass - - cdef cppclass _12 "12": - pass - - cdef cppclass _13 "13": - pass - - cdef cppclass _14 "14": - pass - - cdef cppclass _15 "15": - pass - - cdef cppclass _16 "16": - pass - - cdef cppclass _17 "17": - pass - - cdef cppclass _18 "18": - pass - - cdef cppclass _19 "19": - pass - - cdef cppclass _20 "20": - pass - - cdef cppclass _21 "21": - pass - - cdef cppclass _22 "22": - pass - - cdef cppclass _23 "23": - pass - - cdef cppclass _24 "24": - pass - - cdef cppclass _25 "25": - pass - - cdef cppclass _26 "26": - pass - - cdef cppclass _27 "27": - pass - - cdef cppclass _28 "28": - pass - - cdef cppclass _29 "29": - pass - - cdef cppclass _30 "30": - pass - - cdef cppclass _31 "31": - pass - - cdef cppclass _32 "32": - pass - - cdef cppclass PlainObjectBase: - pass - - cdef cppclass Map[DenseTypeShort](PlainObjectBase): - Map() except + - Map(np.ndarray array) except + - - cdef cppclass FlattenedMap[DenseType, dtype, Rows, Cols]: - FlattenedMap() except + - FlattenedMap(np.ndarray array) except + - - cdef cppclass FlattenedMapWithOrder "eigency::FlattenedMap" [DenseType, dtype, Rows, Cols, StorageOrder]: - FlattenedMapWithOrder() except + - FlattenedMapWithOrder(np.ndarray array) except + - - cdef cppclass FlattenedMapWithStride "eigency::FlattenedMap" [DenseType, dtype, Rows, Cols, StorageOrder, MapOptions, StrideOuter, StrideInner]: - FlattenedMapWithStride() except + - FlattenedMapWithStride(np.ndarray array) except + - - cdef np.ndarray 
ndarray_view(PlainObjectBase &) - cdef np.ndarray ndarray_copy(PlainObjectBase &) - cdef np.ndarray ndarray(PlainObjectBase &) - - -cdef extern from "eigency_cpp.h" namespace "Eigen": - - cdef cppclass Dynamic: - pass - - cdef cppclass RowMajor: - pass - - cdef cppclass ColMajor: - pass - - cdef cppclass Aligned: - pass - - cdef cppclass Unaligned: - pass - - cdef cppclass Matrix(PlainObjectBase): - pass - - cdef cppclass Array(PlainObjectBase): - pass - - cdef cppclass VectorXd(PlainObjectBase): - pass - - cdef cppclass Vector1i(PlainObjectBase): - pass - - cdef cppclass Vector2i(PlainObjectBase): - pass - - cdef cppclass Vector3i(PlainObjectBase): - pass - - cdef cppclass Vector4i(PlainObjectBase): - pass - - cdef cppclass VectorXi(PlainObjectBase): - pass - - cdef cppclass RowVector1i(PlainObjectBase): - pass - - cdef cppclass RowVector2i(PlainObjectBase): - pass - - cdef cppclass RowVector3i(PlainObjectBase): - pass - - cdef cppclass RowVector4i(PlainObjectBase): - pass - - cdef cppclass RowVectorXi(PlainObjectBase): - pass - - cdef cppclass Matrix1i(PlainObjectBase): - pass - - cdef cppclass Matrix2i(PlainObjectBase): - pass - - cdef cppclass Matrix3i(PlainObjectBase): - pass - - cdef cppclass Matrix4i(PlainObjectBase): - pass - - cdef cppclass MatrixXi(PlainObjectBase): - pass - - cdef cppclass Vector1f(PlainObjectBase): - pass - - cdef cppclass Vector2f(PlainObjectBase): - pass - - cdef cppclass Vector3f(PlainObjectBase): - pass - - cdef cppclass Vector4f(PlainObjectBase): - pass - - cdef cppclass VectorXf(PlainObjectBase): - pass - - cdef cppclass RowVector1f(PlainObjectBase): - pass - - cdef cppclass RowVector2f(PlainObjectBase): - pass - - cdef cppclass RowVector3f(PlainObjectBase): - pass - - cdef cppclass RowVector4f(PlainObjectBase): - pass - - cdef cppclass RowVectorXf(PlainObjectBase): - pass - - cdef cppclass Matrix1f(PlainObjectBase): - pass - - cdef cppclass Matrix2f(PlainObjectBase): - pass - - cdef cppclass Matrix3f(PlainObjectBase): - pass - - cdef cppclass Matrix4f(PlainObjectBase): - pass - - cdef cppclass MatrixXf(PlainObjectBase): - pass - - cdef cppclass Vector1d(PlainObjectBase): - pass - - cdef cppclass Vector2d(PlainObjectBase): - pass - - cdef cppclass Vector3d(PlainObjectBase): - pass - - cdef cppclass Vector4d(PlainObjectBase): - pass - - cdef cppclass VectorXd(PlainObjectBase): - pass - - cdef cppclass RowVector1d(PlainObjectBase): - pass - - cdef cppclass RowVector2d(PlainObjectBase): - pass - - cdef cppclass RowVector3d(PlainObjectBase): - pass - - cdef cppclass RowVector4d(PlainObjectBase): - pass - - cdef cppclass RowVectorXd(PlainObjectBase): - pass - - cdef cppclass Matrix1d(PlainObjectBase): - pass - - cdef cppclass Matrix2d(PlainObjectBase): - pass - - cdef cppclass Matrix3d(PlainObjectBase): - pass - - cdef cppclass Matrix4d(PlainObjectBase): - pass - - cdef cppclass MatrixXd(PlainObjectBase): - pass - - cdef cppclass Vector1cf(PlainObjectBase): - pass - - cdef cppclass Vector2cf(PlainObjectBase): - pass - - cdef cppclass Vector3cf(PlainObjectBase): - pass - - cdef cppclass Vector4cf(PlainObjectBase): - pass - - cdef cppclass VectorXcf(PlainObjectBase): - pass - - cdef cppclass RowVector1cf(PlainObjectBase): - pass - - cdef cppclass RowVector2cf(PlainObjectBase): - pass - - cdef cppclass RowVector3cf(PlainObjectBase): - pass - - cdef cppclass RowVector4cf(PlainObjectBase): - pass - - cdef cppclass RowVectorXcf(PlainObjectBase): - pass - - cdef cppclass Matrix1cf(PlainObjectBase): - pass - - cdef cppclass Matrix2cf(PlainObjectBase): - pass - - cdef 
cppclass Matrix3cf(PlainObjectBase): - pass - - cdef cppclass Matrix4cf(PlainObjectBase): - pass - - cdef cppclass MatrixXcf(PlainObjectBase): - pass - - cdef cppclass Vector1cd(PlainObjectBase): - pass - - cdef cppclass Vector2cd(PlainObjectBase): - pass - - cdef cppclass Vector3cd(PlainObjectBase): - pass - - cdef cppclass Vector4cd(PlainObjectBase): - pass - - cdef cppclass VectorXcd(PlainObjectBase): - pass - - cdef cppclass RowVector1cd(PlainObjectBase): - pass - - cdef cppclass RowVector2cd(PlainObjectBase): - pass - - cdef cppclass RowVector3cd(PlainObjectBase): - pass - - cdef cppclass RowVector4cd(PlainObjectBase): - pass - - cdef cppclass RowVectorXcd(PlainObjectBase): - pass - - cdef cppclass Matrix1cd(PlainObjectBase): - pass - - cdef cppclass Matrix2cd(PlainObjectBase): - pass - - cdef cppclass Matrix3cd(PlainObjectBase): - pass - - cdef cppclass Matrix4cd(PlainObjectBase): - pass - - cdef cppclass MatrixXcd(PlainObjectBase): - pass - - cdef cppclass Array22i(PlainObjectBase): - pass - - cdef cppclass Array23i(PlainObjectBase): - pass - - cdef cppclass Array24i(PlainObjectBase): - pass - - cdef cppclass Array2Xi(PlainObjectBase): - pass - - cdef cppclass Array32i(PlainObjectBase): - pass - - cdef cppclass Array33i(PlainObjectBase): - pass - - cdef cppclass Array34i(PlainObjectBase): - pass - - cdef cppclass Array3Xi(PlainObjectBase): - pass - - cdef cppclass Array42i(PlainObjectBase): - pass - - cdef cppclass Array43i(PlainObjectBase): - pass - - cdef cppclass Array44i(PlainObjectBase): - pass - - cdef cppclass Array4Xi(PlainObjectBase): - pass - - cdef cppclass ArrayX2i(PlainObjectBase): - pass - - cdef cppclass ArrayX3i(PlainObjectBase): - pass - - cdef cppclass ArrayX4i(PlainObjectBase): - pass - - cdef cppclass ArrayXXi(PlainObjectBase): - pass - - cdef cppclass Array2i(PlainObjectBase): - pass - - cdef cppclass Array3i(PlainObjectBase): - pass - - cdef cppclass Array4i(PlainObjectBase): - pass - - cdef cppclass ArrayXi(PlainObjectBase): - pass - - cdef cppclass Array22f(PlainObjectBase): - pass - - cdef cppclass Array23f(PlainObjectBase): - pass - - cdef cppclass Array24f(PlainObjectBase): - pass - - cdef cppclass Array2Xf(PlainObjectBase): - pass - - cdef cppclass Array32f(PlainObjectBase): - pass - - cdef cppclass Array33f(PlainObjectBase): - pass - - cdef cppclass Array34f(PlainObjectBase): - pass - - cdef cppclass Array3Xf(PlainObjectBase): - pass - - cdef cppclass Array42f(PlainObjectBase): - pass - - cdef cppclass Array43f(PlainObjectBase): - pass - - cdef cppclass Array44f(PlainObjectBase): - pass - - cdef cppclass Array4Xf(PlainObjectBase): - pass - - cdef cppclass ArrayX2f(PlainObjectBase): - pass - - cdef cppclass ArrayX3f(PlainObjectBase): - pass - - cdef cppclass ArrayX4f(PlainObjectBase): - pass - - cdef cppclass ArrayXXf(PlainObjectBase): - pass - - cdef cppclass Array2f(PlainObjectBase): - pass - - cdef cppclass Array3f(PlainObjectBase): - pass - - cdef cppclass Array4f(PlainObjectBase): - pass - - cdef cppclass ArrayXf(PlainObjectBase): - pass - - cdef cppclass Array22d(PlainObjectBase): - pass - - cdef cppclass Array23d(PlainObjectBase): - pass - - cdef cppclass Array24d(PlainObjectBase): - pass - - cdef cppclass Array2Xd(PlainObjectBase): - pass - - cdef cppclass Array32d(PlainObjectBase): - pass - - cdef cppclass Array33d(PlainObjectBase): - pass - - cdef cppclass Array34d(PlainObjectBase): - pass - - cdef cppclass Array3Xd(PlainObjectBase): - pass - - cdef cppclass Array42d(PlainObjectBase): - pass - - cdef cppclass Array43d(PlainObjectBase): - pass - 
- cdef cppclass Array44d(PlainObjectBase): - pass - - cdef cppclass Array4Xd(PlainObjectBase): - pass - - cdef cppclass ArrayX2d(PlainObjectBase): - pass - - cdef cppclass ArrayX3d(PlainObjectBase): - pass - - cdef cppclass ArrayX4d(PlainObjectBase): - pass - - cdef cppclass ArrayXXd(PlainObjectBase): - pass - - cdef cppclass Array2d(PlainObjectBase): - pass - - cdef cppclass Array3d(PlainObjectBase): - pass - - cdef cppclass Array4d(PlainObjectBase): - pass - - cdef cppclass ArrayXd(PlainObjectBase): - pass - - cdef cppclass Array22cf(PlainObjectBase): - pass - - cdef cppclass Array23cf(PlainObjectBase): - pass - - cdef cppclass Array24cf(PlainObjectBase): - pass - - cdef cppclass Array2Xcf(PlainObjectBase): - pass - - cdef cppclass Array32cf(PlainObjectBase): - pass - - cdef cppclass Array33cf(PlainObjectBase): - pass - - cdef cppclass Array34cf(PlainObjectBase): - pass - - cdef cppclass Array3Xcf(PlainObjectBase): - pass - - cdef cppclass Array42cf(PlainObjectBase): - pass - - cdef cppclass Array43cf(PlainObjectBase): - pass - - cdef cppclass Array44cf(PlainObjectBase): - pass - - cdef cppclass Array4Xcf(PlainObjectBase): - pass - - cdef cppclass ArrayX2cf(PlainObjectBase): - pass - - cdef cppclass ArrayX3cf(PlainObjectBase): - pass - - cdef cppclass ArrayX4cf(PlainObjectBase): - pass - - cdef cppclass ArrayXXcf(PlainObjectBase): - pass - - cdef cppclass Array2cf(PlainObjectBase): - pass - - cdef cppclass Array3cf(PlainObjectBase): - pass - - cdef cppclass Array4cf(PlainObjectBase): - pass - - cdef cppclass ArrayXcf(PlainObjectBase): - pass - - cdef cppclass Array22cd(PlainObjectBase): - pass - - cdef cppclass Array23cd(PlainObjectBase): - pass - - cdef cppclass Array24cd(PlainObjectBase): - pass - - cdef cppclass Array2Xcd(PlainObjectBase): - pass - - cdef cppclass Array32cd(PlainObjectBase): - pass - - cdef cppclass Array33cd(PlainObjectBase): - pass - - cdef cppclass Array34cd(PlainObjectBase): - pass - - cdef cppclass Array3Xcd(PlainObjectBase): - pass - - cdef cppclass Array42cd(PlainObjectBase): - pass - - cdef cppclass Array43cd(PlainObjectBase): - pass - - cdef cppclass Array44cd(PlainObjectBase): - pass - - cdef cppclass Array4Xcd(PlainObjectBase): - pass - - cdef cppclass ArrayX2cd(PlainObjectBase): - pass - - cdef cppclass ArrayX3cd(PlainObjectBase): - pass - - cdef cppclass ArrayX4cd(PlainObjectBase): - pass - - cdef cppclass ArrayXXcd(PlainObjectBase): - pass - - cdef cppclass Array2cd(PlainObjectBase): - pass - - cdef cppclass Array3cd(PlainObjectBase): - pass - - cdef cppclass Array4cd(PlainObjectBase): - pass - - cdef cppclass ArrayXcd(PlainObjectBase): - pass - - diff --git a/cython/gtsam_eigency/core.pyx b/cython/gtsam_eigency/core.pyx deleted file mode 100644 index 8b1378917..000000000 --- a/cython/gtsam_eigency/core.pyx +++ /dev/null @@ -1 +0,0 @@ - diff --git a/cython/gtsam_eigency/eigency_cpp.h b/cython/gtsam_eigency/eigency_cpp.h deleted file mode 100644 index ce303182e..000000000 --- a/cython/gtsam_eigency/eigency_cpp.h +++ /dev/null @@ -1,504 +0,0 @@ -#include - -#include -#include -#include - -typedef ::std::complex< double > __pyx_t_double_complex; -typedef ::std::complex< float > __pyx_t_float_complex; - -#include "conversions_api.h" - -#ifndef EIGENCY_CPP -#define EIGENCY_CPP - -namespace eigency { - -template -inline PyArrayObject *_ndarray_view(Scalar *, long rows, long cols, bool is_row_major, long outer_stride=0, long inner_stride=0); -template -inline PyArrayObject *_ndarray_copy(const Scalar *, long rows, long cols, bool is_row_major, long 
outer_stride=0, long inner_stride=0); - -// Strides: -// Eigen and numpy differ in their way of dealing with strides. Eigen has the concept of outer and -// inner strides, which are dependent on whether the array/matrix is row-major of column-major: -// Inner stride: denotes the offset between succeeding elements in each row (row-major) or column (column-major). -// Outer stride: denotes the offset between succeeding rows (row-major) or succeeding columns (column-major). -// In contrast, numpy's stride is simply a measure of how fast each dimension should be incremented. -// Consequently, a switch in numpy storage order from row-major to column-major involves a switch -// in strides, while it does not affect the stride in Eigen. -template<> -inline PyArrayObject *_ndarray_view(double *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) { - // Eigen row-major mode: row_stride=outer_stride, and col_stride=inner_stride - // If no stride is given, the row_stride is set to the number of columns. - return ndarray_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - } else { - // Eigen column-major mode: row_stride=outer_stride, and col_stride=inner_stride - // If no stride is given, the cow_stride is set to the number of rows. - return ndarray_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); - } -} -template<> -inline PyArrayObject *_ndarray_copy(const double *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(float *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const float *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_long_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_long_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_long_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_long_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(unsigned long *data, long rows, long cols, 
bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_ulong_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_ulong_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const unsigned long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_ulong_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_ulong_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_int_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_int_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_int_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_int_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(unsigned int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_uint_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_uint_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const unsigned int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_uint_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_uint_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_short_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_short_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_short_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_short_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(unsigned short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_ushort_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_ushort_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline 
PyArrayObject *_ndarray_copy(const unsigned short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_ushort_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_ushort_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(signed char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_schar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_schar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const signed char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_schar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_schar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(unsigned char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_uchar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_uchar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const unsigned char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_uchar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_uchar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view >(std::complex *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_complex_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_complex_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy >(const std::complex *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_complex_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_complex_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view >(std::complex *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_complex_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_complex_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy >(const std::complex *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_complex_float_C(data, rows, cols, 
outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_complex_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - - -template -inline PyArrayObject *ndarray(Eigen::PlainObjectBase &m) { - import_gtsam_eigency__conversions(); - return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor); -} -// If C++11 is available, check if m is an r-value reference, in -// which case a copy should always be made -#if __cplusplus >= 201103L -template -inline PyArrayObject *ndarray(Eigen::PlainObjectBase &&m) { - import_gtsam_eigency__conversions(); - return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor); -} -#endif -template -inline PyArrayObject *ndarray(const Eigen::PlainObjectBase &m) { - import_gtsam_eigency__conversions(); - return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor); -} -template -inline PyArrayObject *ndarray_view(Eigen::PlainObjectBase &m) { - import_gtsam_eigency__conversions(); - return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor); -} -template -inline PyArrayObject *ndarray_view(const Eigen::PlainObjectBase &m) { - import_gtsam_eigency__conversions(); - return _ndarray_view(const_cast(m.data()), m.rows(), m.cols(), m.IsRowMajor); -} -template -inline PyArrayObject *ndarray_copy(const Eigen::PlainObjectBase &m) { - import_gtsam_eigency__conversions(); - return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor); -} - -template -inline PyArrayObject *ndarray(Eigen::Map &m) { - import_gtsam_eigency__conversions(); - return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); -} -template -inline PyArrayObject *ndarray(const Eigen::Map &m) { - import_gtsam_eigency__conversions(); - // Since this is a map, we assume that ownership is correctly taken care - // of, and we avoid taking a copy - return _ndarray_view(const_cast(m.data()), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); -} -template -inline PyArrayObject *ndarray_view(Eigen::Map &m) { - import_gtsam_eigency__conversions(); - return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); -} -template -inline PyArrayObject *ndarray_view(const Eigen::Map &m) { - import_gtsam_eigency__conversions(); - return _ndarray_view(const_cast(m.data()), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); -} -template -inline PyArrayObject *ndarray_copy(const Eigen::Map &m) { - import_gtsam_eigency__conversions(); - return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); -} - - -template > -class MapBase: public Eigen::Map { -public: - typedef Eigen::Map Base; - typedef typename Base::Scalar Scalar; - - MapBase(Scalar* data, - long rows, - long cols, - _StrideType stride=_StrideType()) - : Base(data, - // If both dimensions are dynamic or dimensions match, accept dimensions as they are - ((Base::RowsAtCompileTime==Eigen::Dynamic && Base::ColsAtCompileTime==Eigen::Dynamic) || - (Base::RowsAtCompileTime==rows && Base::ColsAtCompileTime==cols)) - ? rows - // otherwise, test if swapping them makes them fit - : ((Base::RowsAtCompileTime==cols || Base::ColsAtCompileTime==rows) - ? cols - : rows), - ((Base::RowsAtCompileTime==Eigen::Dynamic && Base::ColsAtCompileTime==Eigen::Dynamic) || - (Base::RowsAtCompileTime==rows && Base::ColsAtCompileTime==cols)) - ? cols - : ((Base::RowsAtCompileTime==cols || Base::ColsAtCompileTime==rows) - ? 
rows - : cols), - stride - ) {} - - MapBase &operator=(const MatrixType &other) { - Base::operator=(other); - return *this; - } - - virtual ~MapBase() { } -}; - - -template class EigencyDenseBase, - typename Scalar, - int _Rows, int _Cols, - int _Options = Eigen::AutoAlign | -#if defined(__GNUC__) && __GNUC__==3 && __GNUC_MINOR__==4 - // workaround a bug in at least gcc 3.4.6 - // the innermost ?: ternary operator is misparsed. We write it slightly - // differently and this makes gcc 3.4.6 happy, but it's ugly. - // The error would only show up with EIGEN_DEFAULT_TO_ROW_MAJOR is defined - // (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor) - ( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor -// EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION contains explicit namespace since Eigen 3.1.19 -#if EIGEN_VERSION_AT_LEAST(3,2,90) - : !(_Cols==1 && _Rows!=1) ? EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION -#else - : !(_Cols==1 && _Rows!=1) ? Eigen::EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION -#endif - : ColMajor ), -#else - ( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor - : (_Cols==1 && _Rows!=1) ? Eigen::ColMajor -// EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION contains explicit namespace since Eigen 3.1.19 -#if EIGEN_VERSION_AT_LEAST(3,2,90) - : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ), -#else - : Eigen::EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ), -#endif -#endif - int _MapOptions = Eigen::Unaligned, - int _StrideOuter=0, int _StrideInner=0, - int _MaxRows = _Rows, - int _MaxCols = _Cols> -class FlattenedMap: public MapBase, _MapOptions, Eigen::Stride<_StrideOuter, _StrideInner> > { -public: - typedef MapBase, _MapOptions, Eigen::Stride<_StrideOuter, _StrideInner> > Base; - - FlattenedMap() - : Base(NULL, 0, 0), - object_(NULL) {} - - FlattenedMap(Scalar *data, long rows, long cols, long outer_stride=0, long inner_stride=0) - : Base(data, rows, cols, - Eigen::Stride<_StrideOuter, _StrideInner>(outer_stride, inner_stride)), - object_(NULL) { - } - - FlattenedMap(PyArrayObject *object) - : Base((Scalar *)((PyArrayObject*)object)->data, - // : Base(_from_numpy((PyArrayObject*)object), - (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[0] : 1, - (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[1] : ((PyArrayObject*)object)->dimensions[0], - Eigen::Stride<_StrideOuter, _StrideInner>(_StrideOuter != Eigen::Dynamic ? _StrideOuter : (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[0] : 1, - _StrideInner != Eigen::Dynamic ? _StrideInner : (((PyArrayObject*)object)->nd == 2) ? 
((PyArrayObject*)object)->dimensions[1] : ((PyArrayObject*)object)->dimensions[0])), - object_(object) { - - if (((PyObject*)object != Py_None) && !PyArray_ISONESEGMENT(object)) - throw std::invalid_argument("Numpy array must be a in one contiguous segment to be able to be transferred to a Eigen Map."); - - Py_XINCREF(object_); - } - FlattenedMap &operator=(const FlattenedMap &other) { - if (other.object_) { - new (this) FlattenedMap(other.object_); - } else { - // Replace the memory that we point to (not a memory allocation) - new (this) FlattenedMap(const_cast(other.data()), - other.rows(), - other.cols(), - other.outerStride(), - other.innerStride()); - } - - return *this; - } - - operator Base() const { - return static_cast(*this); - } - - operator Base&() const { - return static_cast(*this); - } - - operator EigencyDenseBase() const { - return EigencyDenseBase(static_cast(*this)); - } - - virtual ~FlattenedMap() { - Py_XDECREF(object_); - } - -private: - PyArrayObject * const object_; -}; - - -template -class Map: public MapBase { -public: - typedef MapBase Base; - typedef typename MatrixType::Scalar Scalar; - - enum { - RowsAtCompileTime = Base::Base::RowsAtCompileTime, - ColsAtCompileTime = Base::Base::ColsAtCompileTime - }; - - Map() - : Base(NULL, - (RowsAtCompileTime == Eigen::Dynamic) ? 0 : RowsAtCompileTime, - (ColsAtCompileTime == Eigen::Dynamic) ? 0 : ColsAtCompileTime), - object_(NULL) { - } - - Map(Scalar *data, long rows, long cols) - : Base(data, rows, cols), - object_(NULL) {} - - Map(PyArrayObject *object) - : Base((PyObject*)object == Py_None? NULL: (Scalar *)object->data, - // ROW: If array is in row-major order, transpose (see README) - (PyObject*)object == Py_None? 0 : - (!PyArray_IS_F_CONTIGUOUS(object) - ? ((object->nd == 1) - ? 1 // ROW: If 1D row-major numpy array, set to 1 (row vector) - : object->dimensions[1]) - : object->dimensions[0]), - // COLUMN: If array is in row-major order: transpose (see README) - (PyObject*)object == Py_None? 0 : - (!PyArray_IS_F_CONTIGUOUS(object) - ? object->dimensions[0] - : ((object->nd == 1) - ? 
1 // COLUMN: If 1D col-major numpy array, set to length (column vector) - : object->dimensions[1]))), - object_(object) { - - if (((PyObject*)object != Py_None) && !PyArray_ISONESEGMENT(object)) - throw std::invalid_argument("Numpy array must be a in one contiguous segment to be able to be transferred to a Eigen Map."); - Py_XINCREF(object_); - } - - Map &operator=(const Map &other) { - if (other.object_) { - new (this) Map(other.object_); - } else { - // Replace the memory that we point to (not a memory allocation) - new (this) Map(const_cast(other.data()), - other.rows(), - other.cols()); - } - - return *this; - } - - Map &operator=(const MatrixType &other) { - MapBase::operator=(other); - return *this; - } - - operator Base() const { - return static_cast(*this); - } - - operator Base&() const { - return static_cast(*this); - } - - operator MatrixType() const { - return MatrixType(static_cast(*this)); - } - - virtual ~Map() { - Py_XDECREF(object_); - } - -private: - PyArrayObject * const object_; -}; - - -} - -#endif - - - diff --git a/cython/requirements.txt b/cython/requirements.txt deleted file mode 100644 index 8d3c7aeb4..000000000 --- a/cython/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -Cython>=0.25.2 -backports_abc>=0.5 -numpy>=1.11.0 diff --git a/examples/Data/Klaus3.g2o b/examples/Data/Klaus3.g2o index 4c7233fa7..83a6e6fd2 100644 --- a/examples/Data/Klaus3.g2o +++ b/examples/Data/Klaus3.g2o @@ -1,6 +1,6 @@ -VERTEX_SE3:QUAT 0 -3.865747774038187 0.06639337702667497 -0.16064874691945374 0.024595211709139555 0.49179523413089893 -0.06279232989379242 0.8680954132776109 -VERTEX_SE3:QUAT 1 -3.614793159814815 0.04774490041587656 -0.2837650367985949 0.00991721787943912 0.4854918961891193 -0.042343290945895576 0.8731588132957809 -VERTEX_SE3:QUAT 2 -3.255096913553434 0.013296754286114112 -0.5339792269680574 -0.027851108010665374 0.585478168397957 -0.05088341463532465 0.8086102325762403 -EDGE_SE3:QUAT 0 1 0.2509546142233723 -0.01864847661079841 -0.12311628987914114 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 -EDGE_SE3:QUAT 0 2 0.6106508604847534 -0.05309662274056086 -0.3733304800486037 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 -EDGE_SE3:QUAT 1 2 0.3596962462613811 -0.03444814612976245 -0.25021419016946256 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 +VERTEX_SE3:QUAT 0 -1.6618596980158338 -0.5736497760548741 -3.3319774096611026 -0.02676080288219576 -0.024497002638379624 -0.015064701622500615 0.9992281076190063 +VERTEX_SE3:QUAT 1 -1.431820463019384 -0.549139761976065 -3.160677992237872 -0.049543805396343954 -0.03232420352077356 -0.004386230477751116 0.998239108728862 +VERTEX_SE3:QUAT 2 -1.0394840214436651 -0.5268841046291037 -2.972143862665523 -0.07993768981394891 0.0825062894866454 -0.04088089479075661 0.9925378735259738 +EDGE_SE3:QUAT 0 1 0.23003923499644974 0.02451001407880915 0.17129941742323052 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 +EDGE_SE3:QUAT 0 2 0.6223756765721686 0.04676567142577037 0.35983354699557957 -0.054972994022992064 0.10432547598981769 
-0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 +EDGE_SE3:QUAT 1 2 0.3923364415757189 0.022255657346961222 0.18853412957234905 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 diff --git a/examples/IMUKittiExampleGPS.cpp b/examples/IMUKittiExampleGPS.cpp index f1d89b47a..e2ca49647 100644 --- a/examples/IMUKittiExampleGPS.cpp +++ b/examples/IMUKittiExampleGPS.cpp @@ -287,7 +287,7 @@ int main(int argc, char* argv[]) { new_values.insert(current_pose_key, gps_pose); printf("################ POSE INCLUDED AT TIME %lf ################\n", t); - gps_pose.translation().print(); + cout << gps_pose.translation(); printf("\n\n"); } else { new_values.insert(current_pose_key, current_pose_global); diff --git a/gtsam/CMakeLists.txt b/gtsam/CMakeLists.txt index 8736a5954..37c4a1f88 100644 --- a/gtsam/CMakeLists.txt +++ b/gtsam/CMakeLists.txt @@ -133,6 +133,18 @@ endif() # paths so that the compiler uses GTSAM headers in our source directory instead # of any previously installed GTSAM headers. target_include_directories(gtsam BEFORE PUBLIC + # main gtsam includes: + $ + $ + # config.h + $ + # unit tests: + $ +) +# 3rdparty libraries: use the "system" flag so they are included via "-isystem" +# and warnings (and warnings-considered-errors) in those headers are not +# reported as warnings/errors in our targets: +target_include_directories(gtsam SYSTEM BEFORE PUBLIC # SuiteSparse_config $ $ @@ -141,13 +153,6 @@ target_include_directories(gtsam BEFORE PUBLIC # CCOLAMD $ $ - # main gtsam includes: - $ - $ - # config.h - $ - # unit tests: - $ ) if(GTSAM_SUPPORT_NESTED_DISSECTION) target_include_directories(gtsam BEFORE PUBLIC @@ -212,5 +217,5 @@ if (GTSAM_INSTALL_MATLAB_TOOLBOX) endif() # Wrap - wrap_and_install_library(../gtsam.h "${GTSAM_ADDITIONAL_LIBRARIES}" "" "${mexFlags}") + wrap_and_install_library(gtsam.i "${GTSAM_ADDITIONAL_LIBRARIES}" "" "${mexFlags}") endif () diff --git a/gtsam/config.h.in b/gtsam/config.h.in index 9dc10c36a..9d1bd4ebd 100644 --- a/gtsam/config.h.in +++ b/gtsam/config.h.in @@ -72,9 +72,6 @@ // Make sure dependent projects that want it can see deprecated functions #cmakedefine GTSAM_ALLOW_DEPRECATED_SINCE_V41 -// Publish flag about Eigen typedef -#cmakedefine GTSAM_TYPEDEF_POINTS_TO_VECTORS - // Support Metis-based nested dissection #cmakedefine GTSAM_SUPPORT_NESTED_DISSECTION diff --git a/gtsam/geometry/Point2.cpp b/gtsam/geometry/Point2.cpp index 4cead869f..d8060cfcf 100644 --- a/gtsam/geometry/Point2.cpp +++ b/gtsam/geometry/Point2.cpp @@ -50,37 +50,6 @@ double distance2(const Point2& p, const Point2& q, OptionalJacobian<1, 2> H1, } } -#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS - -/* ************************************************************************* */ -void Point2::print(const string& s) const { - cout << s << *this << endl; -} - -/* ************************************************************************* */ -bool Point2::equals(const Point2& q, double tol) const { - return (std::abs(x() - q.x()) < tol && std::abs(y() - q.y()) < tol); -} - -/* ************************************************************************* */ -double Point2::norm(OptionalJacobian<1,2> H) const { - return gtsam::norm2(*this, H); -} - -/* ************************************************************************* */ -double Point2::distance(const Point2& point, OptionalJacobian<1,2> H1, - 
OptionalJacobian<1,2> H2) const { - return gtsam::distance2(*this, point, H1, H2); -} - -/* ************************************************************************* */ -ostream &operator<<(ostream &os, const Point2& p) { - os << '(' << p.x() << ", " << p.y() << ')'; - return os; -} - -#endif // GTSAM_TYPEDEF_POINTS_TO_VECTORS - /* ************************************************************************* */ // Math inspired by http://paulbourke.net/geometry/circlesphere/ boost::optional circleCircleIntersection(double R_d, double r_d, diff --git a/gtsam/geometry/Point2.h b/gtsam/geometry/Point2.h index e186f7b67..e6574fe41 100644 --- a/gtsam/geometry/Point2.h +++ b/gtsam/geometry/Point2.h @@ -22,111 +22,9 @@ namespace gtsam { -#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS - - /// As of GTSAM 4, in order to make GTSAM more lean, - /// it is now possible to just typedef Point2 to Vector2 - typedef Vector2 Point2; - -#else - -/** - * A 2D point - * Complies with the Testable Concept - * Functional, so no set functions: once created, a point is constant. - * @addtogroup geometry - * \nosubgrouping - */ -class Point2 : public Vector2 { -private: - -public: - enum { dimension = 2 }; - /// @name Standard Constructors - /// @{ - - /// default constructor - Point2() {} - - using Vector2::Vector2; - - /// @} - /// @name Advanced Constructors - /// @{ - - /// construct from 2D vector - explicit Point2(const Vector2& v):Vector2(v) {} - /// @} - /// @name Testable - /// @{ - - /// print with optional string - GTSAM_EXPORT void print(const std::string& s = "") const; - - /// equals with an tolerance, prints out message if unequal - GTSAM_EXPORT bool equals(const Point2& q, double tol = 1e-9) const; - - /// @} - /// @name Group - /// @{ - - /// identity - inline static Point2 identity() {return Point2(0,0);} - - /// @} - /// @name Vector Space - /// @{ - - /** creates a unit vector */ - Point2 unit() const { return *this/norm(); } - - /** norm of point, with derivative */ - GTSAM_EXPORT double norm(OptionalJacobian<1,2> H = boost::none) const; - - /** distance between two points */ - GTSAM_EXPORT double distance(const Point2& p2, OptionalJacobian<1,2> H1 = boost::none, - OptionalJacobian<1,2> H2 = boost::none) const; - - /// @} - /// @name Standard Interface - /// @{ - - /// equality - inline bool operator ==(const Point2& q) const {return x()==q.x() && y()==q.y();} - - /// get x - inline double x() const {return (*this)[0];} - - /// get y - inline double y() const {return (*this)[1];} - - /// return vectorized form (column-wise). 
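With GTSAM_TYPEDEF_POINTS_TO_VECTORS gone, Point2 is always a plain Vector2, so the member helpers deleted here map onto Eigen expressions plus the free functions kept in this file (gtsam::norm2, gtsam::distance2). A minimal usage sketch, assuming only a standard GTSAM install that provides <gtsam/geometry/Point2.h>:

#include <gtsam/geometry/Point2.h>
#include <iostream>

int main() {
  gtsam::Point2 p(1.0, 2.0), q(4.0, 6.0);
  // p.norm() and p.distance(q) become free-function calls:
  std::cout << gtsam::norm2(p) << "\n";         // sqrt(5), about 2.236
  std::cout << gtsam::distance2(p, q) << "\n";  // 5
  // p.print("p: ") becomes plain Eigen streaming:
  std::cout << "p: " << p.transpose() << "\n";
  return 0;
}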
- const Vector2& vector() const { return *this; } - - /// @} - - /// Streaming - GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Point2& p); - - private: - /// @name Advanced Interface - /// @{ - - /** Serialization function */ - friend class boost::serialization::access; - template - void serialize(ARCHIVE & ar, const unsigned int /*version*/) - { - ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Vector2);} - - /// @} -}; - -template<> -struct traits : public internal::VectorSpace { -}; - -#endif // GTSAM_TYPEDEF_POINTS_TO_VECTORS +/// As of GTSAM 4, in order to make GTSAM more lean, +/// it is now possible to just typedef Point2 to Vector2 +typedef Vector2 Point2; /// Distance of the point from the origin, with Jacobian GTSAM_EXPORT double norm2(const Point2& p, OptionalJacobian<1, 2> H = boost::none); diff --git a/gtsam/geometry/Point3.cpp b/gtsam/geometry/Point3.cpp index 25fb9b92d..7a46f5988 100644 --- a/gtsam/geometry/Point3.cpp +++ b/gtsam/geometry/Point3.cpp @@ -22,47 +22,6 @@ using namespace std; namespace gtsam { -#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS -bool Point3::equals(const Point3 &q, double tol) const { - return (std::abs(x() - q.x()) < tol && std::abs(y() - q.y()) < tol && - std::abs(z() - q.z()) < tol); -} - -void Point3::print(const string& s) const { - cout << s << *this << endl; -} - -/* ************************************************************************* */ -double Point3::distance(const Point3 &q, OptionalJacobian<1, 3> H1, - OptionalJacobian<1, 3> H2) const { - return gtsam::distance3(*this,q,H1,H2); -} - -double Point3::norm(OptionalJacobian<1,3> H) const { - return gtsam::norm3(*this, H); -} - -Point3 Point3::normalized(OptionalJacobian<3,3> H) const { - return gtsam::normalize(*this, H); -} - -Point3 Point3::cross(const Point3 &q, OptionalJacobian<3, 3> H1, - OptionalJacobian<3, 3> H2) const { - return gtsam::cross(*this, q, H1, H2); -} - -double Point3::dot(const Point3 &q, OptionalJacobian<1, 3> H1, - OptionalJacobian<1, 3> H2) const { - return gtsam::dot(*this, q, H1, H2); -} - -/* ************************************************************************* */ -ostream &operator<<(ostream &os, const Point3& p) { - os << '[' << p.x() << ", " << p.y() << ", " << p.z() << "]'"; - return os; -} - -#endif /* ************************************************************************* */ double distance3(const Point3 &p1, const Point3 &q, OptionalJacobian<1, 3> H1, OptionalJacobian<1, 3> H2) { diff --git a/gtsam/geometry/Point3.h b/gtsam/geometry/Point3.h index 1ab5c313e..19e328022 100644 --- a/gtsam/geometry/Point3.h +++ b/gtsam/geometry/Point3.h @@ -29,106 +29,9 @@ namespace gtsam { -#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS - - /// As of GTSAM 4, in order to make GTSAM more lean, - /// it is now possible to just typedef Point3 to Vector3 - typedef Vector3 Point3; - -#else - -/** - * A 3D point is just a Vector3 with some additional methods - * @addtogroup geometry - * \nosubgrouping - */ -class Point3 : public Vector3 { - - public: - - enum { dimension = 3 }; - - /// @name Standard Constructors - /// @{ - - using Vector3::Vector3; - - /// @} - /// @name Testable - /// @{ - - /** print with optional string */ - GTSAM_EXPORT void print(const std::string& s = "") const; - - /** equals with an tolerance */ - GTSAM_EXPORT bool equals(const Point3& p, double tol = 1e-9) const; - - /// @} - /// @name Group - /// @{ - - /// identity for group operation - inline static Point3 identity() { return Point3(0.0, 0.0, 0.0); } - - /// @} - /// @name Vector Space - /// 
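These Point3 hunks follow the same pattern: the removed members simply forwarded to the free functions gtsam::distance3, gtsam::norm3, gtsam::cross, and gtsam::dot, which stay in Point3.cpp. A small sketch of calling them directly (the point values are arbitrary):

#include <gtsam/geometry/Point3.h>
#include <iostream>

int main() {
  gtsam::Point3 p(1.0, 0.0, 0.0), q(0.0, 1.0, 0.0);
  std::cout << gtsam::dot(p, q) << "\n";                // 0
  std::cout << gtsam::cross(p, q).transpose() << "\n";  // 0 0 1
  std::cout << gtsam::distance3(p, q) << "\n";          // sqrt(2)
  std::cout << gtsam::norm3(p) << "\n";                 // 1
  return 0;
}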
@{ - - /** distance between two points */ - GTSAM_EXPORT double distance(const Point3& p2, OptionalJacobian<1, 3> H1 = boost::none, - OptionalJacobian<1, 3> H2 = boost::none) const; - - /** Distance of the point from the origin, with Jacobian */ - GTSAM_EXPORT double norm(OptionalJacobian<1,3> H = boost::none) const; - - /** normalize, with optional Jacobian */ - GTSAM_EXPORT Point3 normalized(OptionalJacobian<3, 3> H = boost::none) const; - - /** cross product @return this x q */ - GTSAM_EXPORT Point3 cross(const Point3 &q, OptionalJacobian<3, 3> H_p = boost::none, // - OptionalJacobian<3, 3> H_q = boost::none) const; - - /** dot product @return this * q*/ - GTSAM_EXPORT double dot(const Point3 &q, OptionalJacobian<1, 3> H_p = boost::none, // - OptionalJacobian<1, 3> H_q = boost::none) const; - - /// @} - /// @name Standard Interface - /// @{ - - /// return as Vector3 - const Vector3& vector() const { return *this; } - - /// get x - inline double x() const {return (*this)[0];} - - /// get y - inline double y() const {return (*this)[1];} - - /// get z - inline double z() const {return (*this)[2];} - - /// @} - - /// Output stream operator - GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Point3& p); - - private: - /** Serialization function */ - friend class boost::serialization::access; - template - void serialize(ARCHIVE & ar, const unsigned int /*version*/) { - ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Vector3); - } - }; - -template<> -struct traits : public internal::VectorSpace {}; - -template<> -struct traits : public internal::VectorSpace {}; - -#endif // GTSAM_TYPEDEF_POINTS_TO_VECTORS +/// As of GTSAM 4, in order to make GTSAM more lean, +/// it is now possible to just typedef Point3 to Vector3 +typedef Vector3 Point3; // Convenience typedef typedef std::pair Point3Pair; diff --git a/gtsam/geometry/Rot2.cpp b/gtsam/geometry/Rot2.cpp index 04ed16774..7502c4ccf 100644 --- a/gtsam/geometry/Rot2.cpp +++ b/gtsam/geometry/Rot2.cpp @@ -39,6 +39,13 @@ Rot2 Rot2::atan2(double y, double x) { return R.normalize(); } +/* ************************************************************************* */ +Rot2 Rot2::Random(std::mt19937& rng) { + uniform_real_distribution randomAngle(-M_PI, M_PI); + double angle = randomAngle(rng); + return fromAngle(angle); +} + /* ************************************************************************* */ void Rot2::print(const string& s) const { cout << s << ": " << theta() << endl; diff --git a/gtsam/geometry/Rot2.h b/gtsam/geometry/Rot2.h index f46f12540..8a361f558 100644 --- a/gtsam/geometry/Rot2.h +++ b/gtsam/geometry/Rot2.h @@ -22,6 +22,8 @@ #include #include +#include + namespace gtsam { /** @@ -79,6 +81,14 @@ namespace gtsam { /** Named constructor that behaves as atan2, i.e., y,x order (!) 
and normalizes */ static Rot2 atan2(double y, double x); + /** + * Random, generates random angle \in [-p,pi] + * Example: + * std::mt19937 engine(42); + * Unit3 unit = Unit3::Random(engine); + */ + static Rot2 Random(std::mt19937 & rng); + /// @} /// @name Testable /// @{ diff --git a/gtsam/geometry/SOn-inl.h b/gtsam/geometry/SOn-inl.h index 9edce8336..6180f4cc7 100644 --- a/gtsam/geometry/SOn-inl.h +++ b/gtsam/geometry/SOn-inl.h @@ -60,8 +60,9 @@ typename SO::TangentVector SO::ChartAtOrigin::Local(const SO& R, template typename SO::MatrixDD SO::AdjointMap() const { + if (N==2) return I_1x1; // SO(2) case throw std::runtime_error( - "SO::AdjointMap only implemented for SO3 and SO4."); + "SO::AdjointMap only implemented for SO2, SO3 and SO4."); } template @@ -84,26 +85,22 @@ typename SO::MatrixDD SO::LogmapDerivative(const TangentVector& omega) { throw std::runtime_error("O::LogmapDerivative only implemented for SO3."); } +// Default fixed size version (but specialized elsewehere for N=2,3,4) template typename SO::VectorN2 SO::vec( OptionalJacobian H) const { - const size_t n = rows(); - const size_t n2 = n * n; - // Vectorize - VectorN2 X(n2); - X << Eigen::Map(matrix_.data(), n2, 1); + VectorN2 X = Eigen::Map(matrix_.data()); // If requested, calculate H as (I \oplus Q) * P, // where Q is the N*N rotation matrix, and P is calculated below. if (H) { // Calculate P matrix of vectorized generators // TODO(duy): Should we refactor this as the jacobian of Hat? - Matrix P = VectorizedGenerators(n); - const size_t d = dim(); - H->resize(n2, d); - for (size_t i = 0; i < n; i++) { - H->block(i * n, 0, n, d) = matrix_ * P.block(i * n, 0, n, d); + Matrix P = SO::VectorizedGenerators(); + for (size_t i = 0; i < N; i++) { + H->block(i * N, 0, N, dimension) = + matrix_ * P.block(i * N, 0, N, dimension); } } return X; diff --git a/gtsam/geometry/SOn.cpp b/gtsam/geometry/SOn.cpp index 37b6c1784..c6cff4214 100644 --- a/gtsam/geometry/SOn.cpp +++ b/gtsam/geometry/SOn.cpp @@ -22,21 +22,18 @@ namespace gtsam { template <> -GTSAM_EXPORT -Matrix SOn::Hat(const Vector& xi) { +GTSAM_EXPORT void SOn::Hat(const Vector &xi, Eigen::Ref X) { size_t n = AmbientDim(xi.size()); - if (n < 2) throw std::invalid_argument("SO::Hat: n<2 not supported"); - - Matrix X(n, n); // allocate space for n*n skew-symmetric matrix - X.setZero(); - if (n == 2) { + if (n < 2) + throw std::invalid_argument("SO::Hat: n<2 not supported"); + else if (n == 2) { // Handle SO(2) case as recursion bottom assert(xi.size() == 1); X << 0, -xi(0), xi(0), 0; } else { // Recursively call SO(n-1) call for top-left block const size_t dmin = (n - 1) * (n - 2) / 2; - X.topLeftCorner(n - 1, n - 1) = Hat(xi.tail(dmin)); + Hat(xi.tail(dmin), X.topLeftCorner(n - 1, n - 1)); // determine sign of last element (signs alternate) double sign = pow(-1.0, xi.size()); @@ -47,7 +44,14 @@ Matrix SOn::Hat(const Vector& xi) { X(j, n - 1) = -X(n - 1, j); sign = -sign; } + X(n - 1, n - 1) = 0; // bottom-right } +} + +template <> GTSAM_EXPORT Matrix SOn::Hat(const Vector &xi) { + size_t n = AmbientDim(xi.size()); + Matrix X(n, n); // allocate space for n*n skew-symmetric matrix + SOn::Hat(xi, X); return X; } @@ -99,4 +103,27 @@ SOn LieGroup::between(const SOn& g, DynamicJacobian H1, return result; } +// Dynamic version of vec +template <> typename SOn::VectorN2 SOn::vec(DynamicJacobian H) const { + const size_t n = rows(), n2 = n * n; + + // Vectorize + VectorN2 X(n2); + X << Eigen::Map(matrix_.data(), n2, 1); + + // If requested, calculate H as (I \oplus Q) * P, + // 
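A usage sketch for the Rot2::Random named constructor added above, which draws an angle uniformly in [-pi, pi] (the seed 42 mirrors the doc comment and is otherwise arbitrary):

#include <gtsam/geometry/Rot2.h>
#include <iostream>
#include <random>

int main() {
  std::mt19937 rng(42);
  // One random planar rotation, angle uniform in [-pi, pi].
  gtsam::Rot2 R = gtsam::Rot2::Random(rng);
  std::cout << "theta = " << R.theta() << "\n";
  return 0;
}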
where Q is the N*N rotation matrix, and P is calculated below. + if (H) { + // Calculate P matrix of vectorized generators + // TODO(duy): Should we refactor this as the jacobian of Hat? + Matrix P = SOn::VectorizedGenerators(n); + const size_t d = dim(); + H->resize(n2, d); + for (size_t i = 0; i < n; i++) { + H->block(i * n, 0, n, d) = matrix_ * P.block(i * n, 0, n, d); + } + } + return X; +} + } // namespace gtsam diff --git a/gtsam/geometry/SOn.h b/gtsam/geometry/SOn.h index 004569416..86b6019e1 100644 --- a/gtsam/geometry/SOn.h +++ b/gtsam/geometry/SOn.h @@ -98,8 +98,8 @@ class SO : public LieGroup, internal::DimensionSO(N)> { template > static SO Lift(size_t n, const Eigen::MatrixBase &R) { Matrix Q = Matrix::Identity(n, n); - size_t p = R.rows(); - assert(p < n && R.cols() == p); + const int p = R.rows(); + assert(p >= 0 && p <= static_cast(n) && R.cols() == p); Q.topLeftCorner(p, p) = R; return SO(Q); } @@ -208,7 +208,7 @@ class SO : public LieGroup, internal::DimensionSO(N)> { // Calculate run-time dimensionality of manifold. // Available as dimension or Dim() for fixed N. - size_t dim() const { return Dimension(matrix_.rows()); } + size_t dim() const { return Dimension(static_cast(matrix_.rows())); } /** * Hat operator creates Lie algebra element corresponding to d-vector, where d @@ -227,9 +227,10 @@ class SO : public LieGroup, internal::DimensionSO(N)> { */ static MatrixNN Hat(const TangentVector& xi); - /** - * Inverse of Hat. See note about xi element order in Hat. - */ + /// In-place version of Hat (see details there), implements recursion. + static void Hat(const Vector &xi, Eigen::Ref X); + + /// Inverse of Hat. See note about xi element order in Hat. static TangentVector Vee(const MatrixNN& X); // Chart at origin @@ -295,10 +296,10 @@ class SO : public LieGroup, internal::DimensionSO(N)> { template > static Matrix VectorizedGenerators() { constexpr size_t N2 = static_cast(N * N); - Matrix G(N2, dimension); + Eigen::Matrix G; for (size_t j = 0; j < dimension; j++) { const auto X = Hat(Vector::Unit(dimension, j)); - G.col(j) = Eigen::Map(X.data(), N2, 1); + G.col(j) = Eigen::Map(X.data()); } return G; } @@ -362,6 +363,11 @@ template <> SOn LieGroup::between(const SOn& g, DynamicJacobian H1, DynamicJacobian H2) const; +/* + * Specialize dynamic vec. 
+ */ +template <> typename SOn::VectorN2 SOn::vec(DynamicJacobian H) const; + /** Serialization function */ template void serialize( diff --git a/gtsam/geometry/tests/testPoint2.cpp b/gtsam/geometry/tests/testPoint2.cpp index 8b9e8a7e6..6e4d408c7 100644 --- a/gtsam/geometry/tests/testPoint2.cpp +++ b/gtsam/geometry/tests/testPoint2.cpp @@ -237,16 +237,6 @@ TEST( Point2, circleCircleIntersection) { } -/* ************************************************************************* */ -#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS -TEST( Point2, stream) { - Point2 p(1, 2); - std::ostringstream os; - os << p; - EXPECT(os.str() == "(1, 2)"); -} -#endif - /* ************************************************************************* */ int main () { TestResult tr; diff --git a/gtsam/geometry/tests/testPoint3.cpp b/gtsam/geometry/tests/testPoint3.cpp index e2396f7e9..a7c2ac50c 100644 --- a/gtsam/geometry/tests/testPoint3.cpp +++ b/gtsam/geometry/tests/testPoint3.cpp @@ -153,16 +153,6 @@ TEST( Point3, cross2) { } } -/* ************************************************************************* */ -#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS -TEST( Point3, stream) { - Point3 p(1, 2, -3); - std::ostringstream os; - os << p; - EXPECT(os.str() == "[1, 2, -3]'"); -} -#endif - //************************************************************************* TEST (Point3, normalize) { Matrix actualH; diff --git a/gtsam/geometry/tests/testPose3.cpp b/gtsam/geometry/tests/testPose3.cpp index 596fa8957..c94e21ba5 100644 --- a/gtsam/geometry/tests/testPose3.cpp +++ b/gtsam/geometry/tests/testPose3.cpp @@ -864,11 +864,7 @@ TEST( Pose3, stream) os << T; string expected; -#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\nt: 0\n0\n0";; -#else - expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\nt: [0, 0, 0]'"; -#endif EXPECT(os.str() == expected); } @@ -1043,13 +1039,9 @@ TEST(Pose3, print) { // Add expected rotation expected << "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\n"; -#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS expected << "t: 1\n" "2\n" "3\n"; -#else - expected << "t: [" << translation.x() << ", " << translation.y() << ", " << translation.z() << "]'\n"; -#endif // reset cout to the original stream std::cout.rdbuf(oldbuf); diff --git a/gtsam/geometry/tests/testSOn.cpp b/gtsam/geometry/tests/testSOn.cpp index 1cf8caed2..4d0ed98b3 100644 --- a/gtsam/geometry/tests/testSOn.cpp +++ b/gtsam/geometry/tests/testSOn.cpp @@ -39,8 +39,8 @@ using namespace std; using namespace gtsam; //****************************************************************************** -// Test dhynamic with n=0 -TEST(SOn, SO0) { +// Test dynamic with n=0 +TEST(SOn, SOn0) { const auto R = SOn(0); EXPECT_LONGS_EQUAL(0, R.rows()); EXPECT_LONGS_EQUAL(Eigen::Dynamic, SOn::dimension); @@ -50,7 +50,8 @@ TEST(SOn, SO0) { } //****************************************************************************** -TEST(SOn, SO5) { +// Test dynamic with n=5 +TEST(SOn, SOn5) { const auto R = SOn(5); EXPECT_LONGS_EQUAL(5, R.rows()); EXPECT_LONGS_EQUAL(Eigen::Dynamic, SOn::dimension); @@ -59,6 +60,28 @@ TEST(SOn, SO5) { EXPECT_LONGS_EQUAL(10, traits::GetDimension(R)); } +//****************************************************************************** +// Test fixed with n=2 +TEST(SOn, SO0) { + const auto R = SO<2>(); + EXPECT_LONGS_EQUAL(2, R.rows()); + EXPECT_LONGS_EQUAL(1, SO<2>::dimension); + EXPECT_LONGS_EQUAL(1, SO<2>::Dim()); + EXPECT_LONGS_EQUAL(1, R.dim()); + EXPECT_LONGS_EQUAL(1, traits>::GetDimension(R)); +} + 
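A stand-alone round-trip through the Hat/Vee pair these tests exercise, using only calls that appear in this patch (the tangent vector is arbitrary):

#include <gtsam/geometry/SOn.h>
#include <iostream>

int main() {
  gtsam::Vector w = (gtsam::Vector(3) << 1.0, 2.0, 3.0).finished();
  // Hat builds the skew-symmetric matrix [[0,-3,2],[3,0,-1],[-2,1,0]];
  // Vee recovers the original tangent vector.
  gtsam::Matrix X = gtsam::SOn::Hat(w);
  gtsam::Vector w2 = gtsam::SOn::Vee(X);
  std::cout << X << "\n" << w2.transpose() << "\n";
  return 0;
}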
+//****************************************************************************** +// Test fixed with n=5 +TEST(SOn, SO5) { + const auto R = SO<5>(); + EXPECT_LONGS_EQUAL(5, R.rows()); + EXPECT_LONGS_EQUAL(10, SO<5>::dimension); + EXPECT_LONGS_EQUAL(10, SO<5>::Dim()); + EXPECT_LONGS_EQUAL(10, R.dim()); + EXPECT_LONGS_EQUAL(10, traits>::GetDimension(R)); +} + //****************************************************************************** TEST(SOn, Concept) { BOOST_CONCEPT_ASSERT((IsGroup)); @@ -105,29 +128,29 @@ TEST(SOn, HatVee) { EXPECT(assert_equal((Vector)v.head<1>(), SOn::Vee(actual2))); Matrix expected3(3, 3); - expected3 << 0, -3, 2, // - 3, 0, -1, // - -2, 1, 0; + expected3 << 0, -3, 2, // + 3, 0, -1, // + -2, 1, 0; const auto actual3 = SOn::Hat(v.head<3>()); EXPECT(assert_equal(expected3, actual3)); EXPECT(assert_equal(skewSymmetric(1, 2, 3), actual3)); EXPECT(assert_equal((Vector)v.head<3>(), SOn::Vee(actual3))); Matrix expected4(4, 4); - expected4 << 0, -6, 5, 3, // - 6, 0, -4, -2, // - -5, 4, 0, 1, // - -3, 2, -1, 0; + expected4 << 0, -6, 5, 3, // + 6, 0, -4, -2, // + -5, 4, 0, 1, // + -3, 2, -1, 0; const auto actual4 = SOn::Hat(v.head<6>()); EXPECT(assert_equal(expected4, actual4)); EXPECT(assert_equal((Vector)v.head<6>(), SOn::Vee(actual4))); Matrix expected5(5, 5); - expected5 << 0,-10, 9, 7, -4, // - 10, 0, -8, -6, 3, // - -9, 8, 0, 5, -2, // - -7, 6, -5, 0, 1, // - 4, -3, 2, -1, 0; + expected5 << 0, -10, 9, 7, -4, // + 10, 0, -8, -6, 3, // + -9, 8, 0, 5, -2, // + -7, 6, -5, 0, 1, // + 4, -3, 2, -1, 0; const auto actual5 = SOn::Hat(v); EXPECT(assert_equal(expected5, actual5)); EXPECT(assert_equal((Vector)v, SOn::Vee(actual5))); @@ -159,6 +182,22 @@ TEST(SOn, RetractLocal) { CHECK(assert_equal(v1, SOn::ChartAtOrigin::Local(Q1), 1e-7)); } +//****************************************************************************** + +Matrix RetractJacobian(size_t n) { return SOn::VectorizedGenerators(n); } + +/// Test Jacobian of Retract at origin +TEST(SOn, RetractJacobian) { + Matrix actualH = RetractJacobian(3); + boost::function h = [](const Vector &v) { + return SOn::ChartAtOrigin::Retract(v).matrix(); + }; + Vector3 v; + v.setZero(); + const Matrix expectedH = numericalDerivative11(h, v, 1e-5); + CHECK(assert_equal(expectedH, actualH)); +} + //****************************************************************************** TEST(SOn, vec) { Vector10 v; @@ -166,11 +205,28 @@ TEST(SOn, vec) { SOn Q = SOn::ChartAtOrigin::Retract(v); Matrix actualH; const Vector actual = Q.vec(actualH); - boost::function h = [](const SOn& Q) { return Q.vec(); }; + boost::function h = [](const SOn &Q) { return Q.vec(); }; const Matrix H = numericalDerivative11(h, Q, 1e-5); CHECK(assert_equal(H, actualH)); } +//****************************************************************************** +TEST(SOn, VectorizedGenerators) { + // Default fixed + auto actual2 = SO<2>::VectorizedGenerators(); + CHECK(actual2.rows()==4 && actual2.cols()==1) + + // Specialized + auto actual3 = SO<3>::VectorizedGenerators(); + CHECK(actual3.rows()==9 && actual3.cols()==3) + auto actual4 = SO<4>::VectorizedGenerators(); + CHECK(actual4.rows()==16 && actual4.cols()==6) + + // Dynamic + auto actual5 = SOn::VectorizedGenerators(5); + CHECK(actual5.rows()==25 && actual5.cols()==10) +} + //****************************************************************************** int main() { TestResult tr; diff --git a/gtsam.h b/gtsam/gtsam.i similarity index 95% rename from gtsam.h rename to gtsam/gtsam.i index 86bfa05b7..d67b74812 100644 --- 
a/gtsam.h +++ b/gtsam/gtsam.i @@ -618,6 +618,9 @@ class SOn { // Other methods Vector vec() const; Matrix matrix() const; + + // enabling serialization functionality + void serialize() const; }; #include @@ -2171,7 +2174,11 @@ class Values { // void insert(size_t j, const gtsam::Value& value); // void update(size_t j, const gtsam::Value& val); // gtsam::Value at(size_t j) const; - + + // The order is important: Vector has to precede Point2/Point3 so `atVector` + // can work for those fixed-size vectors. + void insert(size_t j, Vector vector); + void insert(size_t j, Matrix matrix); void insert(size_t j, const gtsam::Point2& point2); void insert(size_t j, const gtsam::Point3& point3); void insert(size_t j, const gtsam::Rot2& rot2); @@ -2188,8 +2195,6 @@ class Values { void insert(size_t j, const gtsam::PinholeCameraCal3_S2& simple_camera); void insert(size_t j, const gtsam::imuBias::ConstantBias& constant_bias); void insert(size_t j, const gtsam::NavState& nav_state); - void insert(size_t j, Vector vector); - void insert(size_t j, Matrix matrix); void update(size_t j, const gtsam::Point2& point2); void update(size_t j, const gtsam::Point3& point3); @@ -2797,16 +2802,16 @@ class SfmData { string findExampleDataFile(string name); pair load2D(string filename, - gtsam::noiseModel::Diagonal* model, int maxID, bool addNoise, bool smart); + gtsam::noiseModel::Diagonal* model, int maxIndex, bool addNoise, bool smart); pair load2D(string filename, - gtsam::noiseModel::Diagonal* model, int maxID, bool addNoise); + gtsam::noiseModel::Diagonal* model, int maxIndex, bool addNoise); pair load2D(string filename, - gtsam::noiseModel::Diagonal* model, int maxID); + gtsam::noiseModel::Diagonal* model, int maxIndex); pair load2D(string filename, gtsam::noiseModel::Diagonal* model); pair load2D(string filename); pair load2D_robust(string filename, - gtsam::noiseModel::Base* model); + gtsam::noiseModel::Base* model, int maxIndex); void save2D(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& config, gtsam::noiseModel::Diagonal* model, string filename); @@ -2816,8 +2821,8 @@ class BetweenFactorPose3s { BetweenFactorPose3s(); size_t size() const; - gtsam::BetweenFactorPose3* at(size_t i) const; - void push_back(const gtsam::BetweenFactorPose3* factor); + gtsam::BetweenFactor* at(size_t i) const; + void push_back(const gtsam::BetweenFactor* factor); }; #include @@ -2855,7 +2860,7 @@ virtual class KarcherMeanFactor : gtsam::NonlinearFactor { }; #include -gtsam::noiseModel::Isotropic* ConvertPose3NoiseModel( +gtsam::noiseModel::Isotropic* ConvertNoiseModel( gtsam::noiseModel::Base* model, size_t d); template @@ -2874,12 +2879,14 @@ virtual class FrobeniusBetweenFactor : gtsam::NoiseModelFactor { Vector evaluateError(const T& R1, const T& R2); }; -virtual class FrobeniusWormholeFactor : gtsam::NoiseModelFactor { - FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12, +#include + +virtual class ShonanFactor3 : gtsam::NoiseModelFactor { + ShonanFactor3(size_t key1, size_t key2, const gtsam::Rot3 &R12, size_t p); - FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12, - size_t p, gtsam::noiseModel::Base* model); - Vector evaluateError(const gtsam::SOn& Q1, const gtsam::SOn& Q2); + ShonanFactor3(size_t key1, size_t key2, const gtsam::Rot3 &R12, + size_t p, gtsam::noiseModel::Base *model); + Vector evaluateError(const gtsam::SOn &Q1, const gtsam::SOn &Q2); }; #include @@ -2895,6 +2902,125 @@ class BinaryMeasurement { typedef gtsam::BinaryMeasurement 
BinaryMeasurementUnit3; typedef gtsam::BinaryMeasurement BinaryMeasurementRot3; +#include + +// TODO(frank): copy/pasta below until we have integer template paremeters in wrap! + +class ShonanAveragingParameters2 { + ShonanAveragingParameters2(const gtsam::LevenbergMarquardtParams& lm); + ShonanAveragingParameters2(const gtsam::LevenbergMarquardtParams& lm, string method); + gtsam::LevenbergMarquardtParams getLMParams() const; + void setOptimalityThreshold(double value); + double getOptimalityThreshold() const; + void setAnchor(size_t index, const gtsam::Rot2& value); + void setAnchorWeight(double value); + double getAnchorWeight() const; + void setKarcherWeight(double value); + double getKarcherWeight(); + void setGaugesWeight(double value); + double getGaugesWeight(); +}; + +class ShonanAveragingParameters3 { + ShonanAveragingParameters3(const gtsam::LevenbergMarquardtParams& lm); + ShonanAveragingParameters3(const gtsam::LevenbergMarquardtParams& lm, string method); + gtsam::LevenbergMarquardtParams getLMParams() const; + void setOptimalityThreshold(double value); + double getOptimalityThreshold() const; + void setAnchor(size_t index, const gtsam::Rot3& value); + void setAnchorWeight(double value); + double getAnchorWeight() const; + void setKarcherWeight(double value); + double getKarcherWeight(); + void setGaugesWeight(double value); + double getGaugesWeight(); +}; + +class ShonanAveraging2 { + ShonanAveraging2(string g2oFile); + ShonanAveraging2(string g2oFile, + const gtsam::ShonanAveragingParameters2 ¶meters); + + // Query properties + size_t nrUnknowns() const; + size_t nrMeasurements() const; + gtsam::Rot2 measured(size_t i); + gtsam::KeyVector keys(size_t i); + + // Matrix API (advanced use, debugging) + Matrix denseD() const; + Matrix denseQ() const; + Matrix denseL() const; + // Matrix computeLambda_(Matrix S) const; + Matrix computeLambda_(const gtsam::Values& values) const; + Matrix computeA_(const gtsam::Values& values) const; + double computeMinEigenValue(const gtsam::Values& values) const; + gtsam::Values initializeWithDescent(size_t p, const gtsam::Values& values, + const Vector& minEigenVector, double minEigenValue) const; + + // Advanced API + gtsam::NonlinearFactorGraph buildGraphAt(size_t p) const; + gtsam::Values initializeRandomlyAt(size_t p) const; + double costAt(size_t p, const gtsam::Values& values) const; + pair computeMinEigenVector(const gtsam::Values& values) const; + bool checkOptimality(const gtsam::Values& values) const; + gtsam::LevenbergMarquardtOptimizer* createOptimizerAt(size_t p, const gtsam::Values& initial); + // gtsam::Values tryOptimizingAt(size_t p) const; + gtsam::Values tryOptimizingAt(size_t p, const gtsam::Values& initial) const; + gtsam::Values projectFrom(size_t p, const gtsam::Values& values) const; + gtsam::Values roundSolution(const gtsam::Values& values) const; + + // Basic API + double cost(const gtsam::Values& values) const; + gtsam::Values initializeRandomly() const; + pair run(const gtsam::Values& initial, size_t min_p, size_t max_p) const; +}; + +class ShonanAveraging3 { + ShonanAveraging3(string g2oFile); + ShonanAveraging3(string g2oFile, + const gtsam::ShonanAveragingParameters3 ¶meters); + + // TODO(frank): deprecate once we land pybind wrapper + ShonanAveraging3(const gtsam::BetweenFactorPose3s &factors); + ShonanAveraging3(const gtsam::BetweenFactorPose3s &factors, + const gtsam::ShonanAveragingParameters3 ¶meters); + + // Query properties + size_t nrUnknowns() const; + size_t nrMeasurements() const; + gtsam::Rot3 
measured(size_t i); + gtsam::KeyVector keys(size_t i); + + // Matrix API (advanced use, debugging) + Matrix denseD() const; + Matrix denseQ() const; + Matrix denseL() const; + // Matrix computeLambda_(Matrix S) const; + Matrix computeLambda_(const gtsam::Values& values) const; + Matrix computeA_(const gtsam::Values& values) const; + double computeMinEigenValue(const gtsam::Values& values) const; + gtsam::Values initializeWithDescent(size_t p, const gtsam::Values& values, + const Vector& minEigenVector, double minEigenValue) const; + + // Advanced API + gtsam::NonlinearFactorGraph buildGraphAt(size_t p) const; + gtsam::Values initializeRandomlyAt(size_t p) const; + double costAt(size_t p, const gtsam::Values& values) const; + pair computeMinEigenVector(const gtsam::Values& values) const; + bool checkOptimality(const gtsam::Values& values) const; + gtsam::LevenbergMarquardtOptimizer* createOptimizerAt(size_t p, const gtsam::Values& initial); + // gtsam::Values tryOptimizingAt(size_t p) const; + gtsam::Values tryOptimizingAt(size_t p, const gtsam::Values& initial) const; + gtsam::Values projectFrom(size_t p, const gtsam::Values& values) const; + gtsam::Values roundSolution(const gtsam::Values& values) const; + + // Basic API + double cost(const gtsam::Values& values) const; + gtsam::Values initializeRandomly() const; + pair run(const gtsam::Values& initial, size_t min_p, size_t max_p) const; +}; + //************************************************************************* // Navigation //************************************************************************* diff --git a/gtsam/navigation/tests/testNavState.cpp b/gtsam/navigation/tests/testNavState.cpp index c07240752..d38b76255 100644 --- a/gtsam/navigation/tests/testNavState.cpp +++ b/gtsam/navigation/tests/testNavState.cpp @@ -215,11 +215,7 @@ TEST(NavState, Stream) os << state; string expected; -#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\np: 0\n0\n0\nv: 0\n0\n0"; -#else - expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\np: [0, 0, 0]'\nv: [0, 0, 0]'"; -#endif EXPECT(os.str() == expected); } diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index 328a3facf..9a9c487b6 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -90,7 +90,7 @@ void NonlinearOptimizer::defaultOptimize() { // Iterative loop do { // Do next iteration - currentError = error(); + currentError = error(); // TODO(frank): don't do this twice at first !? Computed above! iterate(); tictoc_finishedIteration(); diff --git a/gtsam/sfm/ShonanAveraging.cpp b/gtsam/sfm/ShonanAveraging.cpp new file mode 100644 index 000000000..2485418cf --- /dev/null +++ b/gtsam/sfm/ShonanAveraging.cpp @@ -0,0 +1,854 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
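A minimal sketch of driving the ShonanAveraging3 interface listed above from C++. It assumes the accompanying header is gtsam/sfm/ShonanAveraging.h (matching the new .cpp below) and that pose3example.txt is reachable through findExampleDataFile; any g2o file with 3D between-measurements would work.

#include <gtsam/sfm/ShonanAveraging.h>
#include <gtsam/slam/dataset.h>
#include <iostream>
#include <string>

int main() {
  using namespace gtsam;
  // Assumed example dataset; substitute any g2o file with Pose3 measurements.
  std::string g2oFile = findExampleDataFile("pose3example.txt");
  ShonanAveraging3 shonan(g2oFile);

  Values initial = shonan.initializeRandomly();
  // Increase the relaxation rank p = 3..10 until the solution is certified.
  const auto result = shonan.run(initial, 3, 10);  // pair<Values, double>

  std::cout << "final cost = " << shonan.cost(result.first)
            << ", certificate (min eigenvalue) = " << result.second << "\n";
  return 0;
}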
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanAveraging.cpp + * @date March 2019 - August 2020 + * @author Frank Dellaert, David Rosen, and Jing Wu + * @brief Shonan Averaging algorithm + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace gtsam { + +// In Wrappers we have no access to this so have a default ready +static std::mt19937 kRandomNumberGenerator(42); + +using Sparse = Eigen::SparseMatrix; + +/* ************************************************************************* */ +template +ShonanAveragingParameters::ShonanAveragingParameters( + const LevenbergMarquardtParams &_lm, const std::string &method, + double optimalityThreshold, double alpha, double beta, double gamma) + : lm(_lm), optimalityThreshold(optimalityThreshold), alpha(alpha), + beta(beta), gamma(gamma) { + // By default, we will do conjugate gradient + lm.linearSolverType = LevenbergMarquardtParams::Iterative; + + // Create subgraph builder parameters + SubgraphBuilderParameters builderParameters; + builderParameters.skeletonType = SubgraphBuilderParameters::KRUSKAL; + builderParameters.skeletonWeight = SubgraphBuilderParameters::EQUAL; + builderParameters.augmentationWeight = SubgraphBuilderParameters::SKELETON; + builderParameters.augmentationFactor = 0.0; + + auto pcg = boost::make_shared(); + + // Choose optimization method + if (method == "SUBGRAPH") { + lm.iterativeParams = + boost::make_shared(builderParameters); + } else if (method == "SGPC") { + pcg->preconditioner_ = + boost::make_shared(builderParameters); + lm.iterativeParams = pcg; + } else if (method == "JACOBI") { + pcg->preconditioner_ = + boost::make_shared(); + lm.iterativeParams = pcg; + } else if (method == "QR") { + lm.setLinearSolverType("MULTIFRONTAL_QR"); + } else if (method == "CHOLESKY") { + lm.setLinearSolverType("MULTIFRONTAL_CHOLESKY"); + } else { + throw std::invalid_argument("ShonanAveragingParameters: unknown method"); + } +} + +/* ************************************************************************* */ +// Explicit instantiation for d=2 and d=3 +template struct ShonanAveragingParameters<2>; +template struct ShonanAveragingParameters<3>; + +/* ************************************************************************* */ +// Calculate number of unknown rotations referenced by measurements +template +static size_t +NrUnknowns(const typename ShonanAveraging::Measurements &measurements) { + std::set keys; + for (const auto &measurement : measurements) { + keys.insert(measurement.key1()); + keys.insert(measurement.key2()); + } + return keys.size(); +} + +/* ************************************************************************* */ +template +ShonanAveraging::ShonanAveraging(const Measurements &measurements, + const Parameters ¶meters) + : parameters_(parameters), measurements_(measurements), + nrUnknowns_(NrUnknowns(measurements)) { + for (const auto &measurement : measurements_) { + const auto &model = measurement.noiseModel(); + if (model && model->dim() != SO::dimension) { + measurement.print("Factor with incorrect noise model:\n"); + throw std::invalid_argument("ShonanAveraging: measurements passed to " + "constructor have incorrect dimension."); + } + } + Q_ = buildQ(); + D_ = buildD(); + L_ = D_ - Q_; +} + +/* 
************************************************************************* */ +template +NonlinearFactorGraph ShonanAveraging::buildGraphAt(size_t p) const { + NonlinearFactorGraph graph; + auto G = boost::make_shared(SO<-1>::VectorizedGenerators(p)); + for (const auto &measurement : measurements_) { + const auto &keys = measurement.keys(); + const auto &Rij = measurement.measured(); + const auto &model = measurement.noiseModel(); + graph.emplace_shared>(keys[0], keys[1], Rij, p, model, G); + } + + // Possibly add Karcher prior + if (parameters_.beta > 0) { + const size_t dim = SOn::Dimension(p); + graph.emplace_shared>(graph.keys(), dim); + } + + // Possibly add gauge factors - they are probably useless as gradient is zero + if (parameters_.gamma > 0 && p > d + 1) { + for (auto key : graph.keys()) + graph.emplace_shared(key, p, d, parameters_.gamma); + } + + return graph; +} + +/* ************************************************************************* */ +template +double ShonanAveraging::costAt(size_t p, const Values &values) const { + const NonlinearFactorGraph graph = buildGraphAt(p); + return graph.error(values); +} + +/* ************************************************************************* */ +template +boost::shared_ptr +ShonanAveraging::createOptimizerAt(size_t p, const Values &initial) const { + // Build graph + NonlinearFactorGraph graph = buildGraphAt(p); + + // Anchor prior is added here as depends on initial value (and cost is zero) + if (parameters_.alpha > 0) { + size_t i; + Rot value; + const size_t dim = SOn::Dimension(p); + std::tie(i, value) = parameters_.anchor; + auto model = noiseModel::Isotropic::Precision(dim, parameters_.alpha); + graph.emplace_shared>(i, SOn::Lift(p, value.matrix()), + model); + } + + // Optimize + return boost::make_shared(graph, initial, + parameters_.lm); +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::tryOptimizingAt(size_t p, + const Values &initial) const { + auto lm = createOptimizerAt(p, initial); + return lm->optimize(); +} + +/* ************************************************************************* */ +// Project to pxdN Stiefel manifold +template +Matrix ShonanAveraging::StiefelElementMatrix(const Values &values) { + const size_t N = values.size(); + const size_t p = values.at(0).rows(); + Matrix S(p, N * d); + for (const auto it : values.filter()) { + S.middleCols(it.key * d) = + it.value.matrix().leftCols(); // project Qj to Stiefel + } + return S; +} + +/* ************************************************************************* */ +template <> +Values ShonanAveraging<2>::projectFrom(size_t p, const Values &values) const { + Values result; + for (const auto it : values.filter()) { + assert(it.value.rows() == p); + const auto &M = it.value.matrix(); + const Rot2 R = Rot2::atan2(M(1, 0), M(0, 0)); + result.insert(it.key, R); + } + return result; +} + +template <> +Values ShonanAveraging<3>::projectFrom(size_t p, const Values &values) const { + Values result; + for (const auto it : values.filter()) { + assert(it.value.rows() == p); + const auto &M = it.value.matrix(); + const Rot3 R = Rot3::ClosestTo(M.topLeftCorner<3, 3>()); + result.insert(it.key, R); + } + return result; +} + +/* ************************************************************************* */ +template static Matrix RoundSolutionS(const Matrix &S) { + const size_t N = S.cols() / d; + // First, compute a thin SVD of S + Eigen::JacobiSVD svd(S, Eigen::ComputeThinV); + const Vector sigmas = 
svd.singularValues(); + + // Construct a diagonal matrix comprised of the first d singular values + using DiagonalMatrix = Eigen::DiagonalMatrix; + DiagonalMatrix Sigma_d; + Sigma_d.diagonal() = sigmas.head(); + + // Now, construct a rank-d truncated singular value decomposition for S + Matrix R = Sigma_d * svd.matrixV().leftCols().transpose(); + + // Count the number of blocks whose determinants have positive sign + size_t numPositiveBlocks = 0; + for (size_t i = 0; i < N; ++i) { + // Compute the determinant of the ith dxd block of R + double determinant = R.middleCols(d * i).determinant(); + if (determinant > 0) + ++numPositiveBlocks; + } + + if (numPositiveBlocks < N / 2) { + // Less than half of the total number of blocks have the correct sign. + // To reverse their orientations, multiply with a reflection matrix. + DiagonalMatrix reflector; + reflector.setIdentity(); + reflector.diagonal()(d - 1) = -1; + R = reflector * R; + } + + return R; +} + +/* ************************************************************************* */ +template <> Values ShonanAveraging<2>::roundSolutionS(const Matrix &S) const { + // Round to a 2*2N matrix + Matrix R = RoundSolutionS<2>(S); + + // Finally, project each dxd rotation block to SO(2) + Values values; + for (size_t j = 0; j < nrUnknowns(); ++j) { + const Rot2 Ri = Rot2::atan2(R(1, 2 * j), R(0, 2 * j)); + values.insert(j, Ri); + } + return values; +} + +template <> Values ShonanAveraging<3>::roundSolutionS(const Matrix &S) const { + // Round to a 3*3N matrix + Matrix R = RoundSolutionS<3>(S); + + // Finally, project each dxd rotation block to SO(3) + Values values; + for (size_t j = 0; j < nrUnknowns(); ++j) { + const Rot3 Ri = Rot3::ClosestTo(R.middleCols<3>(3 * j)); + values.insert(j, Ri); + } + return values; +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::roundSolution(const Values &values) const { + // Project to pxdN Stiefel manifold... + Matrix S = StiefelElementMatrix(values); + // ...and call version above. + return roundSolutionS(S); +} + +/* ************************************************************************* */ +template +double ShonanAveraging::cost(const Values &values) const { + NonlinearFactorGraph graph; + for (const auto &measurement : measurements_) { + const auto &keys = measurement.keys(); + const auto &Rij = measurement.measured(); + const auto &model = measurement.noiseModel(); + graph.emplace_shared>>( + keys[0], keys[1], SO(Rij.matrix()), model); + } + // Finally, project each dxd rotation block to SO(d) + Values result; + for (const auto it : values.filter()) { + result.insert(it.key, SO(it.value.matrix())); + } + return graph.error(result); +} + +/* ************************************************************************* */ +// Get kappa from noise model +template +static double Kappa(const BinaryMeasurement &measurement) { + const auto &isotropic = boost::dynamic_pointer_cast( + measurement.noiseModel()); + if (!isotropic) { + throw std::invalid_argument( + "Shonan averaging noise models must be isotropic."); + } + const double sigma = isotropic->sigma(); + return 1.0 / (sigma * sigma); +} + +/* ************************************************************************* */ +template Sparse ShonanAveraging::buildD() const { + // Each measurement contributes 2*d elements along the diagonal of the + // degree matrix. 
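Kappa() above is simply the precision of an isotropic noise model, kappa = 1/sigma^2. A stand-alone check (dimension 3 and sigma 0.1 are arbitrary):

#include <gtsam/linear/NoiseModel.h>
#include <iostream>

int main() {
  using namespace gtsam;
  auto model = noiseModel::Isotropic::Sigma(3, 0.1);
  const double sigma = model->sigma();
  std::cout << "kappa = " << 1.0 / (sigma * sigma) << "\n";  // 100
  return 0;
}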
+ static constexpr size_t stride = 2 * d; + + // Reserve space for triplets + std::vector> triplets; + triplets.reserve(stride * measurements_.size()); + + for (const auto &measurement : measurements_) { + // Get pose keys + const auto &keys = measurement.keys(); + + // Get kappa from noise model + double kappa = Kappa(measurement); + + const size_t di = d * keys[0], dj = d * keys[1]; + for (size_t k = 0; k < d; k++) { + // Elements of ith block-diagonal + triplets.emplace_back(di + k, di + k, kappa); + // Elements of jth block-diagonal + triplets.emplace_back(dj + k, dj + k, kappa); + } + } + + // Construct and return a sparse matrix from these triplets + const size_t dN = d * nrUnknowns(); + Sparse D(dN, dN); + D.setFromTriplets(triplets.begin(), triplets.end()); + + return D; +} + +/* ************************************************************************* */ +template Sparse ShonanAveraging::buildQ() const { + // Each measurement contributes 2*d^2 elements on a pair of symmetric + // off-diagonal blocks + static constexpr size_t stride = 2 * d * d; + + // Reserve space for triplets + std::vector> triplets; + triplets.reserve(stride * measurements_.size()); + + for (const auto &measurement : measurements_) { + // Get pose keys + const auto &keys = measurement.keys(); + + // Extract rotation measurement + const auto Rij = measurement.measured().matrix(); + + // Get kappa from noise model + double kappa = Kappa(measurement); + + const size_t di = d * keys[0], dj = d * keys[1]; + for (size_t r = 0; r < d; r++) { + for (size_t c = 0; c < d; c++) { + // Elements of ij block + triplets.emplace_back(di + r, dj + c, kappa * Rij(r, c)); + // Elements of ji block + triplets.emplace_back(dj + r, di + c, kappa * Rij(c, r)); + } + } + } + + // Construct and return a sparse matrix from these triplets + const size_t dN = d * nrUnknowns(); + Sparse Q(dN, dN); + Q.setFromTriplets(triplets.begin(), triplets.end()); + + return Q; +} + +/* ************************************************************************* */ +template +Sparse ShonanAveraging::computeLambda(const Matrix &S) const { + // Each pose contributes 2*d elements along the diagonal of Lambda + static constexpr size_t stride = d * d; + + // Reserve space for triplets + const size_t N = nrUnknowns(); + std::vector> triplets; + triplets.reserve(stride * N); + + // Do sparse-dense multiply to get Q*S' + auto QSt = Q_ * S.transpose(); + + for (size_t j = 0; j < N; j++) { + // Compute B, the building block for the j^th diagonal block of Lambda + const size_t dj = d * j; + Matrix B = QSt.middleRows(dj, d) * S.middleCols(dj); + + // Elements of jth block-diagonal + for (size_t r = 0; r < d; r++) + for (size_t c = 0; c < d; c++) + triplets.emplace_back(dj + r, dj + c, 0.5 * (B(r, c) + B(c, r))); + } + + // Construct and return a sparse matrix from these triplets + Sparse Lambda(d * N, d * N); + Lambda.setFromTriplets(triplets.begin(), triplets.end()); + return Lambda; +} + +/* ************************************************************************* */ +template +Sparse ShonanAveraging::computeLambda(const Values &values) const { + // Project to pxdN Stiefel manifold... + Matrix S = StiefelElementMatrix(values); + // ...and call version above. 
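buildD() and buildQ() above both assemble their sparse matrices with the same Eigen triplet pattern: collect (row, col, value) entries, then call setFromTriplets once. A self-contained sketch for a single edge between keys 0 and 1 (kappa chosen arbitrarily):

#include <Eigen/Sparse>
#include <iostream>
#include <vector>

int main() {
  using Sparse = Eigen::SparseMatrix<double>;
  std::vector<Eigen::Triplet<double>> triplets;
  const double kappa = 100.0;        // 1/sigma^2 for one measurement
  const size_t d = 3, i = 0, j = 1;  // one edge between keys 0 and 1
  for (size_t k = 0; k < d; k++) {
    triplets.emplace_back(d * i + k, d * i + k, kappa);  // ith diagonal block
    triplets.emplace_back(d * j + k, d * j + k, kappa);  // jth diagonal block
  }
  Sparse D(2 * d, 2 * d);
  D.setFromTriplets(triplets.begin(), triplets.end());
  std::cout << Eigen::MatrixXd(D) << "\n";
  return 0;
}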
+/* ************************************************************************* */
+template <size_t d>
+Sparse ShonanAveraging<d>::computeLambda(const Values &values) const {
+  // Project to pxdN Stiefel manifold...
+  Matrix S = StiefelElementMatrix(values);
+  // ...and call version above.
+  return computeLambda(S);
+}
+
+/* ************************************************************************* */
+template <size_t d>
+Sparse ShonanAveraging<d>::computeA(const Values &values) const {
+  assert(values.size() == nrUnknowns());
+  const Matrix S = StiefelElementMatrix(values);
+  auto Lambda = computeLambda(S);
+  return Lambda - Q_;
+}
+
+/* ************************************************************************* */
+/// MINIMUM EIGENVALUE COMPUTATIONS
+
+/** This is a lightweight struct used in conjunction with Spectra to compute
+ * the minimum eigenvalue and eigenvector of a sparse matrix A; it has a single
+ * nontrivial function, perform_op(x,y), that computes and returns the product
+ * y = (A + sigma*I) x */
+struct MatrixProdFunctor {
+  // Const reference to an externally-held matrix whose minimum-eigenvalue we
+  // want to compute
+  const Sparse &A_;
+
+  // Spectral shift
+  double sigma_;
+
+  // Constructor
+  explicit MatrixProdFunctor(const Sparse &A, double sigma = 0)
+      : A_(A), sigma_(sigma) {}
+
+  int rows() const { return A_.rows(); }
+  int cols() const { return A_.cols(); }
+
+  // Matrix-vector multiplication operation
+  void perform_op(const double *x, double *y) const {
+    // Wrap the raw arrays as Eigen Vector types
+    Eigen::Map<const Vector> X(x, rows());
+    Eigen::Map<Vector> Y(y, rows());
+
+    // Do the multiplication using wrapped Eigen vectors
+    Y = A_ * X + sigma_ * X;
+  }
+};
+
+/// Function to compute the minimum eigenvalue of A using Lanczos in Spectra.
+/// This does 2 things:
+///
+/// (1) Quick (coarse) eigenvalue computation to estimate the largest-magnitude
+/// eigenvalue
+/// (2) A second eigenvalue computation applied to A-sigma*I, where sigma is
+/// chosen to make the minimum eigenvalue of A the extremal eigenvalue of
+/// A-sigma*I
+///
+/// Upon completion, this returns a boolean value indicating whether the minimum
+/// eigenvalue was computed to the required precision -- if so, it sets the
+/// values of minEigenValue and minEigenVector appropriately
+
+/// Note that in the following function signature, S is supposed to be the
+/// block-row-matrix that is a critical point for the optimization algorithm;
+/// either S (Stiefel manifold) or R (block rotations). We use this to
+/// construct a starting vector v for the Lanczos process that will be close to
+/// the minimum eigenvector we're looking for whenever the relaxation is exact
+/// -- this is a key feature that helps to make this method fast. Note that
+/// instead of passing in all of S, it would be enough to pass in one of S's
+/// *rows*, if that's more convenient.
+
+// For the defaults, David Rosen says:
+// - maxIterations refers to the max number of Lanczos iterations to run;
+// ~1000 should be sufficiently large
+// - We've been using 10^-4 for the nonnegativity tolerance
+// - for numLanczosVectors, 20 is a good default value
+
+static bool
+SparseMinimumEigenValue(const Sparse &A, const Matrix &S, double *minEigenValue,
+                        Vector *minEigenVector = 0, size_t *numIterations = 0,
+                        size_t maxIterations = 1000,
+                        double minEigenvalueNonnegativityTolerance = 10e-4,
+                        Eigen::Index numLanczosVectors = 20) {
+  // a. Estimate the largest-magnitude eigenvalue of this matrix using Lanczos
+  MatrixProdFunctor lmOperator(A);
+  Spectra::SymEigsSolver<double, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN,
+                         MatrixProdFunctor>
+      lmEigenValueSolver(&lmOperator, 1, std::min(numLanczosVectors, A.rows()));
+  lmEigenValueSolver.init();
+
+  const int lmConverged = lmEigenValueSolver.compute(
+      maxIterations, 1e-4, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN);
+
+  // Check convergence and bail out if necessary
+  if (lmConverged != 1)
+    return false;
+
+  const double lmEigenValue = lmEigenValueSolver.eigenvalues()(0);
+
+  if (lmEigenValue < 0) {
+    // The largest-magnitude eigenvalue is negative, and therefore also the
+    // minimum eigenvalue, so just return this solution
+    *minEigenValue = lmEigenValue;
+    if (minEigenVector) {
+      *minEigenVector = lmEigenValueSolver.eigenvectors(1).col(0);
+      minEigenVector->normalize(); // Ensure that this is a unit vector
+    }
+    return true;
+  }
+
+  // The largest-magnitude eigenvalue is positive, and is therefore the
+  // maximum eigenvalue. Therefore, after shifting the spectrum of A
+  // by -2*lmEigenValue (by forming A - 2*lambda_max*I), the shifted
+  // spectrum will lie in the interval [minEigenValue(A) - 2*lambda_max(A),
+  // -lambda_max(A)]; in particular, the largest-magnitude eigenvalue of
+  // A - 2*lambda_max*I is minEigenValue - 2*lambda_max, with corresponding
+  // eigenvector v_min
+
+  MatrixProdFunctor minShiftedOperator(A, -2 * lmEigenValue);
+
+  Spectra::SymEigsSolver<double, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN,
+                         MatrixProdFunctor>
+      minEigenValueSolver(&minShiftedOperator, 1,
+                          std::min(numLanczosVectors, A.rows()));
+
+  // If S is a critical point of F, then S^T is also in the null space of S -
+  // Lambda(S) (cf. Lemma 6 of the tech report), and therefore its rows are
+  // eigenvectors corresponding to the eigenvalue 0. In the case that the
+  // relaxation is exact, this is the *minimum* eigenvalue, and therefore the
+  // rows of S are exactly the eigenvectors that we're looking for. On the
+  // other hand, if the relaxation is *not* exact, then S - Lambda(S) has at
+  // least one strictly negative eigenvalue, and the rows of S are *unstable
+  // fixed points* for the Lanczos iterations. Thus, we will take a slightly
+  // "fuzzed" version of the first row of S as an initialization for the
+  // Lanczos iterations; this allows for rapid convergence in the case that
+  // the relaxation is exact (since we are starting close to a solution), while
+  // simultaneously allowing the iterations to escape from this fixed point in
+  // the case that the relaxation is not exact.
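+  // As a concrete illustration of the shift (with made-up numbers): if
+  // spec(A) = {-0.1, 0.5, 2.0}, then lmEigenValue = 2.0 and
+  // spec(A - 4.0*I) = {-4.1, -3.5, -2.0}. The largest-magnitude eigenvalue of
+  // the shifted matrix is -4.1, and adding back 2*lmEigenValue recovers the
+  // minimum eigenvalue of A as -4.1 + 4.0 = -0.1.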
+  Vector v0 = S.row(0).transpose();
+  Vector perturbation(v0.size());
+  perturbation.setRandom();
+  perturbation.normalize();
+  Vector xinit = v0 + (.03 * v0.norm()) * perturbation; // Perturb v0 by ~3%
+
+  // Use this to initialize the eigensolver
+  minEigenValueSolver.init(xinit.data());
+
+  // Now determine the relative precision required in the Lanczos method in
+  // order to be able to estimate the smallest eigenvalue within an *absolute*
+  // tolerance of 'minEigenvalueNonnegativityTolerance'
+  const int minConverged = minEigenValueSolver.compute(
+      maxIterations, minEigenvalueNonnegativityTolerance / lmEigenValue,
+      Spectra::SELECT_EIGENVALUE::LARGEST_MAGN);
+
+  if (minConverged != 1)
+    return false;
+
+  *minEigenValue = minEigenValueSolver.eigenvalues()(0) + 2 * lmEigenValue;
+  if (minEigenVector) {
+    *minEigenVector = minEigenValueSolver.eigenvectors(1).col(0);
+    minEigenVector->normalize(); // Ensure that this is a unit vector
+  }
+  if (numIterations)
+    *numIterations = minEigenValueSolver.num_iterations();
+  return true;
+}
+
+/* ************************************************************************* */
+template <size_t d> Sparse ShonanAveraging<d>::computeA(const Matrix &S) const {
+  auto Lambda = computeLambda(S);
+  return Lambda - Q_;
+}
+
+/* ************************************************************************* */
+template <size_t d>
+double ShonanAveraging<d>::computeMinEigenValue(const Values &values,
+                                                Vector *minEigenVector) const {
+  assert(values.size() == nrUnknowns());
+  const Matrix S = StiefelElementMatrix(values);
+  auto A = computeA(S);
+
+  double minEigenValue;
+  bool success = SparseMinimumEigenValue(A, S, &minEigenValue, minEigenVector);
+  if (!success) {
+    throw std::runtime_error(
+        "SparseMinimumEigenValue failed to compute minimum eigenvalue.");
+  }
+  return minEigenValue;
+}
+
+/* ************************************************************************* */
+template <size_t d>
+std::pair<double, Vector>
+ShonanAveraging<d>::computeMinEigenVector(const Values &values) const {
+  Vector minEigenVector;
+  double minEigenValue = computeMinEigenValue(values, &minEigenVector);
+  return std::make_pair(minEigenValue, minEigenVector);
+}
+
+/* ************************************************************************* */
+template <size_t d>
+bool ShonanAveraging<d>::checkOptimality(const Values &values) const {
+  double minEigenValue = computeMinEigenValue(values);
+  return minEigenValue > parameters_.optimalityThreshold;
+}
+
+/* ************************************************************************* */
+/// Create a tangent direction xi with eigenvector segment v_i
+template <size_t d>
+Vector ShonanAveraging<d>::MakeATangentVector(size_t p, const Vector &v,
+                                              size_t i) {
+  // Create a tangent direction xi with eigenvector segment v_i
+  const size_t dimension = SOn::Dimension(p);
+  const auto v_i = v.segment<d>(d * i);
+  Vector xi = Vector::Zero(dimension);
+  double sign = pow(-1.0, round((p + 1) / 2) + 1);
+  for (size_t j = 0; j < d; j++) {
+    xi(j + p - d - 1) = sign * v_i(d - j - 1);
+    sign = -sign;
+  }
+  return xi;
+}
+
+/* ************************************************************************* */
+template <size_t d>
+Matrix ShonanAveraging<d>::riemannianGradient(size_t p,
+                                              const Values &values) const {
+  Matrix S_dot = StiefelElementMatrix(values);
+  // calculate the gradient of F(Q_dot) at Q_dot
+  Matrix euclideanGradient = 2 * (L_ * (S_dot.transpose())).transpose();
+  // cout << "euclidean gradient rows and cols" << euclideanGradient.rows() <<
+  // "\t" << euclideanGradient.cols() << endl;
+
+  // project the gradient onto the entire euclidean space
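+  // The block loop below forms S_dot * SymBlockDiag(S_dot' * euclideanGradient),
+  // so the returned matrix is the usual Stiefel projection of the Euclidean
+  // gradient:
+  //   grad f = euclideanGradient - S_dot * SymBlockDiag(S_dot' * euclideanGradient)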
+  Matrix symBlockDiagProduct(p, d * nrUnknowns());
+  for (size_t i = 0; i < nrUnknowns(); i++) {
+    // Compute block product
+    const size_t di = d * i;
+    const Matrix P = S_dot.middleCols<d>(di).transpose() *
+                     euclideanGradient.middleCols<d>(di);
+    // Symmetrize this block
+    Matrix S = .5 * (P + P.transpose());
+    // Compute S_dot * S and set corresponding block
+    symBlockDiagProduct.middleCols<d>(di) = S_dot.middleCols<d>(di) * S;
+  }
+  Matrix riemannianGradient = euclideanGradient - symBlockDiagProduct;
+  return riemannianGradient;
+}
+
+/* ************************************************************************* */
+template <size_t d>
+Values ShonanAveraging<d>::LiftwithDescent(size_t p, const Values &values,
+                                           const Vector &minEigenVector) {
+  Values lifted = LiftTo<SOn>(p, values);
+  for (auto it : lifted.filter<SOn>()) {
+    // Create a tangent direction xi with eigenvector segment v_i
+    // Assumes key is 0-based integer
+    const Vector xi = MakeATangentVector(p, minEigenVector, it.key);
+    // Move the old value in the descent direction
+    it.value = it.value.retract(xi);
+  }
+  return lifted;
+}
+
+/* ************************************************************************* */
+template <size_t d>
+Values ShonanAveraging<d>::initializeWithDescent(
+    size_t p, const Values &values, const Vector &minEigenVector,
+    double minEigenValue, double gradienTolerance,
+    double preconditionedGradNormTolerance) const {
+  double funcVal = costAt(p - 1, values);
+  double alphaMin = 1e-2;
+  double alpha =
+      std::max(1024 * alphaMin, 10 * gradienTolerance / fabs(minEigenValue));
+  vector<double> alphas;
+  vector<double> fvals;
+  // line search
+  while ((alpha >= alphaMin)) {
+    Values Qplus = LiftwithDescent(p, values, alpha * minEigenVector);
+    double funcValTest = costAt(p, Qplus);
+    Matrix gradTest = riemannianGradient(p, Qplus);
+    double gradTestNorm = gradTest.norm();
+    // Record alpha and funcVal
+    alphas.push_back(alpha);
+    fvals.push_back(funcValTest);
+    if ((funcVal > funcValTest) && (gradTestNorm > gradienTolerance)) {
+      return Qplus;
+    }
+    alpha /= 2;
+  }
+
+  auto fminIter = min_element(fvals.begin(), fvals.end());
+  auto minIdx = distance(fvals.begin(), fminIter);
+  double fMin = fvals[minIdx];
+  double aMin = alphas[minIdx];
+  if (fMin < funcVal) {
+    Values Qplus = LiftwithDescent(p, values, aMin * minEigenVector);
+    return Qplus;
+  }
+
+  return LiftwithDescent(p, values, alpha * minEigenVector);
+}
+
+/* ************************************************************************* */
+template <size_t d>
+Values ShonanAveraging<d>::initializeRandomly(std::mt19937 &rng) const {
+  Values initial;
+  for (size_t j = 0; j < nrUnknowns(); j++) {
+    initial.insert(j, Rot::Random(rng));
+  }
+  return initial;
+}
+
+/* ************************************************************************* */
+template <size_t d> Values ShonanAveraging<d>::initializeRandomly() const {
+  return initializeRandomly(kRandomNumberGenerator);
+}
+
+/* ************************************************************************* */
+template <size_t d>
+Values ShonanAveraging<d>::initializeRandomlyAt(size_t p,
+                                                std::mt19937 &rng) const {
+  const Values randomRotations = initializeRandomly(rng);
+  return LiftTo<Rot>(p, randomRotations); // lift to p!
+}
+
+/* ************************************************************************* */
+template <size_t d>
+Values ShonanAveraging<d>::initializeRandomlyAt(size_t p) const {
+  return initializeRandomlyAt(p, kRandomNumberGenerator);
+}
+
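+// A typical end-to-end use of the basic d=3 API looks as follows (sketch only;
+// the g2o file name is a placeholder):
+//   ShonanAveraging3 shonan("myPoseGraph.g2o");
+//   auto initial = shonan.initializeRandomly();
+//   auto result = shonan.run(initial); // (Rot3 Values, minimum eigenvalue)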
+/* ************************************************************************* */
+template <size_t d>
+std::pair<Values, double> ShonanAveraging<d>::run(const Values &initialEstimate,
+                                                  size_t pMin,
+                                                  size_t pMax) const {
+  Values Qstar;
+  Values initialSOp = LiftTo<Rot>(pMin, initialEstimate); // lift to pMin!
+  for (size_t p = pMin; p <= pMax; p++) {
+    // Optimize until convergence at this level
+    Qstar = tryOptimizingAt(p, initialSOp);
+
+    // Check certificate of global optimality
+    Vector minEigenVector;
+    double minEigenValue = computeMinEigenValue(Qstar, &minEigenVector);
+    if (minEigenValue > parameters_.optimalityThreshold) {
+      // If at global optimum, round and return solution
+      const Values SO3Values = roundSolution(Qstar);
+      return std::make_pair(SO3Values, minEigenValue);
+    }
+
+    // Not at global optimum yet, so check whether we will go to next level
+    if (p != pMax) {
+      // Calculate initial estimate for next level by following minEigenVector
+      initialSOp =
+          initializeWithDescent(p + 1, Qstar, minEigenVector, minEigenValue);
+    }
+  }
+  throw std::runtime_error("Shonan::run did not converge for given pMax");
+}
+
+/* ************************************************************************* */
+// Explicit instantiation for d=2
+template class ShonanAveraging<2>;
+
+ShonanAveraging2::ShonanAveraging2(const Measurements &measurements,
+                                   const Parameters &parameters)
+    : ShonanAveraging<2>(measurements, parameters) {}
+ShonanAveraging2::ShonanAveraging2(string g2oFile, const Parameters &parameters)
+    : ShonanAveraging<2>(parseMeasurements<Rot2>(g2oFile), parameters) {}
+
+/* ************************************************************************* */
+// Explicit instantiation for d=3
+template class ShonanAveraging<3>;
+
+ShonanAveraging3::ShonanAveraging3(const Measurements &measurements,
+                                   const Parameters &parameters)
+    : ShonanAveraging<3>(measurements, parameters) {}
+
+ShonanAveraging3::ShonanAveraging3(string g2oFile, const Parameters &parameters)
+    : ShonanAveraging<3>(parseMeasurements<Rot3>(g2oFile), parameters) {}
+
+// TODO(frank): Deprecate after we land pybind wrapper
+
+// Extract Rot3 measurement from Pose3 betweenfactors
+// Modeled after similar function in dataset.cpp
+static BinaryMeasurement<Rot3>
+convert(const BetweenFactor<Pose3>::shared_ptr &f) {
+  auto gaussian =
+      boost::dynamic_pointer_cast<noiseModel::Gaussian>(f->noiseModel());
+  if (!gaussian)
+    throw std::invalid_argument(
+        "parseMeasurements can only convert Pose3 measurements "
+        "with Gaussian noise models.");
+  const Matrix6 M = gaussian->covariance();
+  return BinaryMeasurement<Rot3>(
+      f->key1(), f->key2(), f->measured().rotation(),
+      noiseModel::Gaussian::Covariance(M.block<3, 3>(3, 3), true));
+}
+
+static ShonanAveraging3::Measurements
+extractRot3Measurements(const BetweenFactorPose3s &factors) {
+  ShonanAveraging3::Measurements result;
+  result.reserve(factors.size());
+  for (auto f : factors)
+    result.push_back(convert(f));
+  return result;
+}
+
+ShonanAveraging3::ShonanAveraging3(const BetweenFactorPose3s &factors,
+                                   const Parameters &parameters)
+    : ShonanAveraging<3>(extractRot3Measurements(factors), parameters) {}
+
+/* ************************************************************************* */
+} // namespace gtsam
diff --git a/gtsam/sfm/ShonanAveraging.h b/gtsam/sfm/ShonanAveraging.h
new file mode 100644
index 000000000..ed94329a2
--- /dev/null
+++ 
b/gtsam/sfm/ShonanAveraging.h @@ -0,0 +1,365 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanAveraging.h + * @date March 2019 - August 2020 + * @author Frank Dellaert, David Rosen, and Jing Wu + * @brief Shonan Averaging algorithm + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace gtsam { +class NonlinearFactorGraph; +class LevenbergMarquardtOptimizer; + +/// Parameters governing optimization etc. +template struct GTSAM_EXPORT ShonanAveragingParameters { + // Select Rot2 or Rot3 interface based template parameter d + using Rot = typename std::conditional::type; + using Anchor = std::pair; + + // Paremeters themselves: + LevenbergMarquardtParams lm; // LM parameters + double optimalityThreshold; // threshold used in checkOptimality + Anchor anchor; // pose to use as anchor if not Karcher + double alpha; // weight of anchor-based prior (default 0) + double beta; // weight of Karcher-based prior (default 1) + double gamma; // weight of gauge-fixing factors (default 0) + + ShonanAveragingParameters(const LevenbergMarquardtParams &lm = + LevenbergMarquardtParams::CeresDefaults(), + const std::string &method = "JACOBI", + double optimalityThreshold = -1e-4, + double alpha = 0.0, double beta = 1.0, + double gamma = 0.0); + + LevenbergMarquardtParams getLMParams() const { return lm; } + + void setOptimalityThreshold(double value) { optimalityThreshold = value; } + double getOptimalityThreshold() const { return optimalityThreshold; } + + void setAnchor(size_t index, const Rot &value) { anchor = {index, value}; } + + void setAnchorWeight(double value) { alpha = value; } + double getAnchorWeight() { return alpha; } + + void setKarcherWeight(double value) { beta = value; } + double getKarcherWeight() { return beta; } + + void setGaugesWeight(double value) { gamma = value; } + double getGaugesWeight() { return gamma; } +}; + +using ShonanAveragingParameters2 = ShonanAveragingParameters<2>; +using ShonanAveragingParameters3 = ShonanAveragingParameters<3>; + +/** + * Class that implements Shonan Averaging from our ECCV'20 paper. + * Note: The "basic" API uses all Rot values (Rot2 or Rot3, depending on value + * of d), whereas the different levels and "advanced" API at SO(p) needs Values + * of type SOn. + * + * The template parameter d can be 2 or 3. + * Both are specialized in the .cpp file. + * + * If you use this code in your work, please consider citing our paper: + * Shonan Rotation Averaging, Global Optimality by Surfing SO(p)^n + * Frank Dellaert, David M. Rosen, Jing Wu, Robert Mahony, and Luca Carlone, + * European Computer Vision Conference, 2020. + * You can view our ECCV spotlight video at https://youtu.be/5ppaqMyHtE0 + */ +template class GTSAM_EXPORT ShonanAveraging { +public: + using Sparse = Eigen::SparseMatrix; + + // Define the Parameters type and use its typedef of the rotation type: + using Parameters = ShonanAveragingParameters; + using Rot = typename Parameters::Rot; + + // We store SO(d) BetweenFactors to get noise model + // TODO(frank): use BinaryMeasurement? 
+ using Measurements = std::vector>; + +private: + Parameters parameters_; + Measurements measurements_; + size_t nrUnknowns_; + Sparse D_; // Sparse (diagonal) degree matrix + Sparse Q_; // Sparse measurement matrix, == \tilde{R} in Eriksson18cvpr + Sparse L_; // connection Laplacian L = D - Q, needed for optimality check + + /** + * Build 3Nx3N sparse matrix consisting of rotation measurements, arranged as + * (i,j) and (j,i) blocks within a sparse matrix. + */ + Sparse buildQ() const; + + /// Build 3Nx3N sparse degree matrix D + Sparse buildD() const; + +public: + /// @name Standard Constructors + /// @{ + + /// Construct from set of relative measurements (given as BetweenFactor + /// for now) NoiseModel *must* be isotropic. + ShonanAveraging(const Measurements &measurements, + const Parameters ¶meters = Parameters()); + + /// @} + /// @name Query properties + /// @{ + + /// Return number of unknowns + size_t nrUnknowns() const { return nrUnknowns_; } + + /// Return number of measurements + size_t nrMeasurements() const { return measurements_.size(); } + + /// k^th binary measurement + const BinaryMeasurement &measurement(size_t k) const { + return measurements_[k]; + } + + /// k^th measurement, as a Rot. + const Rot &measured(size_t k) const { return measurements_[k].measured(); } + + /// Keys for k^th measurement, as a vector of Key values. + const KeyVector &keys(size_t k) const { return measurements_[k].keys(); } + + /// @} + /// @name Matrix API (advanced use, debugging) + /// @{ + + Sparse D() const { return D_; } ///< Sparse version of D + Matrix denseD() const { return Matrix(D_); } ///< Dense version of D + Sparse Q() const { return Q_; } ///< Sparse version of Q + Matrix denseQ() const { return Matrix(Q_); } ///< Dense version of Q + Sparse L() const { return L_; } ///< Sparse version of L + Matrix denseL() const { return Matrix(L_); } ///< Dense version of L + + /// Version that takes pxdN Stiefel manifold elements + Sparse computeLambda(const Matrix &S) const; + + /// Dense versions of computeLambda for wrapper/testing + Matrix computeLambda_(const Values &values) const { + return Matrix(computeLambda(values)); + } + + /// Dense versions of computeLambda for wrapper/testing + Matrix computeLambda_(const Matrix &S) const { + return Matrix(computeLambda(S)); + } + + /// Compute A matrix whose Eigenvalues we will examine + Sparse computeA(const Values &values) const; + + /// Version that takes pxdN Stiefel manifold elements + Sparse computeA(const Matrix &S) const; + + /// Dense version of computeA for wrapper/testing + Matrix computeA_(const Values &values) const { + return Matrix(computeA(values)); + } + + /// Project to pxdN Stiefel manifold + static Matrix StiefelElementMatrix(const Values &values); + + /** + * Compute minimum eigenvalue for optimality check. 
+ * @param values: should be of type SOn + */ + double computeMinEigenValue(const Values &values, + Vector *minEigenVector = nullptr) const; + + /// Project pxdN Stiefel manifold matrix S to Rot3^N + Values roundSolutionS(const Matrix &S) const; + + /// Create a tangent direction xi with eigenvector segment v_i + static Vector MakeATangentVector(size_t p, const Vector &v, size_t i); + + /// Calculate the riemannian gradient of F(values) at values + Matrix riemannianGradient(size_t p, const Values &values) const; + + /** + * Lift up the dimension of values in type SO(p-1) with descent direction + * provided by minEigenVector and return new values in type SO(p) + */ + static Values LiftwithDescent(size_t p, const Values &values, + const Vector &minEigenVector); + + /** + * Given some values at p-1, return new values at p, by doing a line search + * along the descent direction, computed from the minimum eigenvector at p-1. + * @param values should be of type SO(p-1) + * @param minEigenVector corresponding to minEigenValue at level p-1 + * @return values of type SO(p) + */ + Values + initializeWithDescent(size_t p, const Values &values, + const Vector &minEigenVector, double minEigenValue, + double gradienTolerance = 1e-2, + double preconditionedGradNormTolerance = 1e-4) const; + /// @} + /// @name Advanced API + /// @{ + + /** + * Build graph for SO(p) + * @param p the dimensionality of the rotation manifold to optimize over + */ + NonlinearFactorGraph buildGraphAt(size_t p) const; + + /** + * Create initial Values of type SO(p) + * @param p the dimensionality of the rotation manifold + */ + Values initializeRandomlyAt(size_t p, std::mt19937 &rng) const; + + /// Version of initializeRandomlyAt with fixed random seed. + Values initializeRandomlyAt(size_t p) const; + + /** + * Calculate cost for SO(p) + * Values should be of type SO(p) + */ + double costAt(size_t p, const Values &values) const; + + /** + * Given an estimated local minimum Yopt for the (possibly lifted) + * relaxation, this function computes and returns the block-diagonal elements + * of the corresponding Lagrange multiplier. + */ + Sparse computeLambda(const Values &values) const; + + /** + * Compute minimum eigenvalue for optimality check. 
+ * @param values: should be of type SOn + * @return minEigenVector and minEigenValue + */ + std::pair computeMinEigenVector(const Values &values) const; + + /** + * Check optimality + * @param values: should be of type SOn + */ + bool checkOptimality(const Values &values) const; + + /** + * Try to create optimizer at SO(p) + * @param p the dimensionality of the rotation manifold to optimize over + * @param initial initial SO(p) values + * @return lm optimizer + */ + boost::shared_ptr createOptimizerAt( + size_t p, const Values &initial) const; + + /** + * Try to optimize at SO(p) + * @param p the dimensionality of the rotation manifold to optimize over + * @param initial initial SO(p) values + * @return SO(p) values + */ + Values tryOptimizingAt(size_t p, const Values &initial) const; + + /** + * Project from SO(p) to Rot2 or Rot3 + * Values should be of type SO(p) + */ + Values projectFrom(size_t p, const Values &values) const; + + /** + * Project from SO(p)^N to Rot2^N or Rot3^N + * Values should be of type SO(p) + */ + Values roundSolution(const Values &values) const; + + /// Lift Values of type T to SO(p) + template static Values LiftTo(size_t p, const Values &values) { + Values result; + for (const auto it : values.filter()) { + result.insert(it.key, SOn::Lift(p, it.value.matrix())); + } + return result; + } + + /// @} + /// @name Basic API + /// @{ + + /** + * Calculate cost for SO(3) + * Values should be of type Rot3 + */ + double cost(const Values &values) const; + + /** + * Initialize randomly at SO(d) + * @param rng random number generator + * Example: + * std::mt19937 rng(42); + * Values initial = initializeRandomly(rng, p); + */ + Values initializeRandomly(std::mt19937 &rng) const; + + /// Random initialization for wrapper, fixed random seed. + Values initializeRandomly() const; + + /** + * Optimize at different values of p until convergence. + * @param initial initial Rot3 values + * @param pMin value of p to start Riemanian staircase at (default: d). + * @param pMax maximum value of p to try (default: 10) + * @return (Rot3 values, minimum eigenvalue) + */ + std::pair run(const Values &initialEstimate, size_t pMin = d, + size_t pMax = 10) const; + /// @} +}; + +// Subclasses for d=2 and d=3 that explicitly instantiate, as well as provide a +// convenience interface with file access. + +class ShonanAveraging2 : public ShonanAveraging<2> { +public: + ShonanAveraging2(const Measurements &measurements, + const Parameters ¶meters = Parameters()); + ShonanAveraging2(string g2oFile, const Parameters ¶meters = Parameters()); +}; + +class ShonanAveraging3 : public ShonanAveraging<3> { +public: + ShonanAveraging3(const Measurements &measurements, + const Parameters ¶meters = Parameters()); + ShonanAveraging3(string g2oFile, const Parameters ¶meters = Parameters()); + + // TODO(frank): Deprecate after we land pybind wrapper + ShonanAveraging3(const BetweenFactorPose3s &factors, + const Parameters ¶meters = Parameters()); +}; +} // namespace gtsam diff --git a/gtsam/sfm/ShonanFactor.cpp b/gtsam/sfm/ShonanFactor.cpp new file mode 100644 index 000000000..b911fb5a4 --- /dev/null +++ b/gtsam/sfm/ShonanFactor.cpp @@ -0,0 +1,141 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file FrobeniusFactor.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Various factors that minimize some Frobenius norm + */ + +#include + +#include +#include +#include + +#include +#include +#include + +using namespace std; + +namespace gtsam { + +//****************************************************************************** +template +ShonanFactor::ShonanFactor(Key j1, Key j2, const Rot &R12, size_t p, + const SharedNoiseModel &model, + const boost::shared_ptr &G) + : NoiseModelFactor2(ConvertNoiseModel(model, p * d), j1, j2), + M_(R12.matrix()), // d*d in all cases + p_(p), // 4 for SO(4) + pp_(p * p), // 16 for SO(4) + G_(G) { + if (noiseModel()->dim() != d * p_) + throw std::invalid_argument( + "ShonanFactor: model with incorrect dimension."); + if (!G) { + G_ = boost::make_shared(); + *G_ = SOn::VectorizedGenerators(p); // expensive! + } + if (static_cast(G_->rows()) != pp_ || + static_cast(G_->cols()) != SOn::Dimension(p)) + throw std::invalid_argument("ShonanFactor: passed in generators " + "of incorrect dimension."); +} + +//****************************************************************************** +template +void ShonanFactor::print(const std::string &s, + const KeyFormatter &keyFormatter) const { + std::cout << s << "ShonanFactor<" << p_ << ">(" << keyFormatter(key1()) << "," + << keyFormatter(key2()) << ")\n"; + traits::Print(M_, " M: "); + noiseModel_->print(" noise model: "); +} + +//****************************************************************************** +template +bool ShonanFactor::equals(const NonlinearFactor &expected, + double tol) const { + auto e = dynamic_cast(&expected); + return e != nullptr && NoiseModelFactor2::equals(*e, tol) && + p_ == e->p_ && M_ == e->M_; +} + +//****************************************************************************** +template +void ShonanFactor::fillJacobians(const Matrix &M1, const Matrix &M2, + boost::optional H1, + boost::optional H2) const { + gttic(ShonanFactor_Jacobians); + const size_t dim = p_ * d; // Stiefel manifold dimension + + if (H1) { + // If asked, calculate Jacobian H1 as as (M' \otimes M1) * G + // M' = dxd, M1 = pxp, G = (p*p)xDim(p), result should be dim x Dim(p) + // (M' \otimes M1) is dim*dim, but last pp-dim columns are zero + *H1 = Matrix::Zero(dim, G_->cols()); + for (size_t j = 0; j < d; j++) { + auto MG_j = M1 * G_->middleRows(j * p_, p_); // p_ * Dim(p) + for (size_t i = 0; i < d; i++) { + H1->middleRows(i * p_, p_) -= M_(j, i) * MG_j; + } + } + } + if (H2) { + // If asked, calculate Jacobian H2 as as (I_d \otimes M2) * G + // I_d = dxd, M2 = pxp, G = (p*p)xDim(p), result should be dim x Dim(p) + // (I_d \otimes M2) is dim*dim, but again last pp-dim columns are zero + H2->resize(dim, G_->cols()); + for (size_t i = 0; i < d; i++) { + H2->middleRows(i * p_, p_) = M2 * G_->middleRows(i * p_, p_); + } + } +} + +//****************************************************************************** +template +Vector ShonanFactor::evaluateError(const SOn &Q1, const SOn &Q2, + boost::optional H1, + boost::optional H2) const { + gttic(ShonanFactor_evaluateError); + + const Matrix &M1 = Q1.matrix(); + const Matrix &M2 = Q2.matrix(); + if (M1.rows() != static_cast(p_) || M2.rows() != static_cast(p_)) + throw std::invalid_argument("Invalid dimension SOn values passed to " + "ShonanFactor::evaluateError"); + + const size_t 
dim = p_ * d; // Stiefel manifold dimension + Vector fQ2(dim), hQ1(dim); + + // Vectorize and extract only d leftmost columns, i.e. vec(M2*P) + fQ2 << Eigen::Map(M2.data(), dim, 1); + + // Vectorize M1*P*R12 + const Matrix Q1PR12 = M1.leftCols() * M_; + hQ1 << Eigen::Map(Q1PR12.data(), dim, 1); + + this->fillJacobians(M1, M2, H1, H2); + + return fQ2 - hQ1; +} + +/* ************************************************************************* */ +// Explicit instantiation for d=2 and d=3 +template class ShonanFactor<2>; +template class ShonanFactor<3>; + +//****************************************************************************** + +} // namespace gtsam diff --git a/gtsam/sfm/ShonanFactor.h b/gtsam/sfm/ShonanFactor.h new file mode 100644 index 000000000..3c43c2c52 --- /dev/null +++ b/gtsam/sfm/ShonanFactor.h @@ -0,0 +1,91 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanFactor.h + * @date March 2019 + * @author Frank Dellaert + * @brief Main factor type in Shonan averaging, on SO(n) pairs + */ + +#pragma once + +#include +#include +#include +#include + +#include + +namespace gtsam { + +/** + * ShonanFactor is a BetweenFactor that moves in SO(p), but will + * land on the SO(d) sub-manifold of SO(p) at the global minimum. It projects + * the SO(p) matrices down to a Stiefel manifold of p*d matrices. + */ +template +class GTSAM_EXPORT ShonanFactor : public NoiseModelFactor2 { + Matrix M_; ///< measured rotation between R1 and R2 + size_t p_, pp_; ///< dimensionality constants + boost::shared_ptr G_; ///< matrix of vectorized generators + + // Select Rot2 or Rot3 interface based template parameter d + using Rot = typename std::conditional::type; + +public: + /// @name Constructor + /// @{ + + /// Constructor. Note we convert to d*p-dimensional noise model. + /// To save memory and mallocs, pass in the vectorized Lie algebra generators: + /// G = boost::make_shared(SOn::VectorizedGenerators(p)); + ShonanFactor(Key j1, Key j2, const Rot &R12, size_t p, + const SharedNoiseModel &model = nullptr, + const boost::shared_ptr &G = nullptr); + + /// @} + /// @name Testable + /// @{ + + /// print with optional string + void + print(const std::string &s, + const KeyFormatter &keyFormatter = DefaultKeyFormatter) const override; + + /// assert equality up to a tolerance + bool equals(const NonlinearFactor &expected, + double tol = 1e-9) const override; + + /// @} + /// @name NoiseModelFactor2 methods + /// @{ + + /// Error is Frobenius norm between Q1*P*R12 and Q2*P, where P=[I_3x3;0] + /// projects down from SO(p) to the Stiefel manifold of px3 matrices. 
+ Vector + evaluateError(const SOn &Q1, const SOn &Q2, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none) const override; + /// @} + +private: + /// Calculate Jacobians if asked, Only implemented for d=2 and 3 in .cpp + void fillJacobians(const Matrix &M1, const Matrix &M2, + boost::optional H1, + boost::optional H2) const; +}; + +// Explicit instantiation for d=2 and d=3 in .cpp file: +using ShonanFactor2 = ShonanFactor<2>; +using ShonanFactor3 = ShonanFactor<3>; + +} // namespace gtsam diff --git a/gtsam/sfm/ShonanGaugeFactor.h b/gtsam/sfm/ShonanGaugeFactor.h new file mode 100644 index 000000000..4847c5d58 --- /dev/null +++ b/gtsam/sfm/ShonanGaugeFactor.h @@ -0,0 +1,108 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanGaugeFactor.h + * @date March 2019 + * @author Frank Dellaert + * @brief Factor used in Shonan Averaging to clamp down gauge freedom + */ + +#pragma once + +#include +#include +#include + +#include + +namespace gtsam { +/** + * The ShonanGaugeFactor creates a constraint on a single SO(n) to avoid moving + * in the stabilizer. + * + * Details: SO(p) contains the p*3 Stiefel matrices of orthogonal frames: we + * take those to be the 3 columns on the left. + * The P*P skew-symmetric matrices associated with so(p) can be partitioned as + * (Appendix B in the ECCV paper): + * | [w] -K' | + * | K [g] | + * where w is the SO(3) space, K are the extra Stiefel diemnsions (wormhole!) + * and (g)amma are extra dimensions in SO(p) that do not modify the cost + * function. The latter corresponds to rotations SO(p-d), and so the first few + * values of p-d for d==3 with their corresponding dimensionality are {0:0, 1:0, + * 2:1, 3:3, ...} + * + * The ShonanGaugeFactor adds a unit Jacobian to these extra dimensions, + * essentially restricting the optimization to the Stiefel manifold. + */ +class GTSAM_EXPORT ShonanGaugeFactor : public NonlinearFactor { + // Row dimension, equal to the dimensionality of SO(p-d) + size_t rows_; + + /// Constant Jacobian + boost::shared_ptr whitenedJacobian_; + +public: + /** + * Construct from key for an SO(p) matrix, for base dimension d (2 or 3) + * If parameter gamma is given, it acts as a precision = 1/sigma^2, and + * the Jacobian will be multiplied with 1/sigma = sqrt(gamma). + */ + ShonanGaugeFactor(Key key, size_t p, size_t d = 3, + boost::optional gamma = boost::none) + : NonlinearFactor(boost::assign::cref_list_of<1>(key)) { + if (p < d) { + throw std::invalid_argument("ShonanGaugeFactor must have p>=d."); + } + // Calculate dimensions + size_t q = p - d; + size_t P = SOn::Dimension(p); // dimensionality of SO(p) + rows_ = SOn::Dimension(q); // dimensionality of SO(q), the gauge + + // Create constant Jacobian as a rows_*P matrix: there are rows_ penalized + // dimensions, but it is a bit tricky to find them among the P columns. + // The key is to look at how skew-symmetric matrices are laid out in SOn.h: + // the first tangent dimension will always be included, but beyond that we + // have to be careful. We always need to skip the d top-rows of the skew- + // symmetric matrix as they below to K, part of the Stiefel manifold. 
+ Matrix A(rows_, P); + A.setZero(); + double invSigma = gamma ? std::sqrt(*gamma) : 1.0; + size_t i = 0, j = 0, n = p - 1 - d; + while (i < rows_) { + A.block(i, j, n, n) = invSigma * Matrix::Identity(n, n); + i += n; + j += n + d; // skip d columns + n -= 1; + } + // TODO(frank): assign the right one in the right columns + whitenedJacobian_ = + boost::make_shared(key, A, Vector::Zero(rows_)); + } + + /// Destructor + virtual ~ShonanGaugeFactor() {} + + /// Calculate the error of the factor: always zero + double error(const Values &c) const override { return 0; } + + /// get the dimension of the factor (number of rows on linearization) + size_t dim() const override { return rows_; } + + /// linearize to a GaussianFactor + boost::shared_ptr linearize(const Values &c) const override { + return whitenedJacobian_; + } +}; +// \ShonanGaugeFactor + +} // namespace gtsam \ No newline at end of file diff --git a/gtsam/sfm/tests/testShonanAveraging.cpp b/gtsam/sfm/tests/testShonanAveraging.cpp new file mode 100644 index 000000000..cc4319e15 --- /dev/null +++ b/gtsam/sfm/tests/testShonanAveraging.cpp @@ -0,0 +1,360 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testShonanAveraging.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Unit tests for Shonan Averaging algorithm + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +using namespace std; +using namespace gtsam; + +template +using Rot = typename std::conditional::type; + +template +using Pose = typename std::conditional::type; + +ShonanAveraging3 fromExampleName( + const std::string &name, + ShonanAveraging3::Parameters parameters = ShonanAveraging3::Parameters()) { + string g2oFile = findExampleDataFile(name); + return ShonanAveraging3(g2oFile, parameters); +} + +static const ShonanAveraging3 kShonan = fromExampleName("toyExample.g2o"); + +static std::mt19937 kRandomNumberGenerator(42); + +/* ************************************************************************* */ +TEST(ShonanAveraging3, checkConstructor) { + EXPECT_LONGS_EQUAL(5, kShonan.nrUnknowns()); + + EXPECT_LONGS_EQUAL(15, kShonan.D().rows()); + EXPECT_LONGS_EQUAL(15, kShonan.D().cols()); + auto D = kShonan.denseD(); + EXPECT_LONGS_EQUAL(15, D.rows()); + EXPECT_LONGS_EQUAL(15, D.cols()); + + EXPECT_LONGS_EQUAL(15, kShonan.Q().rows()); + EXPECT_LONGS_EQUAL(15, kShonan.Q().cols()); + auto Q = kShonan.denseQ(); + EXPECT_LONGS_EQUAL(15, Q.rows()); + EXPECT_LONGS_EQUAL(15, Q.cols()); + + EXPECT_LONGS_EQUAL(15, kShonan.L().rows()); + EXPECT_LONGS_EQUAL(15, kShonan.L().cols()); + auto L = kShonan.denseL(); + EXPECT_LONGS_EQUAL(15, L.rows()); + EXPECT_LONGS_EQUAL(15, L.cols()); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, buildGraphAt) { + auto graph = kShonan.buildGraphAt(5); + EXPECT_LONGS_EQUAL(7, graph.size()); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, checkOptimality) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values random = ShonanAveraging3::LiftTo(4, randomRotations); // lift to 
4! + auto Lambda = kShonan.computeLambda(random); + EXPECT_LONGS_EQUAL(15, Lambda.rows()); + EXPECT_LONGS_EQUAL(15, Lambda.cols()); + EXPECT_LONGS_EQUAL(45, Lambda.nonZeros()); + auto lambdaMin = kShonan.computeMinEigenValue(random); + // EXPECT_DOUBLES_EQUAL(-5.2964625490657866, lambdaMin, + // 1e-4); // Regression test + EXPECT_DOUBLES_EQUAL(-414.87376657555996, lambdaMin, + 1e-4); // Regression test + EXPECT(!kShonan.checkOptimality(random)); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, tryOptimizingAt3) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values initial = ShonanAveraging3::LiftTo(3, randomRotations); // convert to SOn + EXPECT(!kShonan.checkOptimality(initial)); + const Values result = kShonan.tryOptimizingAt(3, initial); + EXPECT(kShonan.checkOptimality(result)); + auto lambdaMin = kShonan.computeMinEigenValue(result); + EXPECT_DOUBLES_EQUAL(-5.427688831332745e-07, lambdaMin, + 1e-4); // Regression test + EXPECT_DOUBLES_EQUAL(0, kShonan.costAt(3, result), 1e-4); + const Values SO3Values = kShonan.roundSolution(result); + EXPECT_DOUBLES_EQUAL(0, kShonan.cost(SO3Values), 1e-4); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, tryOptimizingAt4) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values random = ShonanAveraging3::LiftTo(4, randomRotations); // lift to 4! + const Values result = kShonan.tryOptimizingAt(4, random); + EXPECT(kShonan.checkOptimality(result)); + EXPECT_DOUBLES_EQUAL(0, kShonan.costAt(4, result), 1e-3); + auto lambdaMin = kShonan.computeMinEigenValue(result); + EXPECT_DOUBLES_EQUAL(-5.427688831332745e-07, lambdaMin, + 1e-4); // Regression test + const Values SO3Values = kShonan.roundSolution(result); + EXPECT_DOUBLES_EQUAL(0, kShonan.cost(SO3Values), 1e-4); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, MakeATangentVector) { + Vector9 v; + v << 1, 2, 3, 4, 5, 6, 7, 8, 9; + Matrix expected(5, 5); + expected << 0, 0, 0, 0, -4, // + 0, 0, 0, 0, -5, // + 0, 0, 0, 0, -6, // + 0, 0, 0, 0, 0, // + 4, 5, 6, 0, 0; + const Vector xi_1 = ShonanAveraging3::MakeATangentVector(5, v, 1); + const auto actual = SOn::Hat(xi_1); + CHECK(assert_equal(expected, actual)); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, LiftTo) { + auto I = genericValue(Rot3()); + Values initial{{0, I}, {1, I}, {2, I}}; + Values lifted = ShonanAveraging3::LiftTo(5, initial); + EXPECT(assert_equal(SOn(5), lifted.at(0))); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, CheckWithEigen) { + // control randomness + static std::mt19937 rng(0); + Vector descentDirection = Vector::Random(15); // for use below + const Values randomRotations = kShonan.initializeRandomly(rng); + Values random = ShonanAveraging3::LiftTo(3, randomRotations); + + // Optimize + const Values Qstar3 = kShonan.tryOptimizingAt(3, random); + + // Compute Eigenvalue with Spectra solver + double lambda = kShonan.computeMinEigenValue(Qstar3); + + // Check Eigenvalue with slow Eigen version, converts matrix A to dense matrix! 
+ const Matrix S = ShonanAveraging3::StiefelElementMatrix(Qstar3); + auto A = kShonan.computeA(S); + bool computeEigenvectors = false; + Eigen::EigenSolver eigenSolver(Matrix(A), computeEigenvectors); + auto lambdas = eigenSolver.eigenvalues().real(); + double minEigenValue = lambdas(0); + for (int i = 1; i < lambdas.size(); i++) + minEigenValue = min(lambdas(i), minEigenValue); + + // Actual check + EXPECT_DOUBLES_EQUAL(minEigenValue, lambda, 1e-12); + + // Construct test descent direction (as minEigenVector is not predictable + // across platforms, being one from a basically flat 3d- subspace) + + // Check descent + Values initialQ4 = + ShonanAveraging3::LiftwithDescent(4, Qstar3, descentDirection); + EXPECT_LONGS_EQUAL(5, initialQ4.size()); + + // TODO(frank): uncomment this regression test: currently not repeatable + // across platforms. + // Matrix expected(4, 4); + // expected << 0.0459224, -0.688689, -0.216922, 0.690321, // + // 0.92381, 0.191931, 0.255854, 0.21042, // + // -0.376669, 0.301589, 0.687953, 0.542111, // + // -0.0508588, 0.630804, -0.643587, 0.43046; + // EXPECT(assert_equal(SOn(expected), initialQ4.at(0), 1e-5)); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, initializeWithDescent) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values random = ShonanAveraging3::LiftTo(3, randomRotations); + const Values Qstar3 = kShonan.tryOptimizingAt(3, random); + Vector minEigenVector; + double lambdaMin = kShonan.computeMinEigenValue(Qstar3, &minEigenVector); + Values initialQ4 = + kShonan.initializeWithDescent(4, Qstar3, minEigenVector, lambdaMin); + EXPECT_LONGS_EQUAL(5, initialQ4.size()); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, run) { + auto initial = kShonan.initializeRandomly(kRandomNumberGenerator); + auto result = kShonan.run(initial, 5); + EXPECT_DOUBLES_EQUAL(0, kShonan.cost(result.first), 1e-3); + EXPECT_DOUBLES_EQUAL(-5.427688831332745e-07, result.second, + 1e-4); // Regression test +} + +/* ************************************************************************* */ +namespace klaus { +// The data in the file is the Colmap solution +const Rot3 wR0(0.9992281076190063, -0.02676080288219576, -0.024497002638379624, + -0.015064701622500615); +const Rot3 wR1(0.998239108728862, -0.049543805396343954, -0.03232420352077356, + -0.004386230477751116); +const Rot3 wR2(0.9925378735259738, -0.07993768981394891, 0.0825062894866454, + -0.04088089479075661); +} // namespace klaus + +TEST(ShonanAveraging3, runKlaus) { + using namespace klaus; + + // Initialize a Shonan instance without the Karcher mean + ShonanAveraging3::Parameters parameters; + parameters.setKarcherWeight(0); + + // Load 3 pose example taken in Klaus by Shicong + static const ShonanAveraging3 shonan = + fromExampleName("Klaus3.g2o", parameters); + + // Check nr poses + EXPECT_LONGS_EQUAL(3, shonan.nrUnknowns()); + + // Colmap uses the Y-down vision frame, and the first 3 rotations are close to + // identity. We check that below. Note tolerance is quite high. + static const Rot3 identity; + EXPECT(assert_equal(identity, wR0, 0.2)); + EXPECT(assert_equal(identity, wR1, 0.2)); + EXPECT(assert_equal(identity, wR2, 0.2)); + + // Get measurements + const Rot3 R01 = shonan.measured(0); + const Rot3 R12 = shonan.measured(1); + const Rot3 R02 = shonan.measured(2); + + // Regression test to make sure data did not change. 
+ EXPECT(assert_equal(Rot3(0.9995433591728293, -0.022048798853273946, + -0.01796327847857683, 0.010210006313668573), + R01)); + + // Check Colmap solution agrees OK with relative rotation measurements. + EXPECT(assert_equal(R01, wR0.between(wR1), 0.1)); + EXPECT(assert_equal(R12, wR1.between(wR2), 0.1)); + EXPECT(assert_equal(R02, wR0.between(wR2), 0.1)); + + // Run Shonan (with prior on first rotation) + auto initial = shonan.initializeRandomly(kRandomNumberGenerator); + auto result = shonan.run(initial, 5); + EXPECT_DOUBLES_EQUAL(0, shonan.cost(result.first), 1e-2); + EXPECT_DOUBLES_EQUAL(-9.2259161494467889e-05, result.second, + 1e-4); // Regression + + // Get Shonan solution in new frame R (R for result) + const Rot3 rR0 = result.first.at(0); + const Rot3 rR1 = result.first.at(1); + const Rot3 rR2 = result.first.at(2); + + // rR0 = rRw * wR0 => rRw = rR0 * wR0.inverse() + // rR1 = rRw * wR1 + // rR2 = rRw * wR2 + + const Rot3 rRw = rR0 * wR0.inverse(); + EXPECT(assert_equal(rRw * wR1, rR1, 0.1)) + EXPECT(assert_equal(rRw * wR2, rR2, 0.1)) +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, runKlausKarcher) { + using namespace klaus; + + // Load 3 pose example taken in Klaus by Shicong + static const ShonanAveraging3 shonan = fromExampleName("Klaus3.g2o"); + + // Run Shonan (with Karcher mean prior) + auto initial = shonan.initializeRandomly(kRandomNumberGenerator); + auto result = shonan.run(initial, 5); + EXPECT_DOUBLES_EQUAL(0, shonan.cost(result.first), 1e-2); + EXPECT_DOUBLES_EQUAL(-1.361402670507772e-05, result.second, + 1e-4); // Regression test + + // Get Shonan solution in new frame R (R for result) + const Rot3 rR0 = result.first.at(0); + const Rot3 rR1 = result.first.at(1); + const Rot3 rR2 = result.first.at(2); + + const Rot3 rRw = rR0 * wR0.inverse(); + EXPECT(assert_equal(rRw * wR1, rR1, 0.1)) + EXPECT(assert_equal(rRw * wR2, rR2, 0.1)) +} + +/* ************************************************************************* */ +TEST(ShonanAveraging2, runKlausKarcher) { + // Load 2D toy example + auto lmParams = LevenbergMarquardtParams::CeresDefaults(); + // lmParams.setVerbosityLM("SUMMARY"); + string g2oFile = findExampleDataFile("noisyToyGraph.txt"); + ShonanAveraging2::Parameters parameters(lmParams); + auto measurements = parseMeasurements(g2oFile); + ShonanAveraging2 shonan(measurements, parameters); + EXPECT_LONGS_EQUAL(4, shonan.nrUnknowns()); + + // Check graph building + NonlinearFactorGraph graph = shonan.buildGraphAt(2); + EXPECT_LONGS_EQUAL(6, graph.size()); + auto initial = shonan.initializeRandomly(kRandomNumberGenerator); + auto result = shonan.run(initial, 2); + EXPECT_DOUBLES_EQUAL(0.0008211, shonan.cost(result.first), 1e-6); + EXPECT_DOUBLES_EQUAL(0, result.second, 1e-10); // certificate! +} + +/* ************************************************************************* */ +// Test alpha/beta/gamma prior weighting. 
+TEST(ShonanAveraging3, PriorWeights) { + auto lmParams = LevenbergMarquardtParams::CeresDefaults(); + ShonanAveraging3::Parameters params(lmParams); + EXPECT_DOUBLES_EQUAL(0, params.alpha, 1e-9); + EXPECT_DOUBLES_EQUAL(1, params.beta, 1e-9); + EXPECT_DOUBLES_EQUAL(0, params.gamma, 1e-9); + double alpha = 100.0, beta = 200.0, gamma = 300.0; + params.setAnchorWeight(alpha); + params.setKarcherWeight(beta); + params.setGaugesWeight(gamma); + EXPECT_DOUBLES_EQUAL(alpha, params.alpha, 1e-9); + EXPECT_DOUBLES_EQUAL(beta, params.beta, 1e-9); + EXPECT_DOUBLES_EQUAL(gamma, params.gamma, 1e-9); + params.setKarcherWeight(0); + static const ShonanAveraging3 shonan = fromExampleName("Klaus3.g2o", params); + for (auto i : {0,1,2}) { + const auto& m = shonan.measurement(i); + auto isotropic = + boost::static_pointer_cast(m.noiseModel()); + CHECK(isotropic != nullptr); + EXPECT_LONGS_EQUAL(3, isotropic->dim()); + EXPECT_DOUBLES_EQUAL(0.2, isotropic->sigma(), 1e-9); + } + auto I = genericValue(Rot3()); + Values initial{{0, I}, {1, I}, {2, I}}; + EXPECT_DOUBLES_EQUAL(3.0756, shonan.cost(initial), 1e-4); + auto result = shonan.run(initial, 3, 3); + EXPECT_DOUBLES_EQUAL(0.0015, shonan.cost(result.first), 1e-4); +} +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/sfm/tests/testShonanFactor.cpp b/gtsam/sfm/tests/testShonanFactor.cpp new file mode 100644 index 000000000..ef94c5cf4 --- /dev/null +++ b/gtsam/sfm/tests/testShonanFactor.cpp @@ -0,0 +1,121 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * testFrobeniusFactor.cpp + * + * @file testFrobeniusFactor.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Check evaluateError for various Frobenius norm + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; + +//****************************************************************************** +namespace so3 { +SO3 id; +Vector3 v1 = (Vector(3) << 0.1, 0, 0).finished(); +SO3 R1 = SO3::Expmap(v1); +Vector3 v2 = (Vector(3) << 0.01, 0.02, 0.03).finished(); +SO3 R2 = SO3::Expmap(v2); +SO3 R12 = R1.between(R2); +} // namespace so3 + +//****************************************************************************** +namespace submanifold { +SO4 id; +Vector6 v1 = (Vector(6) << 0, 0, 0, 0.1, 0, 0).finished(); +SO3 R1 = SO3::Expmap(v1.tail<3>()); +SO4 Q1 = SO4::Expmap(v1); +Vector6 v2 = (Vector(6) << 0, 0, 0, 0.01, 0.02, 0.03).finished(); +SO3 R2 = SO3::Expmap(v2.tail<3>()); +SO4 Q2 = SO4::Expmap(v2); +SO3 R12 = R1.between(R2); +} // namespace submanifold + +/* ************************************************************************* */ +TEST(ShonanFactor3, evaluateError) { + auto model = noiseModel::Isotropic::Sigma(3, 1.2); + for (const size_t p : {5, 4, 3}) { + Matrix M = Matrix::Identity(p, p); + M.topLeftCorner(3, 3) = submanifold::R1.matrix(); + SOn Q1(M); + M.topLeftCorner(3, 3) = submanifold::R2.matrix(); + SOn Q2(M); + auto factor = ShonanFactor3(1, 2, Rot3(::so3::R12.matrix()), p, model); + Matrix H1, H2; + factor.evaluateError(Q1, Q2, H1, H2); + + // Test derivatives + Values values; + values.insert(1, Q1); + values.insert(2, Q2); + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); + } +} + +/* ************************************************************************* */ +TEST(ShonanFactor3, equivalenceToSO3) { + using namespace ::submanifold; + auto R12 = ::so3::R12.retract(Vector3(0.1, 0.2, -0.1)); + auto model = noiseModel::Isotropic::Sigma(6, 1.2); // wrong dimension + auto factor3 = FrobeniusBetweenFactor(1, 2, R12, model); + auto factor4 = ShonanFactor3(1, 2, Rot3(R12.matrix()), 4, model); + const Matrix3 E3(factor3.evaluateError(R1, R2).data()); + const Matrix43 E4( + factor4.evaluateError(SOn(Q1.matrix()), SOn(Q2.matrix())).data()); + EXPECT(assert_equal((Matrix)E4.topLeftCorner<3, 3>(), E3, 1e-9)); + EXPECT(assert_equal((Matrix)E4.row(3), Matrix13::Zero(), 1e-9)); +} + +/* ************************************************************************* */ +TEST(ShonanFactor2, evaluateError) { + auto model = noiseModel::Isotropic::Sigma(1, 1.2); + const Rot2 R1(0.3), R2(0.5), R12(0.2); + for (const size_t p : {5, 4, 3, 2}) { + Matrix M = Matrix::Identity(p, p); + M.topLeftCorner(2, 2) = R1.matrix(); + SOn Q1(M); + M.topLeftCorner(2, 2) = R2.matrix(); + SOn Q2(M); + auto factor = ShonanFactor2(1, 2, R12, p, model); + Matrix H1, H2; + factor.evaluateError(Q1, Q2, H1, H2); + + // Test derivatives + Values values; + values.insert(1, Q1); + values.insert(2, Q2); + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); + } +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git 
a/gtsam/sfm/tests/testShonanGaugeFactor.cpp b/gtsam/sfm/tests/testShonanGaugeFactor.cpp new file mode 100644 index 000000000..344394b9c --- /dev/null +++ b/gtsam/sfm/tests/testShonanGaugeFactor.cpp @@ -0,0 +1,106 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testShonanGaugeFactor.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Unit tests for ShonanGaugeFactor class + */ + +#include + +#include + +#include +#include + +using namespace std; +using namespace gtsam; + +/* ************************************************************************* */ +// Check dimensions of all low-dim GaugeFactors +TEST(ShonanAveraging, GaugeFactorLows) { + constexpr Key key(123); + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 2, 2).dim()); + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 3, 2).dim()); + EXPECT_LONGS_EQUAL(1, ShonanGaugeFactor(key, 4, 2).dim()); // SO(4-2) -> 1 + EXPECT_LONGS_EQUAL(3, ShonanGaugeFactor(key, 5, 2).dim()); // SO(5-2) -> 3 + + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 3, 3).dim()); + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 4, 3).dim()); + EXPECT_LONGS_EQUAL(1, ShonanGaugeFactor(key, 5, 3).dim()); // SO(5-3) -> 1 +} + +/* ************************************************************************* */ +// Check ShonanGaugeFactor for SO(6) +TEST(ShonanAveraging, GaugeFactorSO6) { + constexpr Key key(666); + ShonanGaugeFactor factor(key, 6); // For SO(6) + Matrix A = Matrix::Zero(3, 15); // SO(6-3) = SO(3) == 3-dimensional gauge + A(0, 0) = 1; // first 2 of 6^th skew column, which has 5 non-zero entries + A(1, 1) = 1; // then we skip 3 tangent dimensions + A(2, 5) = 1; // first of 5th skew colum, which has 4 non-zero entries above + // diagonal. + JacobianFactor linearized(key, A, Vector::Zero(3)); + Values values; + EXPECT_LONGS_EQUAL(3, factor.dim()); + EXPECT(assert_equal(linearized, *boost::dynamic_pointer_cast( + factor.linearize(values)))); +} + +/* ************************************************************************* */ +// Check ShonanGaugeFactor for SO(7) +TEST(ShonanAveraging, GaugeFactorSO7) { + constexpr Key key(777); + ShonanGaugeFactor factor(key, 7); // For SO(7) + Matrix A = Matrix::Zero(6, 21); // SO(7-3) = SO(4) == 6-dimensional gauge + A(0, 0) = 1; // first 3 of 7^th skew column, which has 6 non-zero entries + A(1, 1) = 1; + A(2, 2) = 1; // then we skip 3 tangent dimensions + A(3, 6) = 1; // first 2 of 6^th skew column, which has 5 non-zero entries + A(4, 7) = 1; // then we skip 3 tangent dimensions + A(5, 11) = 1; // first of 5th skew colum, which has 4 non-zero entries above + // diagonal. 
+ JacobianFactor linearized(key, A, Vector::Zero(6)); + Values values; + EXPECT_LONGS_EQUAL(6, factor.dim()); + EXPECT(assert_equal(linearized, *boost::dynamic_pointer_cast( + factor.linearize(values)))); +} + +/* ************************************************************************* */ +// Check ShonanGaugeFactor for SO(6), with base SO(2) +TEST(ShonanAveraging, GaugeFactorSO6over2) { + constexpr Key key(602); + double gamma = 4; + ShonanGaugeFactor factor(key, 6, 2, gamma); // For SO(6), base SO(2) + Matrix A = Matrix::Zero(6, 15); // SO(6-2) = SO(4) == 6-dimensional gauge + A(0, 0) = 2; // first 3 of 6^th skew column, which has 5 non-zero entries + A(1, 1) = 2; + A(2, 2) = 2; // then we skip only 2 tangent dimensions + A(3, 5) = 2; // first 2 of 5^th skew column, which has 4 non-zero entries + A(4, 6) = 2; // then we skip only 2 tangent dimensions + A(5, 9) = 2; // first of 4th skew colum, which has 3 non-zero entries above + // diagonal. + JacobianFactor linearized(key, A, Vector::Zero(6)); + Values values; + EXPECT_LONGS_EQUAL(6, factor.dim()); + EXPECT(assert_equal(linearized, *boost::dynamic_pointer_cast( + factor.linearize(values)))); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/slam/FrobeniusFactor.cpp b/gtsam/slam/FrobeniusFactor.cpp index 80aeea947..5697a0cd6 100644 --- a/gtsam/slam/FrobeniusFactor.cpp +++ b/gtsam/slam/FrobeniusFactor.cpp @@ -18,118 +18,38 @@ #include -#include -#include -#include - -#include -#include -#include - using namespace std; namespace gtsam { //****************************************************************************** -boost::shared_ptr ConvertPose3NoiseModel( - const SharedNoiseModel& model, size_t d, bool defaultToUnit) { +boost::shared_ptr +ConvertNoiseModel(const SharedNoiseModel &model, size_t d, bool defaultToUnit) { double sigma = 1.0; if (model != nullptr) { - if (model->dim() != 6) { - if (!defaultToUnit) - throw std::runtime_error("Can only convert Pose3 noise models"); - } else { - auto sigmas = model->sigmas().head(3).eval(); - if (sigmas(1) != sigmas(0) || sigmas(2) != sigmas(0)) { - if (!defaultToUnit) + auto sigmas = model->sigmas(); + size_t n = sigmas.size(); + if (n == 1) { + sigma = sigmas(0); // Rot2 + goto exit; + } + if (n == 3 || n == 6) { + sigma = sigmas(2); // Pose2, Rot3, or Pose3 + if (sigmas(0) != sigma || sigmas(1) != sigma) { + if (!defaultToUnit) { throw std::runtime_error("Can only convert isotropic rotation noise"); - } else { - sigma = sigmas(0); + } } + goto exit; + } + if (!defaultToUnit) { + throw std::runtime_error("Can only convert Pose2/Pose3 noise models"); } } +exit: return noiseModel::Isotropic::Sigma(d, sigma); } //****************************************************************************** -FrobeniusWormholeFactor::FrobeniusWormholeFactor( - Key j1, Key j2, const Rot3 &R12, size_t p, const SharedNoiseModel &model, - const boost::shared_ptr &G) - : NoiseModelFactor2(ConvertPose3NoiseModel(model, p * 3), j1, j2), - M_(R12.matrix()), // 3*3 in all cases - p_(p), // 4 for SO(4) - pp_(p * p), // 16 for SO(4) - G_(G) { - if (noiseModel()->dim() != 3 * p_) - throw std::invalid_argument( - "FrobeniusWormholeFactor: model with incorrect dimension."); - if (!G) { - G_ = boost::make_shared(); - *G_ = SOn::VectorizedGenerators(p); // expensive! 
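    // [Editor's note -- clarifying comment, not part of the patch] Judging from the size
    // check below, SOn::VectorizedGenerators(p) is a p*p x dim(SO(p)) matrix whose columns
    // are the vectorized Lie-algebra generators (dim SO(p) = p(p-1)/2), so its storage
    // alone grows like p^4; hence the "expensive!" remark and the option to pass a
    // precomputed, shared G into the constructor instead.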
- } - if (static_cast(G_->rows()) != pp_ || - static_cast(G_->cols()) != SOn::Dimension(p)) - throw std::invalid_argument("FrobeniusWormholeFactor: passed in generators " - "of incorrect dimension."); -} -//****************************************************************************** -void FrobeniusWormholeFactor::print(const std::string &s, const KeyFormatter &keyFormatter) const { - std::cout << s << "FrobeniusWormholeFactor<" << p_ << ">(" << keyFormatter(key1()) << "," - << keyFormatter(key2()) << ")\n"; - traits::Print(M_, " M: "); - noiseModel_->print(" noise model: "); -} - -//****************************************************************************** -bool FrobeniusWormholeFactor::equals(const NonlinearFactor &expected, - double tol) const { - auto e = dynamic_cast(&expected); - return e != nullptr && NoiseModelFactor2::equals(*e, tol) && - p_ == e->p_ && M_ == e->M_; -} - -//****************************************************************************** -Vector FrobeniusWormholeFactor::evaluateError( - const SOn& Q1, const SOn& Q2, boost::optional H1, - boost::optional H2) const { - gttic(FrobeniusWormholeFactorP_evaluateError); - - const Matrix& M1 = Q1.matrix(); - const Matrix& M2 = Q2.matrix(); - assert(M1.rows() == p_ && M2.rows() == p_); - - const size_t dim = 3 * p_; // Stiefel manifold dimension - Vector fQ2(dim), hQ1(dim); - - // Vectorize and extract only d leftmost columns, i.e. vec(M2*P) - fQ2 << Eigen::Map(M2.data(), dim, 1); - - // Vectorize M1*P*R12 - const Matrix Q1PR12 = M1.leftCols<3>() * M_; - hQ1 << Eigen::Map(Q1PR12.data(), dim, 1); - - // If asked, calculate Jacobian as (M \otimes M1) * G - if (H1) { - const size_t p2 = 2 * p_; - Matrix RPxQ = Matrix::Zero(dim, pp_); - RPxQ.block(0, 0, p_, dim) << M1 * M_(0, 0), M1 * M_(1, 0), M1 * M_(2, 0); - RPxQ.block(p_, 0, p_, dim) << M1 * M_(0, 1), M1 * M_(1, 1), M1 * M_(2, 1); - RPxQ.block(p2, 0, p_, dim) << M1 * M_(0, 2), M1 * M_(1, 2), M1 * M_(2, 2); - *H1 = -RPxQ * (*G_); - } - if (H2) { - const size_t p2 = 2 * p_; - Matrix PxQ = Matrix::Zero(dim, pp_); - PxQ.block(0, 0, p_, p_) = M2; - PxQ.block(p_, p_, p_, p_) = M2; - PxQ.block(p2, p2, p_, p_) = M2; - *H2 = PxQ * (*G_); - } - - return fQ2 - hQ1; -} - -//****************************************************************************** - -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam/slam/FrobeniusFactor.h b/gtsam/slam/FrobeniusFactor.h index 474cf6143..1fc37c785 100644 --- a/gtsam/slam/FrobeniusFactor.h +++ b/gtsam/slam/FrobeniusFactor.h @@ -18,6 +18,7 @@ #pragma once +#include #include #include #include @@ -25,23 +26,24 @@ namespace gtsam { /** - * When creating (any) FrobeniusFactor we convert a 6-dimensional Pose3 - * BetweenFactor noise model into an 9 or 16-dimensional isotropic noise + * When creating (any) FrobeniusFactor we can convert a Rot/Pose + * BetweenFactor noise model into a n-dimensional isotropic noise * model used to weight the Frobenius norm. If the noise model passed is - * null we return a Dim-dimensional isotropic noise model with sigma=1.0. If - * not, we we check if the 3-dimensional noise model on rotations is - * isotropic. If it is, we extend to 'Dim' dimensions, otherwise we throw an + * null we return a n-dimensional isotropic noise model with sigma=1.0. If + * not, we we check if the d-dimensional noise model on rotations is + * isotropic. If it is, we extend to 'n' dimensions, otherwise we throw an * error. If defaultToUnit == false throws an exception on unexepcted input. 
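 * (Editor's note, illustrative sketch only -- not part of the patch:) For example, for a
 * FrobeniusFactor<SO3> the error lives in R^9 (n = 9), so an isotropic Rot3 model
 *
 *   auto rot3model = noiseModel::Isotropic::Sigma(3, 0.1);
 *   auto converted = ConvertNoiseModel(rot3model, 9);   // == Isotropic::Sigma(9, 0.1)
 *
 * simply has its sigma propagated to 9 dimensions, while a null model yields
 * Isotropic::Sigma(9, 1.0).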
*/ - GTSAM_EXPORT boost::shared_ptr ConvertPose3NoiseModel( - const SharedNoiseModel& model, size_t d, bool defaultToUnit = true); +GTSAM_EXPORT boost::shared_ptr +ConvertNoiseModel(const SharedNoiseModel &model, size_t n, + bool defaultToUnit = true); /** * FrobeniusPrior calculates the Frobenius norm between a given matrix and an * element of SO(3) or SO(4). */ template -class FrobeniusPrior : public NoiseModelFactor1 { +class GTSAM_EXPORT FrobeniusPrior : public NoiseModelFactor1 { enum { Dim = Rot::VectorN2::RowsAtCompileTime }; using MatrixNN = typename Rot::MatrixNN; Eigen::Matrix vecM_; ///< vectorized matrix to approximate @@ -50,7 +52,7 @@ class FrobeniusPrior : public NoiseModelFactor1 { /// Constructor FrobeniusPrior(Key j, const MatrixNN& M, const SharedNoiseModel& model = nullptr) - : NoiseModelFactor1(ConvertPose3NoiseModel(model, Dim), j) { + : NoiseModelFactor1(ConvertNoiseModel(model, Dim), j) { vecM_ << Eigen::Map(M.data(), Dim, 1); } @@ -66,13 +68,13 @@ class FrobeniusPrior : public NoiseModelFactor1 { * The template argument can be any fixed-size SO. */ template -class FrobeniusFactor : public NoiseModelFactor2 { +class GTSAM_EXPORT FrobeniusFactor : public NoiseModelFactor2 { enum { Dim = Rot::VectorN2::RowsAtCompileTime }; public: /// Constructor FrobeniusFactor(Key j1, Key j2, const SharedNoiseModel& model = nullptr) - : NoiseModelFactor2(ConvertPose3NoiseModel(model, Dim), j1, + : NoiseModelFactor2(ConvertNoiseModel(model, Dim), j1, j2) {} /// Error is just Frobenius norm between rotation matrices. @@ -106,7 +108,7 @@ class GTSAM_EXPORT FrobeniusBetweenFactor : public NoiseModelFactor2 { FrobeniusBetweenFactor(Key j1, Key j2, const Rot& R12, const SharedNoiseModel& model = nullptr) : NoiseModelFactor2( - ConvertPose3NoiseModel(model, Dim), j1, j2), + ConvertNoiseModel(model, Dim), j1, j2), R12_(R12), R2hat_H_R1_(R12.inverse().AdjointMap()) {} @@ -150,52 +152,4 @@ class GTSAM_EXPORT FrobeniusBetweenFactor : public NoiseModelFactor2 { /// @} }; -/** - * FrobeniusWormholeFactor is a BetweenFactor that moves in SO(p), but will - * land on the SO(3) sub-manifold of SO(p) at the global minimum. It projects - * the SO(p) matrices down to a Stiefel manifold of p*d matrices. - * TODO(frank): template on D=2 or 3 - */ -class GTSAM_EXPORT FrobeniusWormholeFactor - : public NoiseModelFactor2 { - Matrix M_; ///< measured rotation between R1 and R2 - size_t p_, pp_; ///< dimensionality constants - boost::shared_ptr G_; ///< matrix of vectorized generators - -public: - /// @name Constructor - /// @{ - - /// Constructor. Note we convert to 3*p-dimensional noise model. - /// To save memory and mallocs, pass in the vectorized Lie algebra generators: - /// G = boost::make_shared(SOn::VectorizedGenerators(p)); - FrobeniusWormholeFactor(Key j1, Key j2, const Rot3 &R12, size_t p = 4, - const SharedNoiseModel &model = nullptr, - const boost::shared_ptr &G = nullptr); - - /// @} - /// @name Testable - /// @{ - - /// print with optional string - void - print(const std::string &s, - const KeyFormatter &keyFormatter = DefaultKeyFormatter) const override; - - /// assert equality up to a tolerance - bool equals(const NonlinearFactor &expected, - double tol = 1e-9) const override; - - /// @} - /// @name NoiseModelFactor2 methods - /// @{ - - /// Error is Frobenius norm between Q1*P*R12 and Q2*P, where P=[I_3x3;0] - /// projects down from SO(p) to the Stiefel manifold of px3 matrices. 
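  // [Editor's note -- clarifying comment, not part of the patch] Concretely, with
  // P = [I_3x3; 0] of size p x 3, the error returned below was
  //     vec(Q2 * P) - vec(Q1 * P * R12)   (a 3p-vector),
  // matching the 3p-dimensional noise model built in the constructor. The same
  // computation appears to live on in ShonanFactor3 under gtsam/sfm: the new
  // testShonanFactor tests earlier in this patch mirror the FrobeniusWormholeFactor
  // tests removed further below.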
- Vector evaluateError(const SOn& Q1, const SOn& Q2, - boost::optional H1 = boost::none, - boost::optional H2 = boost::none) const override; - /// @} -}; - } // namespace gtsam diff --git a/gtsam/slam/KarcherMeanFactor-inl.h b/gtsam/slam/KarcherMeanFactor-inl.h index f10cc7e42..c81a9adc5 100644 --- a/gtsam/slam/KarcherMeanFactor-inl.h +++ b/gtsam/slam/KarcherMeanFactor-inl.h @@ -58,20 +58,22 @@ T FindKarcherMean(std::initializer_list&& rotations) { template template -KarcherMeanFactor::KarcherMeanFactor(const CONTAINER& keys, int d) - : NonlinearFactor(keys) { +KarcherMeanFactor::KarcherMeanFactor(const CONTAINER &keys, int d, + boost::optional beta) + : NonlinearFactor(keys), d_(static_cast(d)) { if (d <= 0) { throw std::invalid_argument( "KarcherMeanFactor needs dimension for dynamic types."); } - // Create the constant Jacobian made of D*D identity matrices, - // where D is the dimensionality of the manifold. - const auto I = Eigen::MatrixXd::Identity(d, d); + // Create the constant Jacobian made of d*d identity matrices, + // where d is the dimensionality of the manifold. + Matrix A = Matrix::Identity(d, d); + if (beta) A *= std::sqrt(*beta); std::map terms; for (Key j : keys) { - terms[j] = I; + terms[j] = A; } - jacobian_ = - boost::make_shared(terms, Eigen::VectorXd::Zero(d)); + whitenedJacobian_ = + boost::make_shared(terms, Vector::Zero(d)); } } // namespace gtsam \ No newline at end of file diff --git a/gtsam/slam/KarcherMeanFactor.h b/gtsam/slam/KarcherMeanFactor.h index 54b3930d4..b7cd3b11a 100644 --- a/gtsam/slam/KarcherMeanFactor.h +++ b/gtsam/slam/KarcherMeanFactor.h @@ -30,44 +30,51 @@ namespace gtsam { * PriorFactors. */ template -T FindKarcherMean(const std::vector>& rotations); +T FindKarcherMean(const std::vector> &rotations); -template -T FindKarcherMean(std::initializer_list&& rotations); +template T FindKarcherMean(std::initializer_list &&rotations); /** * The KarcherMeanFactor creates a constraint on all SO(n) variables with * given keys that the Karcher mean (see above) will stay the same. Note the * mean itself is irrelevant to the constraint and is not a parameter: the * constraint is implemented as enforcing that the sum of local updates is - * equal to zero, hence creating a rank-dim constraint. Note it is implemented as - * a soft constraint, as typically it is used to fix a gauge freedom. + * equal to zero, hence creating a rank-dim constraint. Note it is implemented + * as a soft constraint, as typically it is used to fix a gauge freedom. * */ -template -class KarcherMeanFactor : public NonlinearFactor { +template class KarcherMeanFactor : public NonlinearFactor { + // Compile time dimension: can be -1 + enum { D = traits::dimension }; + + // Runtime dimension: always >=0 + size_t d_; + /// Constant Jacobian made of d*d identity matrices - boost::shared_ptr jacobian_; + boost::shared_ptr whitenedJacobian_; - enum {D = traits::dimension}; - - public: - /// Construct from given keys. +public: + /** + * Construct from given keys. + * If parameter beta is given, it acts as a precision = 1/sigma^2, and + * the Jacobian will be multiplied with 1/sigma = sqrt(beta). 
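 * (Editor's note, illustrative sketch only -- not part of the patch:) e.g., to softly fix
 * the rotation gauge of a set of Rot3 variables (d = 3) with an effective sigma of 0.1,
 * pass beta = 1/sigma^2 = 100; the d x d identity blocks of the Jacobian are then scaled
 * by sqrt(100) = 10:
 *
 *   KarcherMeanFactor<Rot3> factor(keys, 3, 100.0);  // keys: any key container, e.g. a KeyVector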
+ */ template - KarcherMeanFactor(const CONTAINER& keys, int d=D); + KarcherMeanFactor(const CONTAINER &keys, int d = D, + boost::optional beta = boost::none); /// Destructor virtual ~KarcherMeanFactor() {} /// Calculate the error of the factor: always zero - double error(const Values& c) const override { return 0; } + double error(const Values &c) const override { return 0; } /// get the dimension of the factor (number of rows on linearization) - size_t dim() const override { return D; } + size_t dim() const override { return d_; } /// linearize to a GaussianFactor - boost::shared_ptr linearize(const Values& c) const override { - return jacobian_; + boost::shared_ptr linearize(const Values &c) const override { + return whitenedJacobian_; } }; // \KarcherMeanFactor -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index d7b067d70..797e778a3 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -570,7 +570,7 @@ GraphAndValues load2D(pair dataset, size_t maxIndex, /* ************************************************************************* */ GraphAndValues load2D_robust(const string &filename, - noiseModel::Base::shared_ptr &model, + const noiseModel::Base::shared_ptr &model, size_t maxIndex) { return load2D(filename, model, maxIndex); } diff --git a/gtsam/slam/dataset.h b/gtsam/slam/dataset.h index 53abe55ba..f6752eb34 100644 --- a/gtsam/slam/dataset.h +++ b/gtsam/slam/dataset.h @@ -172,7 +172,7 @@ GTSAM_EXPORT GraphAndValues load2D(const std::string& filename, /// @deprecated load2D now allows for arbitrary models and wrapping a robust kernel GTSAM_EXPORT GraphAndValues load2D_robust(const std::string& filename, - noiseModel::Base::shared_ptr& model, size_t maxIndex = 0); + const noiseModel::Base::shared_ptr& model, size_t maxIndex = 0); /** save 2d graph */ GTSAM_EXPORT void save2D(const NonlinearFactorGraph& graph, diff --git a/gtsam/slam/expressions.h b/gtsam/slam/expressions.h index d60923d8e..680f2d175 100644 --- a/gtsam/slam/expressions.h +++ b/gtsam/slam/expressions.h @@ -75,30 +75,6 @@ inline Unit3_ unrotate(const Rot3_& x, const Unit3_& p) { return Unit3_(x, &Rot3::unrotate, p); } -#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS -namespace internal { -// define a rotate and unrotate for Vector3 -inline Vector3 rotate(const Rot3& R, const Vector3& v, - OptionalJacobian<3, 3> H1 = boost::none, - OptionalJacobian<3, 3> H2 = boost::none) { - return R.rotate(v, H1, H2); -} -inline Vector3 unrotate(const Rot3& R, const Vector3& v, - OptionalJacobian<3, 3> H1 = boost::none, - OptionalJacobian<3, 3> H2 = boost::none) { - return R.unrotate(v, H1, H2); -} -} // namespace internal -inline Expression rotate(const Rot3_& R, - const Expression& v) { - return Expression(internal::rotate, R, v); -} -inline Expression unrotate(const Rot3_& R, - const Expression& v) { - return Expression(internal::unrotate, R, v); -} -#endif - // Projection typedef Expression Cal3_S2_; diff --git a/gtsam/slam/tests/testFrobeniusFactor.cpp b/gtsam/slam/tests/testFrobeniusFactor.cpp index 9cb0c19fa..321b54c86 100644 --- a/gtsam/slam/tests/testFrobeniusFactor.cpp +++ b/gtsam/slam/tests/testFrobeniusFactor.cpp @@ -188,54 +188,6 @@ TEST(FrobeniusBetweenFactorSO4, evaluateError) { EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); } -//****************************************************************************** -namespace submanifold { -SO4 id; -Vector6 v1 = (Vector(6) << 0, 0, 0, 0.1, 0, 0).finished(); -SO3 R1 = SO3::Expmap(v1.tail<3>()); -SO4 Q1 
= SO4::Expmap(v1); -Vector6 v2 = (Vector(6) << 0, 0, 0, 0.01, 0.02, 0.03).finished(); -SO3 R2 = SO3::Expmap(v2.tail<3>()); -SO4 Q2 = SO4::Expmap(v2); -SO3 R12 = R1.between(R2); -} // namespace submanifold - -/* ************************************************************************* */ -TEST(FrobeniusWormholeFactor, evaluateError) { - auto model = noiseModel::Isotropic::Sigma(6, 1.2); // dimension = 6 not 16 - for (const size_t p : {5, 4, 3}) { - Matrix M = Matrix::Identity(p, p); - M.topLeftCorner(3, 3) = submanifold::R1.matrix(); - SOn Q1(M); - M.topLeftCorner(3, 3) = submanifold::R2.matrix(); - SOn Q2(M); - auto factor = - FrobeniusWormholeFactor(1, 2, Rot3(::so3::R12.matrix()), p, model); - Matrix H1, H2; - factor.evaluateError(Q1, Q2, H1, H2); - - // Test derivatives - Values values; - values.insert(1, Q1); - values.insert(2, Q2); - EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); - } -} - -/* ************************************************************************* */ -TEST(FrobeniusWormholeFactor, equivalenceToSO3) { - using namespace ::submanifold; - auto R12 = ::so3::R12.retract(Vector3(0.1, 0.2, -0.1)); - auto model = noiseModel::Isotropic::Sigma(6, 1.2); // wrong dimension - auto factor3 = FrobeniusBetweenFactor(1, 2, R12, model); - auto factor4 = FrobeniusWormholeFactor(1, 2, Rot3(R12.matrix()), 4, model); - const Matrix3 E3(factor3.evaluateError(R1, R2).data()); - const Matrix43 E4( - factor4.evaluateError(SOn(Q1.matrix()), SOn(Q2.matrix())).data()); - EXPECT(assert_equal((Matrix)E4.topLeftCorner<3, 3>(), E3, 1e-9)); - EXPECT(assert_equal((Matrix)E4.row(3), Matrix13::Zero(), 1e-9)); -} - /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam_unstable/CMakeLists.txt b/gtsam_unstable/CMakeLists.txt index 010b32710..ec161baa8 100644 --- a/gtsam_unstable/CMakeLists.txt +++ b/gtsam_unstable/CMakeLists.txt @@ -108,7 +108,7 @@ list(APPEND GTSAM_EXPORTED_TARGETS gtsam_unstable) set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE) # Wrap version for gtsam_unstable -if (GTSAM_INSTALL_MATLAB_TOOLBOX) +if (GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX) # Set up codegen include(GtsamMatlabWrap) @@ -119,8 +119,8 @@ if (GTSAM_INSTALL_MATLAB_TOOLBOX) endif() # Wrap - wrap_and_install_library(gtsam_unstable.h "gtsam" "" "${mexFlags}") -endif(GTSAM_INSTALL_MATLAB_TOOLBOX) + wrap_and_install_library(gtsam_unstable.i "gtsam" "" "${mexFlags}") +endif(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX) # Build examples diff --git a/gtsam_unstable/gtsam_unstable.h b/gtsam_unstable/gtsam_unstable.i similarity index 99% rename from gtsam_unstable/gtsam_unstable.h rename to gtsam_unstable/gtsam_unstable.i index ef2d16bf0..05b30bb0b 100644 --- a/gtsam_unstable/gtsam_unstable.h +++ b/gtsam_unstable/gtsam_unstable.i @@ -12,10 +12,14 @@ class gtsam::Point2Vector; class gtsam::Rot2; class gtsam::Pose2; class gtsam::Point3; +class gtsam::SO3; +class gtsam::SO4; +class gtsam::SOn; class gtsam::Rot3; class gtsam::Pose3; virtual class gtsam::noiseModel::Base; virtual class gtsam::noiseModel::Gaussian; +virtual class gtsam::noiseModel::Isotropic; virtual class gtsam::imuBias::ConstantBias; virtual class gtsam::NonlinearFactor; virtual class gtsam::NoiseModelFactor; @@ -39,6 +43,7 @@ class gtsam::KeyVector; class gtsam::LevenbergMarquardtParams; class gtsam::ISAM2Params; class gtsam::GaussianDensity; +class gtsam::LevenbergMarquardtOptimizer; namespace gtsam { @@ -282,7 +287,6 @@ virtual class PriorFactor : gtsam::NoiseModelFactor { 
void serializable() const; // enabling serialization functionality }; - #include template virtual class BetweenFactor : gtsam::NoiseModelFactor { diff --git a/gtsam_unstable/slam/BetweenFactorEM.h b/gtsam_unstable/slam/BetweenFactorEM.h index d551209c9..43607ac41 100644 --- a/gtsam_unstable/slam/BetweenFactorEM.h +++ b/gtsam_unstable/slam/BetweenFactorEM.h @@ -114,7 +114,7 @@ public: /** implement functions needed to derive from Factor */ /* ************************************************************************* */ - virtual double error(const Values& x) const { + double error(const Values &x) const override { return whitenedError(x).squaredNorm(); } @@ -125,8 +125,7 @@ public: * Hence \f$ b = z - h(x) = - \mathtt{error\_vector}(x) \f$ */ /* This version of linearize recalculates the noise model each time */ - virtual boost::shared_ptr linearize( - const Values& x) const { + boost::shared_ptr linearize(const Values &x) const override { // Only linearize if the factor is active if (!this->active(x)) return boost::shared_ptr(); diff --git a/gtsam_unstable/timing/process_shonan_timing_results.py b/gtsam_unstable/timing/process_shonan_timing_results.py new file mode 100644 index 000000000..9cf934dba --- /dev/null +++ b/gtsam_unstable/timing/process_shonan_timing_results.py @@ -0,0 +1,215 @@ +""" +Process timing results from timeShonanAveraging +""" + +import xlrd +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.ticker import FuncFormatter +import heapq +from collections import Counter + +def make_combined_plot(name, p_values, times, costs, min_cost_range=10): + """ Make a plot that combines timing and SO(3) cost. + Arguments: + name: string of the plot title + p_values: list of p-values (int) + times: list of timings (seconds) + costs: list of costs (double) + Will calculate the range of the costs, default minimum range = 10.0 + """ + min_cost = min(costs) + cost_range = max(max(costs)-min_cost,min_cost_range) + fig = plt.figure() + ax1 = fig.add_subplot(111) + ax1.plot(p_values, times, label="time") + ax1.set_ylabel('Time used to optimize \ seconds') + ax1.set_xlabel('p_value') + ax2 = ax1.twinx() + ax2.plot(p_values, costs, 'r', label="cost") + ax2.set_ylabel('Cost at SO(3) form') + ax2.set_xlabel('p_value') + ax2.set_xticks(p_values) + ax2.set_ylim(min_cost, min_cost + cost_range) + plt.title(name, fontsize=12) + ax1.legend(loc="upper left") + ax2.legend(loc="upper right") + plt.interactive(False) + plt.show() + +def make_convergence_plot(name, p_values, times, costs, iter=10): + """ Make a bar that show the success rate for each p_value according to whether the SO(3) cost converges + Arguments: + name: string of the plot title + p_values: list of p-values (int) + times: list of timings (seconds) + costs: list of costs (double) + iter: int of iteration number for each p_value + """ + + max_cost = np.mean(np.array(heapq.nlargest(iter, costs))) + # calculate mean costs for each p value + p_values = list(dict(Counter(p_values)).keys()) + # make sure the iter number + iter = int(len(times)/len(p_values)) + p_mean_cost = [np.mean(np.array(costs[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_max = p_values[p_mean_cost.index(max(p_mean_cost))] + # print(p_mean_cost) + # print(p_max) + + #take mean and make the combined plot + mean_times = [] + mean_costs = [] + for p in p_values: + costs_tmp = costs[p_values.index(p)*iter:(p_values.index(p)+1)*iter] + mean_cost = sum(costs_tmp)/len(costs_tmp) + mean_costs.append(mean_cost) + times_tmp = 
times[p_values.index(p)*iter:(p_values.index(p)+1)*iter] + mean_time = sum(times_tmp)/len(times_tmp) + mean_times.append(mean_time) + make_combined_plot(name, p_values,mean_times, mean_costs) + + # calculate the convergence rate for each p_value + p_success_rates = [] + if p_mean_cost[0] >= 0.95*np.mean(np.array(costs)) and p_mean_cost[0] <= 1.05*np.mean(np.array(costs)): + p_success_rates = [ 1.0 for p in p_values] + else: + for p in p_values: + if p > p_max: + p_costs = costs[p_values.index(p)*iter:(p_values.index(p)+1)*iter] + # print(p_costs) + converged = [ int(p_cost < 0.3*max_cost) for p_cost in p_costs] + success_rate = sum(converged)/len(converged) + p_success_rates.append(success_rate) + else: + p_success_rates.append(0) + + plt.bar(p_values, p_success_rates, align='center', alpha=0.5) + plt.xticks(p_values) + plt.yticks(np.arange(0, 1.2, 0.2), ['0%', '20%', '40%', '60%', '80%', '100%']) + plt.xlabel("p_value") + plt.ylabel("success rate") + plt.title(name, fontsize=12) + plt.show() + +def make_eigen_and_bound_plot(name, p_values, times1, costPs, cost3s, times2, min_eigens, subounds): + """ Make a plot that combines time for optimizing, time for optimizing and compute min_eigen, + min_eigen, subound (subound = (f_R - f_SDP) / f_SDP). + Arguments: + name: string of the plot title + p_values: list of p-values (int) + times1: list of timings (seconds) + costPs: f_SDP + cost3s: f_R + times2: list of timings (seconds) + min_eigens: list of min_eigen (double) + subounds: list of subound (double) + """ + + if dict(Counter(p_values))[5] != 1: + p_values = list(dict(Counter(p_values)).keys()) + iter = int(len(times1)/len(p_values)) + p_mean_times1 = [np.mean(np.array(times1[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_mean_times2 = [np.mean(np.array(times2[i*iter:(i+1)*iter])) for i in range(len(p_values))] + print("p_values \n", p_values) + print("p_mean_times_opti \n", p_mean_times1) + print("p_mean_times_eig \n", p_mean_times2) + + p_mean_costPs = [np.mean(np.array(costPs[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_mean_cost3s = [np.mean(np.array(cost3s[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_mean_lambdas = [np.mean(np.array(min_eigens[i*iter:(i+1)*iter])) for i in range(len(p_values))] + + print("p_mean_costPs \n", p_mean_costPs) + print("p_mean_cost3s \n", p_mean_cost3s) + print("p_mean_lambdas \n", p_mean_lambdas) + print("*******************************************************************************************************************") + + + else: + plt.figure(1) + plt.ylabel('Time used (seconds)') + plt.xlabel('p_value') + plt.plot(p_values, times1, 'r', label="time for optimizing") + plt.plot(p_values, times2, 'blue', label="time for optimizing and check") + plt.title(name, fontsize=12) + plt.legend(loc="best") + plt.interactive(False) + plt.show() + + plt.figure(2) + plt.ylabel('Min eigen_value') + plt.xlabel('p_value') + plt.plot(p_values, min_eigens, 'r', label="min_eigen values") + plt.title(name, fontsize=12) + plt.legend(loc="best") + plt.interactive(False) + plt.show() + + plt.figure(3) + plt.ylabel('sub_bounds') + plt.xlabel('p_value') + plt.plot(p_values, subounds, 'blue', label="sub_bounds") + plt.title(name, fontsize=12) + plt.legend(loc="best") + plt.show() + +# Process arguments and call plot function +import argparse +import csv +import os + +parser = argparse.ArgumentParser() +parser.add_argument("path") +args = parser.parse_args() + + +file_path = [] +domain = os.path.abspath(args.path) +for info in os.listdir(args.path): + 
file_path.append(os.path.join(domain, info)) +file_path.sort() +print(file_path) + + +# name of all the plots +names = {} +names[0] = 'tinyGrid3D vertex = 9, edge = 11' +names[1] = 'smallGrid3D vertex = 125, edge = 297' +names[2] = 'parking-garage vertex = 1661, edge = 6275' +names[3] = 'sphere2500 vertex = 2500, edge = 4949' +# names[4] = 'sphere_bignoise vertex = 2200, edge = 8647' +names[5] = 'torus3D vertex = 5000, edge = 9048' +names[6] = 'cubicle vertex = 5750, edge = 16869' +names[7] = 'rim vertex = 10195, edge = 29743' + +# Parse CSV file +for key, name in names.items(): + print(key, name) + + # find according file to process + name_file = None + for path in file_path: + if name[0:3] in path: + name_file = path + if name_file == None: + print("The file %s is not in the path" % name) + continue + + p_values, times1, costPs, cost3s, times2, min_eigens, subounds = [],[],[],[],[],[],[] + with open(name_file) as csvfile: + reader = csv.reader(csvfile, delimiter='\t') + for row in reader: + print(row) + p_values.append(int(row[0])) + times1.append(float(row[1])) + costPs.append(float(row[2])) + cost3s.append(float(row[3])) + if len(row) > 4: + times2.append(float(row[4])) + min_eigens.append(float(row[5])) + subounds.append(float(row[6])) + + #plot + # make_combined_plot(name, p_values, times1, cost3s) + # make_convergence_plot(name, p_values, times1, cost3s) + make_eigen_and_bound_plot(name, p_values, times1, costPs, cost3s, times2, min_eigens, subounds) diff --git a/gtsam_unstable/timing/timeShonanAveraging.cpp b/gtsam_unstable/timing/timeShonanAveraging.cpp new file mode 100644 index 000000000..795961aef --- /dev/null +++ b/gtsam_unstable/timing/timeShonanAveraging.cpp @@ -0,0 +1,182 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testShonanAveraging.cpp + * @date September 2019 + * @author Jing Wu + * @author Frank Dellaert + * @brief Timing script for Shonan Averaging algorithm + */ + +#include "gtsam/base/Matrix.h" +#include "gtsam/base/Vector.h" +#include "gtsam/geometry/Point3.h" +#include "gtsam/geometry/Rot3.h" +#include +#include + +#include + +#include +#include +#include +#include + +using namespace std; +using namespace gtsam; + +// string g2oFile = findExampleDataFile("toyExample.g2o"); + +// save a single line of timing info to an output stream +void saveData(size_t p, double time1, double costP, double cost3, double time2, + double min_eigenvalue, double suBound, std::ostream* os) { + *os << (int)p << "\t" << time1 << "\t" << costP << "\t" << cost3 << "\t" + << time2 << "\t" << min_eigenvalue << "\t" << suBound << endl; +} + +void checkR(const Matrix& R) { + Matrix R2 = R.transpose(); + Matrix actual_R = R2 * R; + assert_equal(Rot3(),Rot3(actual_R)); +} + +void saveResult(string name, const Values& values) { + ofstream myfile; + myfile.open("shonan_result_of_" + name + ".dat"); + size_t nrSO3 = values.count(); + myfile << "#Type SO3 Number " << nrSO3 << "\n"; + for (int i = 0; i < nrSO3; ++i) { + Matrix R = values.at(i).matrix(); + // Check if the result of R.Transpose*R satisfy orthogonal constraint + checkR(R); + myfile << i; + for (int m = 0; m < 3; ++m) { + for (int n = 0; n < 3; ++n) { + myfile << " " << R(m, n); + } + } + myfile << "\n"; + } + myfile.close(); + cout << "Saved shonan_result.dat file" << endl; +} + +void saveG2oResult(string name, const Values& values, std::map poses) { + ofstream myfile; + myfile.open("shonan_result_of_" + name + ".g2o"); + size_t nrSO3 = values.count(); + for (int i = 0; i < nrSO3; ++i) { + Matrix R = values.at(i).matrix(); + // Check if the result of R.Transpose*R satisfy orthogonal constraint + checkR(R); + myfile << "VERTEX_SE3:QUAT" << " "; + myfile << i << " "; + myfile << poses[i].x() << " " << poses[i].y() << " " << poses[i].z() << " "; + Vector quaternion = Rot3(R).quaternion(); + myfile << quaternion(3) << " " << quaternion(2) << " " << quaternion(1) << " " << quaternion(0); + myfile << "\n"; + } + myfile.close(); + cout << "Saved shonan_result.g2o file" << endl; +} + +void saveResultQuat(const Values& values) { + ofstream myfile; + myfile.open("shonan_result.dat"); + size_t nrSOn = values.count(); + for (int i = 0; i < nrSOn; ++i) { + GTSAM_PRINT(values.at(i)); + Rot3 R = Rot3(values.at(i).matrix()); + float x = R.toQuaternion().x(); + float y = R.toQuaternion().y(); + float z = R.toQuaternion().z(); + float w = R.toQuaternion().w(); + myfile << "QuatSO3 " << i; + myfile << "QuatSO3 " << i << " " << w << " " << x << " " << y << " " << z << "\n"; + myfile.close(); + } +} + +int main(int argc, char* argv[]) { + // primitive argument parsing: + if (argc > 3) { + throw runtime_error("Usage: timeShonanAveraging [g2oFile]"); + } + + string g2oFile; + try { + if (argc > 1) + g2oFile = argv[argc - 1]; + else + g2oFile = string( + "/home/jingwu/git/SESync/data/sphere2500.g2o"); + + } catch (const exception& e) { + cerr << e.what() << '\n'; + exit(1); + } + + // Create a csv output file + size_t pos1 = g2oFile.find("data/"); + size_t pos2 = g2oFile.find(".g2o"); + string name = g2oFile.substr(pos1 + 5, pos2 - pos1 - 5); + cout << name << endl; + ofstream csvFile("shonan_timing_of_" 
+ name + ".csv"); + + // Create Shonan averaging instance from the file. + // ShonanAveragingParameters parameters; + // double sigmaNoiseInRadians = 0 * M_PI / 180; + // parameters.setNoiseSigma(sigmaNoiseInRadians); + static const ShonanAveraging3 kShonan(g2oFile); + + // increase p value and try optimize using Shonan Algorithm. use chrono for + // timing + size_t pMin = 3; + Values Qstar; + Vector minEigenVector; + double CostP = 0, Cost3 = 0, lambdaMin = 0, suBound = 0; + cout << "(int)p" << "\t" << "time1" << "\t" << "costP" << "\t" << "cost3" << "\t" + << "time2" << "\t" << "MinEigenvalue" << "\t" << "SuBound" << endl; + + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + + for (size_t p = pMin; p < 6; p++) { + // Randomly initialize at lowest level, initialize by line search after that + const Values initial = + (p > pMin) ? kShonan.initializeWithDescent(p, Qstar, minEigenVector, + lambdaMin) + : ShonanAveraging::LiftTo(pMin, randomRotations); + chrono::steady_clock::time_point t1 = chrono::steady_clock::now(); + // optimizing + const Values result = kShonan.tryOptimizingAt(p, initial); + chrono::steady_clock::time_point t2 = chrono::steady_clock::now(); + chrono::duration timeUsed1 = + chrono::duration_cast>(t2 - t1); + lambdaMin = kShonan.computeMinEigenValue(result, &minEigenVector); + chrono::steady_clock::time_point t3 = chrono::steady_clock::now(); + chrono::duration timeUsed2 = + chrono::duration_cast>(t3 - t1); + Qstar = result; + CostP = kShonan.costAt(p, result); + const Values SO3Values = kShonan.roundSolution(result); + Cost3 = kShonan.cost(SO3Values); + suBound = (Cost3 - CostP) / CostP; + + saveData(p, timeUsed1.count(), CostP, Cost3, timeUsed2.count(), + lambdaMin, suBound, &cout); + saveData(p, timeUsed1.count(), CostP, Cost3, timeUsed2.count(), + lambdaMin, suBound, &csvFile); + } + saveResult(name, kShonan.roundSolution(Qstar)); + // saveG2oResult(name, kShonan.roundSolution(Qstar), kShonan.Poses()); + return 0; +} diff --git a/matlab/+gtsam/Point2.m b/matlab/+gtsam/Point2.m new file mode 100644 index 000000000..3ea65c33e --- /dev/null +++ b/matlab/+gtsam/Point2.m @@ -0,0 +1,12 @@ +function pt = Point2(varargin) + % Point2 shim + if nargin == 2 && isa(varargin{1}, 'double') + pt = [varargin{1} varargin{2}]'; + elseif nargin == 1 + pt = varargin{1}; + elseif nargin == 0 + pt = [0 0]'; + else + error('Arguments do not match any overload of Point2 shim'); + end +end \ No newline at end of file diff --git a/matlab/+gtsam/Point3.m b/matlab/+gtsam/Point3.m new file mode 100644 index 000000000..5f66b4517 --- /dev/null +++ b/matlab/+gtsam/Point3.m @@ -0,0 +1,12 @@ +function pt = Point3(varargin) + % Point3 shim + if nargin == 3 && isa(varargin{1}, 'double') + pt = [varargin{1} varargin{2} varargin{3}]'; + elseif nargin == 1 + pt = varargin{1}; + elseif nargin == 0 + pt = [0 0 0]'; + else + error('Arguments do not match any overload of Point3 shim'); + end +end \ No newline at end of file diff --git a/matlab/+gtsam/cylinderSampleProjection.m b/matlab/+gtsam/cylinderSampleProjection.m index 2b913b52d..697a57faa 100644 --- a/matlab/+gtsam/cylinderSampleProjection.m +++ b/matlab/+gtsam/cylinderSampleProjection.m @@ -50,9 +50,9 @@ for i = 1:cylinderNum visible = true; for k = 1:cylinderNum - rayCameraToPoint = pose.translation().between(sampledPoint3).vector(); - rayCameraToCylinder = pose.translation().between(cylinders{k}.centroid).vector(); - rayCylinderToPoint = cylinders{k}.centroid.between(sampledPoint3).vector(); + rayCameraToPoint = 
pose.translation().between(sampledPoint3); + rayCameraToCylinder = pose.translation().between(cylinders{k}.centroid); + rayCylinderToPoint = cylinders{k}.centroid.between(sampledPoint3); % Condition 1: all points in front of the cylinders' % surfaces are visible diff --git a/matlab/+gtsam/cylinderSampleProjectionStereo.m b/matlab/+gtsam/cylinderSampleProjectionStereo.m index 10409ac6d..58b4140fd 100644 --- a/matlab/+gtsam/cylinderSampleProjectionStereo.m +++ b/matlab/+gtsam/cylinderSampleProjectionStereo.m @@ -25,20 +25,20 @@ for i = 1:cylinderNum % For Cheirality Exception sampledPoint3 = cylinders{i}.Points{j}; sampledPoint3local = pose.transformTo(sampledPoint3); - if sampledPoint3local.z < 0 + if sampledPoint3local(3) < 0 continue; end % measurements - Z.du = K.fx() * K.baseline() / sampledPoint3local.z; - Z.uL = K.fx() * sampledPoint3local.x / sampledPoint3local.z + K.px(); + Z.du = K.fx() * K.baseline() / sampledPoint3local(3); + Z.uL = K.fx() * sampledPoint3local(1) / sampledPoint3local(3) + K.px(); Z.uR = Z.uL + Z.du; - Z.v = K.fy() / sampledPoint3local.z + K.py(); + Z.v = K.fy() / sampledPoint3local(3) + K.py(); % ignore points not visible in the scene - if Z.uL < 0 || Z.uL >= imageSize.x || ... - Z.uR < 0 || Z.uR >= imageSize.x || ... - Z.v < 0 || Z.v >= imageSize.y + if Z.uL < 0 || Z.uL >= imageSize(1) || ... + Z.uR < 0 || Z.uR >= imageSize(1) || ... + Z.v < 0 || Z.v >= imageSize(2) continue; end @@ -54,9 +54,9 @@ for i = 1:cylinderNum visible = true; for k = 1:cylinderNum - rayCameraToPoint = pose.translation().between(sampledPoint3).vector(); - rayCameraToCylinder = pose.translation().between(cylinders{k}.centroid).vector(); - rayCylinderToPoint = cylinders{k}.centroid.between(sampledPoint3).vector(); + rayCameraToPoint = sampledPoint3 - pose.translation(); + rayCameraToCylinder = cylinders{k}.centroid - pose.translation(); + rayCylinderToPoint = sampledPoint3 - cylinders{k}.centroid; % Condition 1: all points in front of the cylinders' % surfaces are visible diff --git a/matlab/+gtsam/cylinderSampling.m b/matlab/+gtsam/cylinderSampling.m index dcaa9c721..dc76295fa 100644 --- a/matlab/+gtsam/cylinderSampling.m +++ b/matlab/+gtsam/cylinderSampling.m @@ -12,8 +12,8 @@ function [cylinder] = cylinderSampling(baseCentroid, radius, height, density) % sample the points for i = 1:pointsNum theta = 2 * pi * rand; - x = radius * cos(theta) + baseCentroid.x; - y = radius * sin(theta) + baseCentroid.y; + x = radius * cos(theta) + baseCentroid(1); + y = radius * sin(theta) + baseCentroid(2); z = height * rand; points3{i,1} = Point3([x,y,z]'); end @@ -22,5 +22,5 @@ function [cylinder] = cylinderSampling(baseCentroid, radius, height, density) cylinder.radius = radius; cylinder.height = height; cylinder.Points = points3; - cylinder.centroid = Point3(baseCentroid.x, baseCentroid.y, height/2); + cylinder.centroid = Point3(baseCentroid(1), baseCentroid(2), height/2); end \ No newline at end of file diff --git a/matlab/+gtsam/plotCamera.m b/matlab/+gtsam/plotCamera.m index d0d1f45b7..986cd9a68 100644 --- a/matlab/+gtsam/plotCamera.m +++ b/matlab/+gtsam/plotCamera.m @@ -1,7 +1,7 @@ function plotCamera(pose, axisLength) hold on - C = pose.translation().vector(); + C = pose.translation(); R = pose.rotation().matrix(); xAxis = C+R(:,1)*axisLength; diff --git a/matlab/+gtsam/plotFlyingResults.m b/matlab/+gtsam/plotFlyingResults.m index 5d4a287c4..202f2409b 100644 --- a/matlab/+gtsam/plotFlyingResults.m +++ b/matlab/+gtsam/plotFlyingResults.m @@ -13,7 +13,7 @@ set(gcf, 'Position', [80,1,1800,1000]); %% 
plot all the cylinders and sampled points axis equal -axis([0, options.fieldSize.x, 0, options.fieldSize.y, 0, options.height + 30]); +axis([0, options.fieldSize(1), 0, options.fieldSize(2), 0, options.height + 30]); xlabel('X (m)'); ylabel('Y (m)'); zlabel('Height (m)'); @@ -50,8 +50,8 @@ for i = 1:cylinderNum [X,Y,Z] = cylinder(cylinders{i}.radius, sampleDensity * cylinders{i}.radius * cylinders{i}.height); - X = X + cylinders{i}.centroid.x; - Y = Y + cylinders{i}.centroid.y; + X = X + cylinders{i}.centroid(1); + Y = Y + cylinders{i}.centroid(2); Z = Z * cylinders{i}.height; h_cylinder{i} = surf(X,Y,Z); @@ -76,7 +76,7 @@ for i = 1:posesSize %plotCamera(poses{i}, 3); gRp = poses{i}.rotation().matrix(); % rotation from pose to global - C = poses{i}.translation().vector(); + C = poses{i}.translation(); axisLength = 3; xAxis = C+gRp(:,1)*axisLength; @@ -111,14 +111,14 @@ for i = 1:posesSize for j = 1:pointSize if ~isempty(pts3d{j}.cov{i}) hold on - h_point{j} = plot3(pts3d{j}.data.x, pts3d{j}.data.y, pts3d{j}.data.z); - h_point_cov{j} = gtsam.covarianceEllipse3D([pts3d{j}.data.x; pts3d{j}.data.y; pts3d{j}.data.z], ... + h_point{j} = plot3(pts3d{j}.data(1), pts3d{j}.data(2), pts3d{j}.data(3)); + h_point_cov{j} = gtsam.covarianceEllipse3D([pts3d{j}.data(1); pts3d{j}.data(2); pts3d{j}.data(3)], ... pts3d{j}.cov{i}, options.plot.covarianceScale); end end axis equal - axis([0, options.fieldSize.x, 0, options.fieldSize.y, 0, options.height + 30]); + axis([0, options.fieldSize(1), 0, options.fieldSize(2), 0, options.height + 30]); drawnow @@ -158,7 +158,7 @@ for i = 1 : posesSize hold on campos([poses{i}.x, poses{i}.y, poses{i}.z]); - camtarget([options.fieldSize.x/2, options.fieldSize.y/2, 0]); + camtarget([options.fieldSize(1)/2, options.fieldSize(2)/2, 0]); camlight(hlight, 'headlight'); if options.writeVideo diff --git a/matlab/+gtsam/plotPoint2.m b/matlab/+gtsam/plotPoint2.m index cd066951d..8d10ecab6 100644 --- a/matlab/+gtsam/plotPoint2.m +++ b/matlab/+gtsam/plotPoint2.m @@ -1,10 +1,10 @@ function plotPoint2(p,color,P) % plotPoint2 shows a Point2, possibly with covariance matrix if size(color,2)==1 - plot(p.x,p.y,[color '*']); + plot(p(1),p(2),[color '*']); else - plot(p.x,p.y,color); + plot(p(1),p(2),color); end if exist('P', 'var') && (~isempty(P)) - gtsam.covarianceEllipse([p.x;p.y],P,color(1)); + gtsam.covarianceEllipse([p(1);p(2)],P,color(1)); end \ No newline at end of file diff --git a/matlab/+gtsam/plotPoint3.m b/matlab/+gtsam/plotPoint3.m index 390b44939..85c84a210 100644 --- a/matlab/+gtsam/plotPoint3.m +++ b/matlab/+gtsam/plotPoint3.m @@ -1,12 +1,12 @@ function plotPoint3(p, color, P) %PLOTPOINT3 Plot a Point3 with an optional covariance matrix if size(color,2)==1 - plot3(p.x,p.y,p.z,[color '*']); + plot3(p(1),p(2),p(3),[color '*']); else - plot3(p.x,p.y,p.z,color); + plot3(p(1),p(2),p(3),color); end if exist('P', 'var') - gtsam.covarianceEllipse3D([p.x;p.y;p.z],P); + gtsam.covarianceEllipse3D([p(1);p(2);p(3)],P); end end diff --git a/matlab/+gtsam/plotPose3.m b/matlab/+gtsam/plotPose3.m index 8c3c7dd76..258e3f776 100644 --- a/matlab/+gtsam/plotPose3.m +++ b/matlab/+gtsam/plotPose3.m @@ -4,7 +4,7 @@ if nargin<3,axisLength=0.1;end % get rotation and translation (center) gRp = pose.rotation().matrix(); % rotation from pose to global -C = pose.translation().vector(); +C = pose.translation(); if ~isempty(axisLength) % draw the camera axes diff --git a/matlab/+gtsam/points2DTrackStereo.m b/matlab/+gtsam/points2DTrackStereo.m index 60c9f1295..04cddb7f7 100644 --- 
a/matlab/+gtsam/points2DTrackStereo.m +++ b/matlab/+gtsam/points2DTrackStereo.m @@ -38,7 +38,7 @@ graph.add(PriorFactorPose3(symbol('x', 1), cameraPoses{1}, posePriorNoise)); %% initialize graph and values initialEstimate = Values; for i = 1:pointsNum - point_j = points3d{i}.data.retract(0.05*randn(3,1)); + point_j = points3d{i}.data + (0.05*randn(3,1)); initialEstimate.insert(symbol('p', i), point_j); end diff --git a/matlab/gtsam_examples/CameraFlyingExample.m b/matlab/gtsam_examples/CameraFlyingExample.m index add2bc75a..a0dfef22a 100644 --- a/matlab/gtsam_examples/CameraFlyingExample.m +++ b/matlab/gtsam_examples/CameraFlyingExample.m @@ -46,7 +46,7 @@ options.camera.horizon = 60; % camera baseline options.camera.baseline = 0.05; % camera focal length -options.camera.f = round(options.camera.resolution.x * options.camera.horizon / ... +options.camera.f = round(options.camera.resolution(1) * options.camera.horizon / ... options.camera.fov); % camera focal baseline options.camera.fB = options.camera.f * options.camera.baseline; @@ -54,15 +54,15 @@ options.camera.fB = options.camera.f * options.camera.baseline; options.camera.disparity = options.camera.fB / options.camera.horizon; % Monocular Camera Calibration options.camera.monoK = Cal3_S2(options.camera.f, options.camera.f, 0, ... - options.camera.resolution.x/2, options.camera.resolution.y/2); + options.camera.resolution(1)/2, options.camera.resolution(2)/2); % Stereo Camera Calibration options.camera.stereoK = Cal3_S2Stereo(options.camera.f, options.camera.f, 0, ... - options.camera.resolution.x/2, options.camera.resolution.y/2, options.camera.disparity); + options.camera.resolution(1)/2, options.camera.resolution(2)/2, options.camera.disparity); % write video output options.writeVideo = true; % the testing field size (unit: meter) -options.fieldSize = Point2([100, 100]'); +options.fieldSize = Point2(100, 100); % camera flying speed (unit: meter / second) options.speed = 20; % camera flying height @@ -113,14 +113,14 @@ theta = 0; i = 1; while i <= cylinderNum theta = theta + 2*pi/10; - x = 40 * rand * cos(theta) + options.fieldSize.x/2; - y = 20 * rand * sin(theta) + options.fieldSize.y/2; - baseCentroid{i} = Point2([x, y]'); + x = 40 * rand * cos(theta) + options.fieldSize(1)/2; + y = 20 * rand * sin(theta) + options.fieldSize(2)/2; + baseCentroid{i} = Point2(x, y); % prevent two cylinders interact with each other regenerate = false; for j = 1:i-1 - if i > 1 && baseCentroid{i}.dist(baseCentroid{j}) < options.cylinder.radius * 2 + if i > 1 && norm(baseCentroid{i} - baseCentroid{j}) < options.cylinder.radius * 2 regenerate = true; break; end @@ -146,17 +146,17 @@ while 1 angle = 0.1*pi*(rand-0.5); inc_t = Point3(distance * cos(angle), ... distance * sin(angle), 0); - t = t.compose(inc_t); + t = t + inc_t; - if t.x > options.fieldSize.x - 10 || t.y > options.fieldSize.y - 10; + if t(1) > options.fieldSize(1) - 10 || t(2) > options.fieldSize(2) - 10; break; end %t = Point3([(i-1)*(options.fieldSize.x - 10)/options.poseNum + 10, ... % 15, 10]'); camera = PinholeCameraCal3_S2.Lookat(t, ... - Point3(options.fieldSize.x/2, options.fieldSize.y/2, 0), ... - Point3([0,0,1]'), options.camera.monoK); + Point3(options.fieldSize(1)/2, options.fieldSize(2)/2, 0), ... 
+ Point3(0,0,1), options.camera.monoK); cameraPoses{end+1} = camera.pose; end diff --git a/matlab/gtsam_examples/MonocularVOExample.m b/matlab/gtsam_examples/MonocularVOExample.m index 11c4253de..0d09a1487 100644 --- a/matlab/gtsam_examples/MonocularVOExample.m +++ b/matlab/gtsam_examples/MonocularVOExample.m @@ -15,14 +15,14 @@ import gtsam.* %% Create two cameras and corresponding essential matrix E aRb = Rot3.Yaw(pi/2); -aTb = Point3 (0.1, 0, 0); +aTb = [.1, 0, 0]'; identity=Pose3; aPb = Pose3(aRb, aTb); cameraA = CalibratedCamera(identity); cameraB = CalibratedCamera(aPb); %% Create 5 points -P = { Point3(0, 0, 1), Point3(-0.1, 0, 1), Point3(0.1, 0, 1), Point3(0, 0.5, 0.5), Point3(0, -0.5, 0.5) }; +P = { [0, 0, 1]', [-0.1, 0, 1]', [0.1, 0, 1]', [0, 0.5, 0.5]', [0, -0.5, 0.5]' }; %% Project points in both cameras for i=1:5 diff --git a/matlab/gtsam_examples/PlanarSLAMExample.m b/matlab/gtsam_examples/PlanarSLAMExample.m index aec933d74..3febc23e6 100644 --- a/matlab/gtsam_examples/PlanarSLAMExample.m +++ b/matlab/gtsam_examples/PlanarSLAMExample.m @@ -71,9 +71,12 @@ marginals = Marginals(graph, result); plot2DTrajectory(result, [], marginals); plot2DPoints(result, 'b', marginals); -plot([result.atPose2(i1).x; result.atPoint2(j1).x],[result.atPose2(i1).y; result.atPoint2(j1).y], 'c-'); -plot([result.atPose2(i2).x; result.atPoint2(j1).x],[result.atPose2(i2).y; result.atPoint2(j1).y], 'c-'); -plot([result.atPose2(i3).x; result.atPoint2(j2).x],[result.atPose2(i3).y; result.atPoint2(j2).y], 'c-'); +p_j1 = result.atPoint2(j1); +p_j2 = result.atPoint2(j2); + +plot([result.atPose2(i1).x; p_j1(1)],[result.atPose2(i1).y; p_j1(2)], 'c-'); +plot([result.atPose2(i2).x; p_j1(1)],[result.atPose2(i2).y; p_j1(2)], 'c-'); +plot([result.atPose2(i3).x; p_j2(1)],[result.atPose2(i3).y; p_j2(2)], 'c-'); axis([-0.6 4.8 -1 1]) axis equal view(2) diff --git a/matlab/gtsam_examples/PlanarSLAMExample_sampling.m b/matlab/gtsam_examples/PlanarSLAMExample_sampling.m index 375ed645c..93979a68a 100644 --- a/matlab/gtsam_examples/PlanarSLAMExample_sampling.m +++ b/matlab/gtsam_examples/PlanarSLAMExample_sampling.m @@ -60,15 +60,18 @@ for j=1:2 S{j}=chol(Q{j}); % for sampling end -plot([sample.atPose2(i1).x; sample.atPoint2(j1).x],[sample.atPose2(i1).y; sample.atPoint2(j1).y], 'c-'); -plot([sample.atPose2(i2).x; sample.atPoint2(j1).x],[sample.atPose2(i2).y; sample.atPoint2(j1).y], 'c-'); -plot([sample.atPose2(i3).x; sample.atPoint2(j2).x],[sample.atPose2(i3).y; sample.atPoint2(j2).y], 'c-'); +p_j1 = sample.atPoint2(j1); +p_j2 = sample.atPoint2(j2); + +plot([sample.atPose2(i1).x; p_j1(1)],[sample.atPose2(i1).y; p_j1(2)], 'c-'); +plot([sample.atPose2(i2).x; p_j1(1)],[sample.atPose2(i2).y; p_j1(2)], 'c-'); +plot([sample.atPose2(i3).x; p_j2(1)],[sample.atPose2(i3).y; p_j2(2)], 'c-'); view(2); axis auto; axis equal %% Do Sampling on point 2 N=1000; for s=1:N delta = S{2}*randn(2,1); - proposedPoint = Point2(point{2}.x+delta(1),point{2}.y+delta(2)); + proposedPoint = Point2(point{2} + delta); plotPoint2(proposedPoint,'k.') end \ No newline at end of file diff --git a/matlab/gtsam_examples/Pose2SLAMwSPCG.m b/matlab/gtsam_examples/Pose2SLAMwSPCG.m index 67f22fe1d..2df7efe2f 100644 --- a/matlab/gtsam_examples/Pose2SLAMwSPCG.m +++ b/matlab/gtsam_examples/Pose2SLAMwSPCG.m @@ -54,7 +54,7 @@ initialEstimate.print(sprintf('\nInitial estimate:\n')); %% Optimize using Levenberg-Marquardt optimization with SubgraphSolver params = gtsam.LevenbergMarquardtParams; subgraphParams = gtsam.SubgraphSolverParameters; 
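% [Editor's note -- clarifying comment, not part of the patch] The hunk below only changes
% the solver selector string: the subgraph preconditioned CG backend is now requested via
% the generic 'ITERATIVE' linear solver type together with setIterativeParams, i.e. (sketch):
%   params = gtsam.LevenbergMarquardtParams;
%   params.setLinearSolverType('ITERATIVE');
%   params.setIterativeParams(gtsam.SubgraphSolverParameters);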
-params.setLinearSolverType('CONJUGATE_GRADIENT'); +params.setLinearSolverType('ITERATIVE'); params.setIterativeParams(subgraphParams); optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate); result = optimizer.optimize(); diff --git a/matlab/gtsam_examples/SBAExample.m b/matlab/gtsam_examples/SBAExample.m index 584ace09a..f94a2ee4e 100644 --- a/matlab/gtsam_examples/SBAExample.m +++ b/matlab/gtsam_examples/SBAExample.m @@ -47,7 +47,7 @@ end %% Add Gaussian priors for a pose and a landmark to constrain the system cameraPriorNoise = noiseModel.Diagonal.Sigmas(cameraNoiseSigmas); firstCamera = PinholeCameraCal3_S2(truth.cameras{1}.pose, truth.K); -graph.add(PriorFactorSimpleCamera(symbol('c',1), firstCamera, cameraPriorNoise)); +graph.add(PriorFactorPinholeCameraCal3_S2(symbol('c',1), firstCamera, cameraPriorNoise)); pointPriorNoise = noiseModel.Isotropic.Sigma(3,pointNoiseSigma); graph.add(PriorFactorPoint3(symbol('p',1), truth.points{1}, pointPriorNoise)); @@ -64,7 +64,7 @@ for i=1:size(truth.cameras,2) initialEstimate.insert(symbol('c',i), camera_i); end for j=1:size(truth.points,2) - point_j = Point3(truth.points{j}.vector() + 0.1*randn(3,1)); + point_j = Point3(truth.points{j} + 0.1*randn(3,1)); initialEstimate.insert(symbol('p',j), point_j); end initialEstimate.print(sprintf('\nInitial estimate:\n ')); diff --git a/matlab/gtsam_examples/SFMExample.m b/matlab/gtsam_examples/SFMExample.m index 6700e90d2..a57da929c 100644 --- a/matlab/gtsam_examples/SFMExample.m +++ b/matlab/gtsam_examples/SFMExample.m @@ -58,7 +58,7 @@ for i=1:size(truth.cameras,2) initialEstimate.insert(symbol('x',i), pose_i); end for j=1:size(truth.points,2) - point_j = Point3(truth.points{j}.vector() + 0.1*randn(3,1)); + point_j = Point3(truth.points{j} + 0.1*randn(3,1)); initialEstimate.insert(symbol('p',j), point_j); end initialEstimate.print(sprintf('\nInitial estimate:\n ')); diff --git a/matlab/gtsam_examples/StereoVOExample.m b/matlab/gtsam_examples/StereoVOExample.m index b437ca80c..e7345fcf2 100644 --- a/matlab/gtsam_examples/StereoVOExample.m +++ b/matlab/gtsam_examples/StereoVOExample.m @@ -51,10 +51,10 @@ graph.add(GenericStereoFactor3D(StereoPoint2(320, 270, 115), stereo_model, x2, l initialEstimate = Values; initialEstimate.insert(x1, first_pose); % noisy estimate for pose 2 -initialEstimate.insert(x2, Pose3(Rot3(), Point3(0.1,-.1,1.1))); -initialEstimate.insert(l1, Point3( 1, 1, 5)); -initialEstimate.insert(l2, Point3(-1, 1, 5)); -initialEstimate.insert(l3, Point3( 0,-.5, 5)); +initialEstimate.insert(x2, Pose3(Rot3(), [0.1, -.1, 1.1]')); +initialEstimate.insert(l1, [ 1, 1, 5]'); +initialEstimate.insert(l2, [-1, 1, 5]'); +initialEstimate.insert(l3, [ 0,-.5, 5]'); %% optimize fprintf(1,'Optimizing\n'); tic diff --git a/matlab/gtsam_examples/VisualizeMEstimators.m b/matlab/gtsam_examples/VisualizeMEstimators.m index 8a0485334..ce505df5d 100644 --- a/matlab/gtsam_examples/VisualizeMEstimators.m +++ b/matlab/gtsam_examples/VisualizeMEstimators.m @@ -48,7 +48,7 @@ function plot_m_estimator(x, model, plot_title, fig_id, filename) rho = zeros(size(x)); for i = 1:size(x, 2) w(i) = model.weight(x(i)); - rho(i) = model.residual(x(i)); + rho(i) = model.loss(x(i)); end psi = w .* x; diff --git a/matlab/gtsam_tests/testPlanarSLAMExample.m b/matlab/gtsam_tests/testPlanarSLAMExample.m index d3cab5d1f..e0b4dbfc8 100644 --- a/matlab/gtsam_tests/testPlanarSLAMExample.m +++ b/matlab/gtsam_tests/testPlanarSLAMExample.m @@ -66,4 +66,4 @@ CHECK('pose_1.equals(Pose2,1e-4)',pose_1.equals(Pose2,1e-4)); point_1 = 
result.atPoint2(symbol('l',1)); marginals.marginalCovariance(symbol('l',1)); -CHECK('point_1.equals(Point2(2,2),1e-4)',point_1.equals(Point2(2,2),1e-4)); +CHECK('point_1.equals(Point2(2,2),1e-4)',norm(point_1 - Point2(2,2)) < 1e-4); diff --git a/matlab/gtsam_tests/testSFMExample.m b/matlab/gtsam_tests/testSFMExample.m index 985cbdb2c..a1f63c3a7 100644 --- a/matlab/gtsam_tests/testSFMExample.m +++ b/matlab/gtsam_tests/testSFMExample.m @@ -69,5 +69,5 @@ end for j=1:size(truth.points,2) point_j = result.atPoint3(symbol('p',j)); - CHECK('point_j.equals(truth.points{j},1e-5)',point_j.equals(truth.points{j},1e-5)) + CHECK('point_j.equals(truth.points{j},1e-5)',norm(point_j - truth.points{j}) < 1e-5) end diff --git a/matlab/gtsam_tests/testStereoVOExample.m b/matlab/gtsam_tests/testStereoVOExample.m index 218d0ace1..c721244ba 100644 --- a/matlab/gtsam_tests/testStereoVOExample.m +++ b/matlab/gtsam_tests/testStereoVOExample.m @@ -65,4 +65,4 @@ pose_x1 = result.atPose3(x1); CHECK('pose_x1.equals(first_pose,1e-4)',pose_x1.equals(first_pose,1e-4)); point_l1 = result.atPoint3(l1); -CHECK('point_1.equals(expected_l1,1e-4)',point_l1.equals(expected_l1,1e-4)); \ No newline at end of file +CHECK('point_1.equals(expected_l1,1e-4)',norm(point_l1 - expected_l1) < 1e-4); \ No newline at end of file diff --git a/matlab/gtsam_tests/testValues.m b/matlab/gtsam_tests/testValues.m index fe2cd30fe..48bc83f2c 100644 --- a/matlab/gtsam_tests/testValues.m +++ b/matlab/gtsam_tests/testValues.m @@ -5,8 +5,8 @@ values = Values; E = EssentialMatrix(Rot3,Unit3); tol = 1e-9; -values.insert(0, Point2); -values.insert(1, Point3); +values.insert(0, Point2(0, 0)); +values.insert(1, Point3(0, 0, 0)); values.insert(2, Rot2); values.insert(3, Pose2); values.insert(4, Rot3); @@ -21,8 +21,8 @@ values.insert(10, imuBias.ConstantBias); values.insert(11, [1;2;3]); values.insert(12, [1 2;3 4]); -EXPECT('at',values.atPoint2(0).equals(Point2,tol)); -EXPECT('at',values.atPoint3(1).equals(Point3,tol)); +EXPECT('at',values.atPoint2(0) == Point2(0, 0)); +EXPECT('at',values.atPoint3(1) == Point3(0, 0, 0)); EXPECT('at',values.atRot2(2).equals(Rot2,tol)); EXPECT('at',values.atPose2(3).equals(Pose2,tol)); EXPECT('at',values.atRot3(4).equals(Rot3,tol)); diff --git a/matlab/gtsam_tests/testVisualISAMExample.m b/matlab/gtsam_tests/testVisualISAMExample.m index 223e823a6..f75942ea7 100644 --- a/matlab/gtsam_tests/testVisualISAMExample.m +++ b/matlab/gtsam_tests/testVisualISAMExample.m @@ -51,5 +51,5 @@ end for j=1:size(truth.points,2) point_j = result.atPoint3(symbol('l',j)); - CHECK('point_j.equals(truth.points{j},1e-5)',point_j.equals(truth.points{j},1e-5)) + CHECK('point_j.equals(truth.points{j},1e-5)',norm(point_j - truth.points{j}) < 1e-5) end diff --git a/matlab/unstable_examples/+imuSimulator/IMUComparison.m b/matlab/unstable_examples/+imuSimulator/IMUComparison.m index 68e20bb25..871f023ef 100644 --- a/matlab/unstable_examples/+imuSimulator/IMUComparison.m +++ b/matlab/unstable_examples/+imuSimulator/IMUComparison.m @@ -28,19 +28,19 @@ currentVelocityGlobalIMUbody = currentVelocityGlobal; %% Prepare data structures for actual trajectory and estimates % Actual trajectory positions = zeros(3, length(times)+1); -positions(:,1) = currentPoseGlobal.translation.vector; +positions(:,1) = currentPoseGlobal.translation; poses(1).p = positions(:,1); poses(1).R = currentPoseGlobal.rotation.matrix; % Trajectory estimate (integrated in the navigation frame) positionsIMUnav = zeros(3, length(times)+1); -positionsIMUnav(:,1) = 
currentPoseGlobalIMUbody.translation.vector; +positionsIMUnav(:,1) = currentPoseGlobalIMUbody.translation; posesIMUnav(1).p = positionsIMUnav(:,1); posesIMUnav(1).R = poses(1).R; % Trajectory estimate (integrated in the body frame) positionsIMUbody = zeros(3, length(times)+1); -positionsIMUbody(:,1) = currentPoseGlobalIMUbody.translation.vector; +positionsIMUbody(:,1) = currentPoseGlobalIMUbody.translation; posesIMUbody(1).p = positionsIMUbody(:,1); posesIMUbody(1).R = poses(1).R; @@ -120,9 +120,9 @@ for t = times currentPoseGlobalIMUnav, currentVelocityGlobalIMUnav, acc_omega, deltaT); %% Store data in some structure for statistics and plots - positions(:,i) = currentPoseGlobal.translation.vector; - positionsIMUbody(:,i) = currentPoseGlobalIMUbody.translation.vector; - positionsIMUnav(:,i) = currentPoseGlobalIMUnav.translation.vector; + positions(:,i) = currentPoseGlobal.translation; + positionsIMUbody(:,i) = currentPoseGlobalIMUbody.translation; + positionsIMUnav(:,i) = currentPoseGlobalIMUnav.translation; % - poses(i).p = positions(:,i); posesIMUbody(i).p = positionsIMUbody(:,i); diff --git a/matlab/unstable_examples/+imuSimulator/IMUComparison_with_cov.m b/matlab/unstable_examples/+imuSimulator/IMUComparison_with_cov.m index c589bea32..450697de0 100644 --- a/matlab/unstable_examples/+imuSimulator/IMUComparison_with_cov.m +++ b/matlab/unstable_examples/+imuSimulator/IMUComparison_with_cov.m @@ -28,7 +28,7 @@ currentVelocityGlobal = velocity; %% Prepare data structures for actual trajectory and estimates % Actual trajectory positions = zeros(3, length(times)+1); -positions(:,1) = currentPoseGlobal.translation.vector; +positions(:,1) = currentPoseGlobal.translation; poses(1).p = positions(:,1); poses(1).R = currentPoseGlobal.rotation.matrix; @@ -112,7 +112,7 @@ for t = times end %% Store data in some structure for statistics and plots - positions(:,i) = currentPoseGlobal.translation.vector; + positions(:,i) = currentPoseGlobal.translation; i = i + 1; end diff --git a/matlab/unstable_examples/+imuSimulator/calculateIMUMeas_coriolis.m b/matlab/unstable_examples/+imuSimulator/calculateIMUMeas_coriolis.m index c86e40a21..0d8abad2c 100644 --- a/matlab/unstable_examples/+imuSimulator/calculateIMUMeas_coriolis.m +++ b/matlab/unstable_examples/+imuSimulator/calculateIMUMeas_coriolis.m @@ -7,7 +7,7 @@ measuredOmega = omega1Body; % Acceleration measurement (in this simple toy example no other forces % act on the body and the only acceleration is the centripetal Coriolis acceleration) -measuredAcc = Point3(cross(omega1Body, velocity1Body)).vector; +measuredAcc = Point3(cross(omega1Body, velocity1Body)); acc_omega = [ measuredAcc; measuredOmega ]; end diff --git a/matlab/unstable_examples/+imuSimulator/calculateIMUMeasurement.m b/matlab/unstable_examples/+imuSimulator/calculateIMUMeasurement.m index 534b9365e..5ed1fc516 100644 --- a/matlab/unstable_examples/+imuSimulator/calculateIMUMeasurement.m +++ b/matlab/unstable_examples/+imuSimulator/calculateIMUMeasurement.m @@ -6,16 +6,16 @@ import gtsam.*; % Calculate gyro measured rotation rate by transforming body rotation rate % into the IMU frame. -measuredOmega = imuFrame.rotation.unrotate(Point3(omega1Body)).vector; +measuredOmega = imuFrame.rotation.unrotate(Point3(omega1Body)); % Transform both velocities into IMU frame, accounting for the velocity % induced by rigid body rotation on a lever arm (Coriolis effect). velocity1inertial = imuFrame.rotation.unrotate( ... 
- Point3(velocity1Body + cross(omega1Body, imuFrame.translation.vector))).vector; + Point3(velocity1Body + cross(omega1Body, imuFrame.translation))); imu2in1 = Rot3.Expmap(measuredOmega * deltaT); velocity2inertial = imu2in1.rotate(imuFrame.rotation.unrotate( ... - Point3(velocity2Body + cross(omega2Body, imuFrame.translation.vector)))).vector; + Point3(velocity2Body + cross(omega2Body, imuFrame.translation)))); % Acceleration in IMU frame measuredAcc = (velocity2inertial - velocity1inertial) / deltaT; diff --git a/matlab/unstable_examples/+imuSimulator/coriolisExample.m b/matlab/unstable_examples/+imuSimulator/coriolisExample.m index 35d27aa73..ee4deb433 100644 --- a/matlab/unstable_examples/+imuSimulator/coriolisExample.m +++ b/matlab/unstable_examples/+imuSimulator/coriolisExample.m @@ -190,13 +190,13 @@ for i = 1:length(times) newFactors.add(PriorFactorConstantBias(currentBiasKey, zeroBias, sigma_init_b)); % Store data - positionsInFixedGT(:,1) = currentPoseFixedGT.translation.vector; + positionsInFixedGT(:,1) = currentPoseFixedGT.translation; velocityInFixedGT(:,1) = currentVelocityFixedGT; - positionsInRotatingGT(:,1) = currentPoseRotatingGT.translation.vector; - %velocityInRotatingGT(:,1) = currentPoseRotatingGT.velocity.vector; - positionsEstimates(:,i) = currentPoseEstimate.translation.vector; - velocitiesEstimates(:,i) = currentVelocityEstimate.vector; - currentRotatingFrameForPlot(1).p = currentRotatingFrame.translation.vector; + positionsInRotatingGT(:,1) = currentPoseRotatingGT.translation; + %velocityInRotatingGT(:,1) = currentPoseRotatingGT.velocity; + positionsEstimates(:,i) = currentPoseEstimate.translation; + velocitiesEstimates(:,i) = currentVelocityEstimate; + currentRotatingFrameForPlot(1).p = currentRotatingFrame.translation; currentRotatingFrameForPlot(1).R = currentRotatingFrame.rotation.matrix; else @@ -204,18 +204,18 @@ for i = 1:length(times) % Update the position and velocity % x_t = x_0 + v_0*dt + 1/2*a_0*dt^2 % v_t = v_0 + a_0*dt - currentPositionFixedGT = Point3(currentPoseFixedGT.translation.vector ... + currentPositionFixedGT = Point3(currentPoseFixedGT.translation ... + currentVelocityFixedGT * deltaT + 0.5 * accelFixed * deltaT * deltaT); currentVelocityFixedGT = currentVelocityFixedGT + accelFixed * deltaT; currentPoseFixedGT = Pose3(Rot3, currentPositionFixedGT); % constant orientation % Rotate pose in fixed frame to get pose in rotating frame - previousPositionRotatingGT = currentPoseRotatingGT.translation.vector; + previousPositionRotatingGT = currentPoseRotatingGT.translation; currentRotatingFrame = currentRotatingFrame.compose(changePoseRotatingFrame); inverseCurrentRotatingFrame = (currentRotatingFrame.inverse); currentPoseRotatingGT = inverseCurrentRotatingFrame.compose(currentPoseFixedGT); - currentPositionRotatingGT = currentPoseRotatingGT.translation.vector; + currentPositionRotatingGT = currentPoseRotatingGT.translation; % Get velocity in rotating frame by treating it like a position and using compose % Maybe Luca knows a better way to do this within GTSAM. 
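Note: the hunks above drop the trailing `.vector()` calls because `translation()`, `rotate()`, and `unrotate()` now return plain vectors that can be used in arithmetic directly. A small Python sketch of the same pattern (the numeric values are illustrative):

```python
import numpy as np
import gtsam

pose = gtsam.Pose3(gtsam.Rot3.Ypr(0.1, 0.0, 0.0), gtsam.Point3(1, 2, 3))
velocity = np.array([1.0, 0.0, 0.0])
accel_body = np.array([0.0, 0.0, -9.81])
dt = 0.01

# translation() and rotate() now return 3-vectors; no .vector() needed.
t = pose.translation()
accel_world = pose.rotation().rotate(gtsam.Point3(*accel_body))
new_position = t + velocity * dt + 0.5 * accel_world * dt * dt
new_pose = gtsam.Pose3(pose.rotation(), gtsam.Point3(*new_position))
```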
@@ -230,11 +230,11 @@ for i = 1:length(times) % - 0.5 * accelFixed * deltaT * deltaT) / deltaT + accelFixed * deltaT; % Store GT (ground truth) poses - positionsInFixedGT(:,i) = currentPoseFixedGT.translation.vector; + positionsInFixedGT(:,i) = currentPoseFixedGT.translation; velocityInFixedGT(:,i) = currentVelocityFixedGT; - positionsInRotatingGT(:,i) = currentPoseRotatingGT.translation.vector; + positionsInRotatingGT(:,i) = currentPoseRotatingGT.translation; velocityInRotatingGT(:,i) = currentVelocityRotatingGT; - currentRotatingFrameForPlot(i).p = currentRotatingFrame.translation.vector; + currentRotatingFrameForPlot(i).p = currentRotatingFrame.translation; currentRotatingFrameForPlot(i).R = currentRotatingFrame.rotation.matrix; %% Estimate trajectory in rotating frame using GTSAM (ground truth measurements) @@ -303,9 +303,9 @@ for i = 1:length(times) currentVelocityEstimate = isam.calculateEstimate(currentVelKey); currentBias = isam.calculateEstimate(currentBiasKey); - positionsEstimates(:,i) = currentPoseEstimate.translation.vector; - velocitiesEstimates(:,i) = currentVelocityEstimate.vector; - biasEstimates(:,i) = currentBias.vector; + positionsEstimates(:,i) = currentPoseEstimate.translation; + velocitiesEstimates(:,i) = currentVelocityEstimate; + biasEstimates(:,i) = currentBias; % In matrix form: R_error = R_gt'*R_estimate % Perform Logmap on the rotation matrix to get a vector diff --git a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisBetween.m b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisBetween.m index 2eddf75ee..64ba36d3b 100644 --- a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisBetween.m +++ b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisBetween.m @@ -151,14 +151,14 @@ hold on; if options.includeCameraFactors b = [-1000 2000 -2000 2000 -30 30]; for i = 1:size(metadata.camera.gtLandmarkPoints,2) - p = metadata.camera.gtLandmarkPoints(i).vector; + p = metadata.camera.gtLandmarkPoints(i); if(p(1) > b(1) && p(1) < b(2) && p(2) > b(3) && p(2) < b(4) && p(3) > b(5) && p(3) < b(6)) plot3(p(1), p(2), p(3), 'k+'); end end pointsToPlot = metadata.camera.gtLandmarkPoints(find(projectionFactorSeenBy > 0)); for i = 1:length(pointsToPlot) - p = pointsToPlot(i).vector; + p = pointsToPlot(i); plot3(p(1), p(2), p(3), 'gs', 'MarkerSize', 10); end end @@ -233,9 +233,9 @@ for k=1:numMonteCarloRuns for i=0:options.trajectoryLength % compute estimation errors currentPoseKey = symbol('x', i); - gtPosition = gtValues.at(currentPoseKey).translation.vector; - estPosition = estimate.at(currentPoseKey).translation.vector; - estR = estimate.at(currentPoseKey).rotation.matrix; + gtPosition = gtValues.atPose3(currentPoseKey).translation; + estPosition = estimate.atPose3(currentPoseKey).translation; + estR = estimate.atPose3(currentPoseKey).rotation.matrix; errPosition = estPosition - gtPosition; % compute covariances: diff --git a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateFactorGraph.m b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateFactorGraph.m index 00ae4b9c2..07f146dcb 100644 --- a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateFactorGraph.m +++ b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateFactorGraph.m @@ -14,7 +14,7 @@ graph = NonlinearFactorGraph; for i=0:length(measurements) % Get the current pose currentPoseKey = symbol('x', i); - currentPose = values.at(currentPoseKey); + currentPose = values.atPose3(currentPoseKey); if i==0 %% first time step, add priors @@ -26,11 +26,11 @@ 
for i=0:length(measurements) % IMU velocity and bias priors if options.includeIMUFactors == 1 currentVelKey = symbol('v', 0); - currentVel = values.at(currentVelKey).vector; + currentVel = values.atPoint3(currentVelKey); graph.add(PriorFactorLieVector(currentVelKey, LieVector(currentVel), noiseModels.noiseVel)); currentBiasKey = symbol('b', 0); - currentBias = values.at(currentBiasKey); + currentBias = values.atPoint3(currentBiasKey); graph.add(PriorFactorConstantBias(currentBiasKey, currentBias, noiseModels.noisePriorBias)); end @@ -155,7 +155,7 @@ for i=0:length(measurements) if options.includeCameraFactors == 1 for j = 1:length(measurements(i).landmarks) cameraMeasurmentNoise = measurementNoise.cameraNoiseVector .* randn(2,1); - cameraPixelMeasurement = measurements(i).landmarks(j).vector; + cameraPixelMeasurement = measurements(i).landmarks(j); % Only add the measurement if it is in the image frame (based on calibration) if(cameraPixelMeasurement(1) > 0 && cameraPixelMeasurement(2) > 0 ... && cameraPixelMeasurement(1) < 2*metadata.camera.calibration.px ... diff --git a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateTrajectory.m b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateTrajectory.m index 195b7ff69..3d8a9b5d2 100644 --- a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateTrajectory.m +++ b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateTrajectory.m @@ -40,7 +40,7 @@ if options.useRealData == 1 %% gt Between measurements if options.includeBetweenFactors == 1 && i > 0 - prevPose = values.at(currentPoseKey - 1); + prevPose = values.atPose3(currentPoseKey - 1); deltaPose = prevPose.between(currentPose); measurements(i).deltaVector = Pose3.Logmap(deltaPose); end @@ -65,7 +65,7 @@ if options.useRealData == 1 IMUdeltaPose = IMUPose1.between(IMUPose2); IMUdeltaPoseVector = Pose3.Logmap(IMUdeltaPose); IMUdeltaRotVector = IMUdeltaPoseVector(1:3); - IMUdeltaPositionVector = IMUPose2.translation.vector - IMUPose1.translation.vector; % translation in absolute frame + IMUdeltaPositionVector = IMUPose2.translation - IMUPose1.translation; % translation in absolute frame measurements(i).imu(j).deltaT = deltaT; @@ -88,7 +88,7 @@ if options.useRealData == 1 %% gt GPS measurements if options.includeGPSFactors == 1 && i > 0 - gpsPositionVector = imuSimulator.getPoseFromGtScenario(gtScenario,scenarioInd).translation.vector; + gpsPositionVector = imuSimulator.getPoseFromGtScenario(gtScenario,scenarioInd).translation; measurements(i).gpsPositionVector = gpsPositionVector; end diff --git a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory.m b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory.m index 3f72f1215..2de1e1103 100644 --- a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory.m +++ b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory.m @@ -5,9 +5,9 @@ function [ finalPose, finalVelocityGlobal ] = integrateIMUTrajectory( ... import gtsam.*; imu2in1 = Rot3.Expmap(acc_omegaIMU(4:6) * deltaT); -accelGlobal = initialPoseGlobal.rotation().rotate(Point3(acc_omegaIMU(1:3))).vector; +accelGlobal = initialPoseGlobal.rotation().rotate(Point3(acc_omegaIMU(1:3))); -finalPosition = Point3(initialPoseGlobal.translation.vector ... +finalPosition = Point3(initialPoseGlobal.translation ... 
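Note: several hunks above replace the untyped `values.at(key)` with typed accessors (`atPose3`, `atPoint3`, `atVector`, `atConstantBias`). A brief Python sketch of the corresponding calls in the pybind wrapper (keys and contents are illustrative):

```python
import gtsam
from gtsam.symbol_shorthand import B, X

values = gtsam.Values()
values.insert(X(0), gtsam.Pose3())
values.insert(B(0), gtsam.imuBias.ConstantBias())

# The generic values.at(key) is gone; retrieval uses type-specific accessors,
# just as the examples above use atPose3/atPoint3/atVector.
pose = values.atPose3(X(0))
bias = values.atConstantBias(B(0))
```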
+ initialVelocityGlobal * deltaT + 0.5 * accelGlobal * deltaT * deltaT); finalVelocityGlobal = initialVelocityGlobal + accelGlobal * deltaT; finalRotation = initialPoseGlobal.rotation.compose(imu2in1); diff --git a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_bodyFrame.m b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_bodyFrame.m index 50b223060..bec2d760d 100644 --- a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_bodyFrame.m +++ b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_bodyFrame.m @@ -3,7 +3,7 @@ function [ finalPose, finalVelocityGlobal ] = integrateIMUTrajectory_bodyFrame( % Before integrating in the body frame we need to compensate for the Coriolis % effect -acc_body = acc_omegaIMU(1:3) - Point3(cross(acc_omegaIMU(4:6), velocity1Body)).vector; +acc_body = acc_omegaIMU(1:3) - Point3(cross(acc_omegaIMU(4:6), velocity1Body)); % after compensating for coriolis this will be essentially zero % since we are moving at constant body velocity @@ -16,8 +16,8 @@ finalPositionBody = velocity1Body * deltaT + 0.5 * acc_body * deltaT * deltaT; finalVelocityBody = velocity1Body + acc_body * deltaT; %% Express the integrated quantities in the global frame -finalVelocityGlobal = initialVelocityGlobal + (initialPoseGlobal.rotation().rotate(Point3(finalVelocityBody)).vector() ); -finalPosition = initialPoseGlobal.translation().vector() + initialPoseGlobal.rotation().rotate( Point3(finalPositionBody)).vector() ; +finalVelocityGlobal = initialVelocityGlobal + (initialPoseGlobal.rotation().rotate(Point3(finalVelocityBody)) ); +finalPosition = initialPoseGlobal.translation() + initialPoseGlobal.rotation().rotate( Point3(finalPositionBody)) ; finalRotation = initialPoseGlobal.rotation.compose(imu2in1); % Include position and rotation in a pose finalPose = Pose3(finalRotation, Point3(finalPosition) ); diff --git a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_navFrame.m b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_navFrame.m index b919520ac..ea851315f 100644 --- a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_navFrame.m +++ b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_navFrame.m @@ -9,8 +9,8 @@ finalRotation = initialPoseGlobal.rotation.compose(imu2in1); intermediateRotation = initialPoseGlobal.rotation.compose( Rot3.Expmap(acc_omegaIMU(4:6) * deltaT/2 )); % Integrate positions (equation (1) in Lupton) -accelGlobal = intermediateRotation.rotate(Point3(acc_omegaIMU(1:3))).vector; -finalPosition = Point3(initialPoseGlobal.translation.vector ... +accelGlobal = intermediateRotation.rotate(Point3(acc_omegaIMU(1:3))); +finalPosition = Point3(initialPoseGlobal.translation ... 
+ initialVelocityGlobal * deltaT + 0.5 * accelGlobal * deltaT * deltaT); finalVelocityGlobal = initialVelocityGlobal + accelGlobal * deltaT; diff --git a/matlab/unstable_examples/+imuSimulator/integrateTrajectory.m b/matlab/unstable_examples/+imuSimulator/integrateTrajectory.m index e51b622b0..a704342ae 100644 --- a/matlab/unstable_examples/+imuSimulator/integrateTrajectory.m +++ b/matlab/unstable_examples/+imuSimulator/integrateTrajectory.m @@ -6,16 +6,16 @@ import gtsam.*; % Rotation: R^1_2 body2in1 = Rot3.Expmap(omega1Body * deltaT); % Velocity 2 in frame 1: v^1_2 = R^1_2 v^2_2 -velocity2inertial = body2in1.rotate(Point3(velocity2Body)).vector; +velocity2inertial = body2in1.rotate(Point3(velocity2Body)); % Acceleration: a^1 = (v^1_2 - v^1_1)/dt accelBody1 = (velocity2inertial - velocity1Body) / deltaT; % Velocity 1 in frame W: v^W_1 = R^W_1 v^1_1 -initialVelocityGlobal = initialPose.rotation().rotate(Point3(velocity1Body)).vector; +initialVelocityGlobal = initialPose.rotation().rotate(Point3(velocity1Body)); % Acceleration in frame W: a^W = R^W_1 a^1 -accelGlobal = initialPose.rotation().rotate(Point3(accelBody1)).vector; +accelGlobal = initialPose.rotation().rotate(Point3(accelBody1)); -finalPosition = Point3(initialPose.translation.vector + initialVelocityGlobal * deltaT + 0.5 * accelGlobal * deltaT * deltaT); +finalPosition = Point3(initialPose.translation + initialVelocityGlobal * deltaT + 0.5 * accelGlobal * deltaT * deltaT); finalVelocityGlobal = initialVelocityGlobal + accelGlobal * deltaT; finalRotation = initialPose.rotation.compose(body2in1); finalPose = Pose3(finalRotation, finalPosition); diff --git a/matlab/unstable_examples/+imuSimulator/test1onestep.m b/matlab/unstable_examples/+imuSimulator/test1onestep.m index 883569849..cb66d23d6 100644 --- a/matlab/unstable_examples/+imuSimulator/test1onestep.m +++ b/matlab/unstable_examples/+imuSimulator/test1onestep.m @@ -10,7 +10,7 @@ omega = [0;0;0.1]; velocity = [1;0;0]; R = Rot3.Expmap(omega * deltaT); -velocity2body = R.unrotate(Point3(velocity)).vector; +velocity2body = R.unrotate(Point3(velocity)); acc_omegaExpected = [-0.01; 0; 0; 0; 0; 0.1]; acc_omegaActual = imuSimulator.calculateIMUMeasurement(omega, omega, velocity, velocity2body, deltaT, Pose3(Rot3, Point3([1;0;0]))); disp('IMU measurement discrepancy:'); @@ -40,7 +40,7 @@ disp(acc_omegaActual - acc_omegaExpected); initialPose = Pose3; initialVelocity = velocity; finalPoseExpected = Pose3.Expmap([ omega; velocity ] * deltaT); -finalVelocityExpected = finalPoseExpected.rotation.rotate(Point3(velocity)).vector; +finalVelocityExpected = finalPoseExpected.rotation.rotate(Point3(velocity)); [ finalPoseActual, finalVelocityActual ] = imuSimulator.integrateTrajectory(initialPose, omega, velocity, velocity, deltaT); disp('Final pose discrepancy:'); disp(finalPoseExpected.between(finalPoseActual).matrix); diff --git a/matlab/unstable_examples/+imuSimulator/test2constglobal.m b/matlab/unstable_examples/+imuSimulator/test2constglobal.m index 19956d08a..6ab35d50b 100644 --- a/matlab/unstable_examples/+imuSimulator/test2constglobal.m +++ b/matlab/unstable_examples/+imuSimulator/test2constglobal.m @@ -21,12 +21,12 @@ positions = zeros(3, length(times)+1); i = 2; for t = times - velocity1body = currentPoseGlobal.rotation.unrotate(Point3(currentVelocityGlobal)).vector; + velocity1body = currentPoseGlobal.rotation.unrotate(Point3(currentVelocityGlobal)); R = Rot3.Expmap(omega * deltaT); - velocity2body = 
currentPoseGlobal.rotation.compose(R).unrotate(Point3(currentVelocityGlobal)).vector; + velocity2body = currentPoseGlobal.rotation.compose(R).unrotate(Point3(currentVelocityGlobal)); [ currentPoseGlobal, currentVelocityGlobal ] = imuSimulator.integrateTrajectory(currentPoseGlobal, omega, velocity1body, velocity2body, deltaT); - positions(:,i) = currentPoseGlobal.translation.vector; + positions(:,i) = currentPoseGlobal.translation; i = i + 1; end diff --git a/matlab/unstable_examples/+imuSimulator/test3constbody.m b/matlab/unstable_examples/+imuSimulator/test3constbody.m index b3ee2edfc..8ee14ab78 100644 --- a/matlab/unstable_examples/+imuSimulator/test3constbody.m +++ b/matlab/unstable_examples/+imuSimulator/test3constbody.m @@ -26,27 +26,27 @@ currentPoseGlobal = Pose3; currentVelocityGlobal = velocity; % Initial state (IMU) currentPoseGlobalIMU = Pose3; %currentPoseGlobal.compose(IMUinBody); -%currentVelocityGlobalIMU = IMUinBody.rotation.unrotate(Point3(velocity)).vector; % no Coriolis here? +%currentVelocityGlobalIMU = IMUinBody.rotation.unrotate(Point3(velocity)); % no Coriolis here? currentVelocityGlobalIMU = IMUinBody.rotation.unrotate( ... - Point3(velocity + cross(omega, IMUinBody.translation.vector))).vector; + Point3(velocity + cross(omega, IMUinBody.translation))); % Positions % body trajectory positions = zeros(3, length(times)+1); -positions(:,1) = currentPoseGlobal.translation.vector; +positions(:,1) = currentPoseGlobal.translation; poses(1).p = positions(:,1); poses(1).R = currentPoseGlobal.rotation.matrix; % Expected IMU trajectory (from body trajectory and lever arm) positionsIMUe = zeros(3, length(times)+1); -positionsIMUe(:,1) = IMUinBody.compose(currentPoseGlobalIMU).translation.vector; +positionsIMUe(:,1) = IMUinBody.compose(currentPoseGlobalIMU).translation; posesIMUe(1).p = positionsIMUe(:,1); posesIMUe(1).R = poses(1).R * IMUinBody.rotation.matrix; % Integrated IMU trajectory (from IMU measurements) positionsIMU = zeros(3, length(times)+1); -positionsIMU(:,1) = IMUinBody.compose(currentPoseGlobalIMU).translation.vector; +positionsIMU(:,1) = IMUinBody.compose(currentPoseGlobalIMU).translation; posesIMU(1).p = positionsIMU(:,1); posesIMU(1).R = IMUinBody.compose(currentPoseGlobalIMU).rotation.matrix; @@ -62,9 +62,9 @@ for t = times currentPoseGlobalIMU, currentVelocityGlobalIMU, acc_omega, deltaT); % Store data in some structure for statistics and plots - positions(:,i) = currentPoseGlobal.translation.vector; - positionsIMUe(:,i) = currentPoseGlobal.translation.vector + currentPoseGlobal.rotation.matrix * IMUinBody.translation.vector; - positionsIMU(:,i) = IMUinBody.compose(currentPoseGlobalIMU).translation.vector; + positions(:,i) = currentPoseGlobal.translation; + positionsIMUe(:,i) = currentPoseGlobal.translation + currentPoseGlobal.rotation.matrix * IMUinBody.translation; + positionsIMU(:,i) = IMUinBody.compose(currentPoseGlobalIMU).translation; poses(i).p = positions(:,i); posesIMUe(i).p = positionsIMUe(:,i); diff --git a/matlab/unstable_examples/+imuSimulator/test4circle.m b/matlab/unstable_examples/+imuSimulator/test4circle.m index 22ee175dd..ab2c546db 100644 --- a/matlab/unstable_examples/+imuSimulator/test4circle.m +++ b/matlab/unstable_examples/+imuSimulator/test4circle.m @@ -34,19 +34,19 @@ currentVelocityGlobalIMUbody = currentVelocityGlobal; %% Prepare data structures for actual trajectory and estimates % Actual trajectory positions = zeros(3, length(times)+1); -positions(:,1) = currentPoseGlobal.translation.vector; +positions(:,1) = 
currentPoseGlobal.translation; poses(1).p = positions(:,1); poses(1).R = currentPoseGlobal.rotation.matrix; % Trajectory estimate (integrated in the navigation frame) positionsIMUnav = zeros(3, length(times)+1); -positionsIMUnav(:,1) = currentPoseGlobalIMUbody.translation.vector; +positionsIMUnav(:,1) = currentPoseGlobalIMUbody.translation; posesIMUnav(1).p = positionsIMUnav(:,1); posesIMUnav(1).R = poses(1).R; % Trajectory estimate (integrated in the body frame) positionsIMUbody = zeros(3, length(times)+1); -positionsIMUbody(:,1) = currentPoseGlobalIMUbody.translation.vector; +positionsIMUbody(:,1) = currentPoseGlobalIMUbody.translation; posesIMUbody(1).p = positionsIMUbody(:,1); posesIMUbody(1).R = poses(1).R; @@ -72,9 +72,9 @@ for t = times currentPoseGlobalIMUnav, currentVelocityGlobalIMUnav, acc_omega, deltaT); %% Store data in some structure for statistics and plots - positions(:,i) = currentPoseGlobal.translation.vector; - positionsIMUbody(:,i) = currentPoseGlobalIMUbody.translation.vector; - positionsIMUnav(:,i) = currentPoseGlobalIMUnav.translation.vector; + positions(:,i) = currentPoseGlobal.translation; + positionsIMUbody(:,i) = currentPoseGlobalIMUbody.translation; + positionsIMUnav(:,i) = currentPoseGlobalIMUnav.translation; % - poses(i).p = positions(:,i); posesIMUbody(i).p = positionsIMUbody(:,i); diff --git a/matlab/unstable_examples/FlightCameraTransformIMU.m b/matlab/unstable_examples/FlightCameraTransformIMU.m index 9a8a27344..d2f2bc34d 100644 --- a/matlab/unstable_examples/FlightCameraTransformIMU.m +++ b/matlab/unstable_examples/FlightCameraTransformIMU.m @@ -120,7 +120,7 @@ for i=1:size(trajectory)-1 end % current ground-truth position indicator - h_cursor = plot3(a1, pose_t.x,pose_t.y,pose_t.z,'*'); + h_cursor = plot3(a1, pose_t(1),pose_t(2),pose_t(3),'*'); camera_pose = pose.compose(camera_transform); @@ -198,7 +198,7 @@ for i=1:size(trajectory)-1 if ~result.exists(lKey) p = landmarks.atPoint3(lKey); n = normrnd(0,landmark_noise,3,1); - noisy_landmark = Point3(p.x()+n(1),p.y()+n(2),p.z()+n(3)); + noisy_landmark = p + n; initial.insert(lKey, noisy_landmark); % and add a prior since its position is known @@ -245,32 +245,33 @@ for i=1:size(trajectory)-1 initial = Values; fg = NonlinearFactorGraph; - currentVelocityGlobal = result.at(currentVelKey); - currentBias = result.at(currentBiasKey); + currentVelocityGlobal = result.atPoint3(currentVelKey); + currentBias = result.atConstantBias(currentBiasKey); %% plot current pose result - isam_pose = result.at(xKey); + isam_pose = result.atPose3(xKey); pose_t = isam_pose.translation(); if exist('h_result','var') delete(h_result); end - h_result = plot3(a1, pose_t.x,pose_t.y,pose_t.z,'^b', 'MarkerSize', 10); + h_result = plot3(a1, pose_t(1),pose_t(2),pose_t(3),'^b', 'MarkerSize', 10); title(a1, sprintf('Step %d', i)); if exist('h_text1(1)', 'var') delete(h_text1(1)); % delete(h_text2(1)); end - ty = result.at(transformKey).translation().y(); - K_estimate = result.at(calibrationKey); + t = result.atPose3(transformKey).translation(); + ty = t(2); + K_estimate = result.atCal3_S2(calibrationKey); K_errors = K.localCoordinates(K_estimate); - camera_transform_estimate = result.at(transformKey); + camera_transform_estimate = result.atPose3(transformKey); - fx = result.at(calibrationKey).fx(); - fy = result.at(calibrationKey).fy(); + fx = result.atCal3_S2(calibrationKey).fx(); + fy = result.atCal3_S2(calibrationKey).fy(); % h_text1 = text(-600,0,0,sprintf('Y-Transform(0.0): %0.2f',ty)); text(0,1300,0,sprintf('Calibration and IMU-cam 
transform errors:')); @@ -304,7 +305,7 @@ for i=1:size(trajectory)-1 end %% print out final camera transform and write video -result.at(transformKey); +result.atPose3(transformKey); if(write_video) close(videoObj); end \ No newline at end of file diff --git a/matlab/unstable_examples/TransformCalProjectionFactorIMUExampleISAM.m b/matlab/unstable_examples/TransformCalProjectionFactorIMUExampleISAM.m index 4557d711f..9796a9737 100644 --- a/matlab/unstable_examples/TransformCalProjectionFactorIMUExampleISAM.m +++ b/matlab/unstable_examples/TransformCalProjectionFactorIMUExampleISAM.m @@ -53,7 +53,7 @@ y_shift = Point3(0,0.5,0); % insert shifted points for i=1:nrPoints - initial.insert(100+i,landmarks{i}.compose(y_shift)); + initial.insert(100+i,landmarks{i} + y_shift); end figure(1); @@ -134,7 +134,7 @@ for i=1:steps end if i == 2 fg.add(PriorFactorPose3(2, Pose3(Rot3(),Point3(1,0,0)),pose_cov)); - fg.add(PriorFactorLieVector(currentVelKey, currentVelocityGlobal, sigma_init_v)); + fg.add(PriorFactorVector(currentVelKey, currentVelocityGlobal, sigma_init_v)); fg.add(PriorFactorConstantBias(currentBiasKey, currentBias, sigma_init_b)); end if i > 1 @@ -144,7 +144,7 @@ for i=1:steps step = move_circle; end - initial.insert(i,result.at(i-1).compose(step)); + initial.insert(i,result.atPose3(i-1).compose(step)); fg.add(BetweenFactorPose3(i-1,i, step, covariance)); deltaT = 1; @@ -158,10 +158,13 @@ for i=1:steps [ currentIMUPoseGlobal, currentVelocityGlobal ] = imuSimulator.integrateTrajectory( ... currentIMUPoseGlobal, omega, velocity, velocity, deltaT); - - currentSummarizedMeasurement = gtsam.ImuFactorPreintegratedMeasurements( ... - currentBias, IMU_metadata.AccelerometerSigma.^2 * eye(3), ... - IMU_metadata.GyroscopeSigma.^2 * eye(3), IMU_metadata.IntegrationSigma.^2 * eye(3)); + + params = gtsam.PreintegrationParams.MakeSharedD(9.81); + params.setAccelerometerCovariance(IMU_metadata.AccelerometerSigma.^2 * eye(3)); + params.setGyroscopeCovariance(IMU_metadata.GyroscopeSigma.^2 * eye(3)); + params.setIntegrationCovariance(IMU_metadata.IntegrationSigma.^2 * eye(3)); + currentSummarizedMeasurement = gtsam.PreintegratedImuMeasurements( ... + params, currentBias); accMeas = acc_omega(1:3)-g; omegaMeas = acc_omega(4:6); @@ -171,7 +174,7 @@ for i=1:steps fg.add(ImuFactor( ... i-1, currentVelKey-1, ... i, currentVelKey, ... - currentBiasKey, currentSummarizedMeasurement, g, w_coriolis)); + currentBiasKey, currentSummarizedMeasurement)); % Bias evolution as given in the IMU metadata fg.add(BetweenFactorConstantBias(currentBiasKey-1, currentBiasKey, imuBias.ConstantBias(zeros(3,1), zeros(3,1)), ... 
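Note: the hunk above replaces the old `ImuFactorPreintegratedMeasurements` constructor with `PreintegrationParams` plus `PreintegratedImuMeasurements`, and drops the explicit gravity/Coriolis arguments from `ImuFactor` since they now live in the params object. Roughly the same flow in Python (covariance values, time step, and keys are illustrative):

```python
import numpy as np
import gtsam
from gtsam.symbol_shorthand import B, V, X

params = gtsam.PreintegrationParams.MakeSharedD(9.81)  # gravity along -Z (assumed)
params.setAccelerometerCovariance(0.01**2 * np.eye(3))
params.setGyroscopeCovariance(0.001**2 * np.eye(3))
params.setIntegrationCovariance(1e-7 * np.eye(3))

bias = gtsam.imuBias.ConstantBias()
pim = gtsam.PreintegratedImuMeasurements(params, bias)

dt = 0.01
measured_acc = np.array([0.0, 0.0, 9.81])  # stationary body: gravity reaction only
measured_omega = np.zeros(3)
pim.integrateMeasurement(measured_acc, measured_omega, dt)

# Gravity and Coriolis are taken from params, so ImuFactor no longer takes them.
factor = gtsam.ImuFactor(X(0), V(0), X(1), V(1), B(0), pim)
```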
@@ -204,8 +207,8 @@ for i=1:steps initial = Values; fg = NonlinearFactorGraph; - currentVelocityGlobal = isam.calculateEstimate(currentVelKey); - currentBias = isam.calculateEstimate(currentBiasKey); + currentVelocityGlobal = isam.calculateEstimate().atVector(currentVelKey); + currentBias = isam.calculateEstimate().atConstantBias(currentBiasKey); %% Compute some marginals marginal = isam.marginalCovariance(calibrationKey); @@ -249,10 +252,10 @@ for i=1:steps gtsam.plotPose3(currentIMUPoseGlobal, [], 2); %% plot results - result_camera_transform = result.at(transformKey); + result_camera_transform = result.atPose3(transformKey); for j=1:i - gtsam.plotPose3(result.at(j),[],0.5); - gtsam.plotPose3(result.at(j).compose(result_camera_transform),[],0.5); + gtsam.plotPose3(result.atPose3(j),[],0.5); + gtsam.plotPose3(result.atPose3(j).compose(result_camera_transform),[],0.5); end xlabel('x (m)'); @@ -265,14 +268,15 @@ for i=1:steps % axis equal for l=101:100+nrPoints - plotPoint3(result.at(l),'g'); + plotPoint3(result.atPoint3(l),'g'); end - ty = result.at(transformKey).translation().y(); - fx = result.at(calibrationKey).fx(); - fy = result.at(calibrationKey).fy(); - px = result.at(calibrationKey).px(); - py = result.at(calibrationKey).py(); + t = result.atPose3(transformKey).translation(); + ty = t(2); + fx = result.atCal3_S2(calibrationKey).fx(); + fy = result.atCal3_S2(calibrationKey).fy(); + px = result.atCal3_S2(calibrationKey).px(); + py = result.atCal3_S2(calibrationKey).py(); text(1,5,5,sprintf('Y-Transform(0.0): %0.2f',ty)); text(1,5,3,sprintf('fx(900): %.0f',fx)); text(1,5,1,sprintf('fy(900): %.0f',fy)); @@ -342,10 +346,10 @@ end fprintf('Cheirality Exception count: %d\n', cheirality_exception_count); disp('Transform after optimization'); -result.at(transformKey) +result.atPose3(transformKey) disp('Calibration after optimization'); -result.at(calibrationKey) +result.atCal3_S2(calibrationKey) disp('Bias after optimization'); currentBias diff --git a/matlab/unstable_examples/TransformProjectionFactorExample.m b/matlab/unstable_examples/TransformProjectionFactorExample.m index 79a96209d..f6a896cff 100644 --- a/matlab/unstable_examples/TransformProjectionFactorExample.m +++ b/matlab/unstable_examples/TransformProjectionFactorExample.m @@ -33,7 +33,7 @@ y_shift = Point3(0,1,0); % insert shifted points for i=1:nrPoints - initial.insert(100+i,landmarks{i}.compose(y_shift)); + initial.insert(100+i,landmarks{i} + y_shift); end figure(1); diff --git a/matlab/unstable_examples/TransformProjectionFactorExampleISAM.m b/matlab/unstable_examples/TransformProjectionFactorExampleISAM.m index ca5b70c62..88f6cc63c 100644 --- a/matlab/unstable_examples/TransformProjectionFactorExampleISAM.m +++ b/matlab/unstable_examples/TransformProjectionFactorExampleISAM.m @@ -47,7 +47,7 @@ y_shift = Point3(0,1,0); % insert shifted points for i=1:nrPoints - initial.insert(100+i,landmarks{i}.compose(y_shift)); + initial.insert(100+i,landmarks{i} + y_shift); end figure(1); @@ -146,7 +146,8 @@ for i=1:20 plotPoint3(result.atPoint3(l),'g'); end - ty = result.atPose3(1000).translation().y(); + t = result.atPose3(1000).translation(); + ty = t(2); text(5,5,5,sprintf('Y-Transform: %0.2g',ty)); if(write_video) diff --git a/matlab/unstable_examples/plot_projected_landmarks.m b/matlab/unstable_examples/plot_projected_landmarks.m index 6b8101844..30e222016 100644 --- a/matlab/unstable_examples/plot_projected_landmarks.m +++ b/matlab/unstable_examples/plot_projected_landmarks.m @@ -25,9 +25,9 @@ for i = 0:measurement_keys.size-1 
key_index = gtsam.symbolIndex(key); p = landmarks.atPoint3(gtsam.symbol('l',key_index)); - x(i+1) = p.x; - y(i+1) = p.y; - z(i+1) = p.z; + x(i+1) = p(1); + y(i+1) = p(2); + z(i+1) = p(3); end diff --git a/matlab/unstable_examples/project_landmarks.m b/matlab/unstable_examples/project_landmarks.m index aaccc9248..3bccef94b 100644 --- a/matlab/unstable_examples/project_landmarks.m +++ b/matlab/unstable_examples/project_landmarks.m @@ -11,9 +11,9 @@ function [ measurements ] = project_landmarks( pose, landmarks, K ) z = camera.project(landmarks.atPoint3(symbol('l',i))); % check bounding box - if z.x < 0 || z.x > 1280 + if z(1) < 0 || z(1) > 1280 continue - elseif z.y < 0 || z.y > 960 + elseif z(2) < 0 || z(2) > 960 continue end diff --git a/matlab/unstable_examples/testTSAMFactors.m b/matlab/unstable_examples/testTSAMFactors.m index 5cfd0aa80..2d1de5855 100644 --- a/matlab/unstable_examples/testTSAMFactors.m +++ b/matlab/unstable_examples/testTSAMFactors.m @@ -50,8 +50,8 @@ result = optimizer.optimize(); % Check result CHECK('error',result.atPose2(100).equals(b1,1e-5)) CHECK('error',result.atPose2(10).equals(origin,1e-5)) -CHECK('error',result.atPoint2(1).equals(Point2(0,1),1e-5)) -CHECK('error',result.atPoint2(2).equals(Point2(0,1),1e-5)) +CHECK('error',result.atPoint2(1) - Point2(0,1) < 1e-5) +CHECK('error',result.atPoint2(2) - Point2(0,1) < 1e-5) CHECK('error',result.atPose2(20).equals(origin,1e-5)) CHECK('error',result.atPose2(200).equals(b2,1e-5)) diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt new file mode 100644 index 000000000..315f81f4e --- /dev/null +++ b/python/CMakeLists.txt @@ -0,0 +1,107 @@ +set(GTSAM_PYTHON_BUILD_DIRECTORY ${PROJECT_BINARY_DIR}/python) + +if (NOT GTSAM_BUILD_PYTHON) + return() +endif() + +# Generate setup.py. +file(READ "${PROJECT_SOURCE_DIR}/README.md" README_CONTENTS) +configure_file(${PROJECT_SOURCE_DIR}/python/setup.py.in + ${GTSAM_PYTHON_BUILD_DIRECTORY}/setup.py) + +set(WRAP_USE_CUSTOM_PYTHON_LIBRARY ${GTSAM_USE_CUSTOM_PYTHON_LIBRARY}) +set(WRAP_PYTHON_VERSION ${GTSAM_PYTHON_VERSION}) + +include(PybindWrap) + +add_custom_target(gtsam_header DEPENDS "${PROJECT_SOURCE_DIR}/gtsam/gtsam.i") +add_custom_target(gtsam_unstable_header DEPENDS "${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i") + +# ignoring the non-concrete types (type aliases) +set(ignore + gtsam::Point2 + gtsam::Point3 + gtsam::LieVector + gtsam::LieMatrix + gtsam::ISAM2ThresholdMapValue + gtsam::FactorIndices + gtsam::FactorIndexSet + gtsam::BetweenFactorPose3s + gtsam::Point2Vector + gtsam::Pose3Vector + gtsam::KeyVector) + +pybind_wrap(gtsam_py # target + ${PROJECT_SOURCE_DIR}/gtsam/gtsam.i # interface_header + "gtsam.cpp" # generated_cpp + "gtsam" # module_name + "gtsam" # top_namespace + "${ignore}" # ignore_classes + ${PROJECT_SOURCE_DIR}/python/gtsam/gtsam.tpl + gtsam # libs + "gtsam;gtsam_header" # dependencies + ON # use_boost + ) + +set_target_properties(gtsam_py PROPERTIES + INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib" + INSTALL_RPATH_USE_LINK_PATH TRUE + OUTPUT_NAME "gtsam" + LIBRARY_OUTPUT_DIRECTORY "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam" + DEBUG_POSTFIX "" # Otherwise you will have a wrong name + RELWITHDEBINFO_POSTFIX "" # Otherwise you will have a wrong name + ) + +set(GTSAM_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam) + +# Symlink all tests .py files to build folder. 
+create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam" + "${GTSAM_MODULE_PATH}") + +if(GTSAM_UNSTABLE_BUILD_PYTHON) +set(ignore + gtsam::Point2 + gtsam::Point3 + gtsam::LieVector + gtsam::LieMatrix + gtsam::ISAM2ThresholdMapValue + gtsam::FactorIndices + gtsam::FactorIndexSet + gtsam::BetweenFactorPose3s + gtsam::Point2Vector + gtsam::Pose3Vector + gtsam::KeyVector + gtsam::FixedLagSmootherKeyTimestampMapValue) +pybind_wrap(gtsam_unstable_py # target + ${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i # interface_header + "gtsam_unstable.cpp" # generated_cpp + "gtsam_unstable" # module_name + "gtsam" # top_namespace + "${ignore}" # ignore_classes + ${PROJECT_SOURCE_DIR}/python/gtsam_unstable/gtsam_unstable.tpl + gtsam_unstable # libs + "gtsam_unstable;gtsam_unstable_header" # dependencies + ON # use_boost + ) + +set_target_properties(gtsam_unstable_py PROPERTIES + INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib" + INSTALL_RPATH_USE_LINK_PATH TRUE + OUTPUT_NAME "gtsam_unstable" + LIBRARY_OUTPUT_DIRECTORY "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable" + DEBUG_POSTFIX "" # Otherwise you will have a wrong name + RELWITHDEBINFO_POSTFIX "" # Otherwise you will have a wrong name + ) + +set(GTSAM_UNSTABLE_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable) + +# Symlink all tests .py files to build folder. +create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable" + "${GTSAM_UNSTABLE_MODULE_PATH}") +endif() + +set(GTSAM_PYTHON_INSTALL_TARGET python-install) +add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET} + COMMAND ${PYTHON_EXECUTABLE} ${GTSAM_PYTHON_BUILD_DIRECTORY}/setup.py install + DEPENDS gtsam_py gtsam_unstable_py + WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY}) diff --git a/python/README.md b/python/README.md new file mode 100644 index 000000000..b1a3a865f --- /dev/null +++ b/python/README.md @@ -0,0 +1,65 @@ +# README + +# Python Wrapper + +This is the Python wrapper around the GTSAM C++ library. We use our custom [wrap library](https://github.com/borglab/wrap) to generate the bindings to the underlying C++ code. + +## Requirements + +- If you want to build the GTSAM python library for a specific python version (eg 3.6), + use the `-DGTSAM_PYTHON_VERSION=3.6` option when running `cmake` otherwise the default interpreter will be used. +- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment), + then the environment should be active while building GTSAM. +- This wrapper needs `pyparsing(>=2.4.2)`, and `numpy(>=1.11.0)`. These can be installed as follows: + + ```bash + pip install -r /python/requirements.txt + ``` + +## Install + +- Run cmake with the `GTSAM_BUILD_PYTHON` cmake flag enabled to configure building the wrapper. The wrapped module will be built and copied to the directory `/python`. + +- Build GTSAM and the wrapper with `make` (or `ninja` if you use `-GNinja`). + +- To install, simply run `make python-install` (`ninja python-install`). + - The same command can be used to install into a virtual environment if it is active. + - **NOTE**: if you don't want GTSAM to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install GTSAM to a subdirectory of the build directory. + +- You can also directly run `make python-install` without running `make`, and it will compile all the dependencies accordingly. + +## Unit Tests + +The Python toolbox also has a small set of unit tests located in the +test directory. 
To run them: + + ```bash + cd /python/gtsam/tests + python -m unittest discover + ``` + +## Utils + +TODO + +## Examples + +TODO + +## Writing Your Own Scripts + +See the tests for examples. + +### Some Important Notes: + +- Vector/Matrix: + + - GTSAM expects double-precision floating point vectors and matrices. + Hence, you should pass numpy matrices with `dtype=float`, or `float64`, to avoid any conversion needed. + - Also, GTSAM expects _column-major_ matrices, unlike the default storage + scheme in numpy. But this is only performance-related as `pybind11` should translate them when needed. However, this will result a copy if your matrix is not in the expected type + and storage order. + +## Wrapping Custom GTSAM-based Project + +Please refer to the template project and the corresponding tutorial available [here](https://github.com/borglab/GTSAM-project-python). diff --git a/python/gtsam/__init__.py b/python/gtsam/__init__.py new file mode 100644 index 000000000..e6fd8c9c8 --- /dev/null +++ b/python/gtsam/__init__.py @@ -0,0 +1,27 @@ +from .gtsam import * + + +def _init(): + """This function is to add shims for the long-gone Point2 and Point3 types""" + + import numpy as np + + global Point2 # export function + + def Point2(x=0, y=0): + """Shim for the deleted Point2 type.""" + return np.array([x, y], dtype=float) + + global Point3 # export function + + def Point3(x=0, y=0, z=0): + """Shim for the deleted Point3 type.""" + return np.array([x, y, z], dtype=float) + + # for interactive debugging + if __name__ == "__main__": + # we want all definitions accessible + globals().update(locals()) + + +_init() diff --git a/cython/gtsam/examples/DogLegOptimizerExample.py b/python/gtsam/examples/DogLegOptimizerExample.py similarity index 95% rename from cython/gtsam/examples/DogLegOptimizerExample.py rename to python/gtsam/examples/DogLegOptimizerExample.py index 776ceedc4..26f4fef84 100644 --- a/cython/gtsam/examples/DogLegOptimizerExample.py +++ b/python/gtsam/examples/DogLegOptimizerExample.py @@ -35,17 +35,17 @@ def run(args): graph = gtsam.NonlinearFactorGraph() # Priors - prior = gtsam.noiseModel_Isotropic.Sigma(3, 1) + prior = gtsam.noiseModel.Isotropic.Sigma(3, 1) graph.add(gtsam.PriorFactorPose2(11, T11, prior)) graph.add(gtsam.PriorFactorPose2(21, T21, prior)) # Odometry - model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.01, 0.01, 0.3])) + model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.01, 0.01, 0.3])) graph.add(gtsam.BetweenFactorPose2(11, 12, T11.between(T12), model)) graph.add(gtsam.BetweenFactorPose2(21, 22, T21.between(T22), model)) # Range - model_rho = gtsam.noiseModel_Isotropic.Sigma(1, 0.01) + model_rho = gtsam.noiseModel.Isotropic.Sigma(1, 0.01) graph.add(gtsam.RangeFactorPose2(12, 22, 1.0, model_rho)) params = gtsam.DoglegParams() diff --git a/cython/gtsam/examples/GPSFactorExample.py b/python/gtsam/examples/GPSFactorExample.py similarity index 93% rename from cython/gtsam/examples/GPSFactorExample.py rename to python/gtsam/examples/GPSFactorExample.py index 493a07725..0bc0d1bf3 100644 --- a/cython/gtsam/examples/GPSFactorExample.py +++ b/python/gtsam/examples/GPSFactorExample.py @@ -26,8 +26,8 @@ lon0 = -84.30626 h0 = 274 # Create noise models -GPS_NOISE = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) -PRIOR_NOISE = gtsam.noiseModel_Isotropic.Sigma(6, 0.25) +GPS_NOISE = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) +PRIOR_NOISE = gtsam.noiseModel.Isotropic.Sigma(6, 0.25) # Create an empty nonlinear factor graph graph = gtsam.NonlinearFactorGraph() diff --git 
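Note: the new `gtsam/__init__.py` above shims `Point2`/`Point3` as numpy constructors, and the README notes that GTSAM expects double-precision, column-major data. A small usage sketch of both points (the sigma and covariance values are illustrative):

```python
import numpy as np
import gtsam

# The shims simply return float64 numpy arrays.
p = gtsam.Point3(1.0, 2.0, 3.0)
print(type(p), p.dtype)  # <class 'numpy.ndarray'> float64

# Pass float64 data to avoid conversions; Fortran (column-major) order
# avoids an extra copy for matrices.
sigmas = np.array([0.3, 0.3, 0.1], dtype=float)
prior_noise = gtsam.noiseModel.Diagonal.Sigmas(sigmas)
cov = np.asfortranarray(np.diag([0.1, 0.1, 0.01]))
gaussian_noise = gtsam.noiseModel.Gaussian.Covariance(cov)
```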
a/cython/gtsam/examples/ImuFactorExample.py b/python/gtsam/examples/ImuFactorExample.py similarity index 87% rename from cython/gtsam/examples/ImuFactorExample.py rename to python/gtsam/examples/ImuFactorExample.py index 06742fcd1..eec7c5ebd 100644 --- a/cython/gtsam/examples/ImuFactorExample.py +++ b/python/gtsam/examples/ImuFactorExample.py @@ -18,9 +18,7 @@ import math import gtsam import matplotlib.pyplot as plt import numpy as np -from gtsam import symbol_shorthand_B as B -from gtsam import symbol_shorthand_V as V -from gtsam import symbol_shorthand_X as X +from gtsam.symbol_shorthand import B, V, X from gtsam.utils.plot import plot_pose3 from mpl_toolkits.mplot3d import Axes3D @@ -36,8 +34,8 @@ class ImuFactorExample(PreintegrationExample): def __init__(self, twist_scenario="sick_twist"): self.velocity = np.array([2, 0, 0]) - self.priorNoise = gtsam.noiseModel_Isotropic.Sigma(6, 0.1) - self.velNoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + self.priorNoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1) + self.velNoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) # Choose one of these twists to change scenario: twist_scenarios = dict( @@ -50,7 +48,7 @@ class ImuFactorExample(PreintegrationExample): accBias = np.array([-0.3, 0.1, 0.2]) gyroBias = np.array([0.1, 0.3, -0.1]) - bias = gtsam.imuBias_ConstantBias(accBias, gyroBias) + bias = gtsam.imuBias.ConstantBias(accBias, gyroBias) dt = 1e-2 super(ImuFactorExample, self).__init__(twist_scenarios[twist_scenario], @@ -69,6 +67,7 @@ class ImuFactorExample(PreintegrationExample): # initialize data structure for pre-integrated IMU measurements pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias) + T = 12 num_poses = T # assumes 1 factor per second initial = gtsam.Values() initial.insert(BIAS_KEY, self.actualBias) @@ -110,11 +109,12 @@ class ImuFactorExample(PreintegrationExample): pim.resetIntegration() rotationNoise = gtsam.Rot3.Expmap(np.random.randn(3)*0.1) - translationNoise = gtsam.Point3(np.random.randn(3)*1) + translationNoise = gtsam.Point3(*np.random.randn(3)*1) poseNoise = gtsam.Pose3(rotationNoise, translationNoise) actual_state_i = self.scenario.navState(t + self.dt) - print("Actual state at {0}:\n{1}".format(t+self.dt, actual_state_i)) + print("Actual state at {0}:\n{1}".format( + t+self.dt, actual_state_i)) noisy_state_i = gtsam.NavState( actual_state_i.pose().compose(poseNoise), @@ -124,7 +124,7 @@ class ImuFactorExample(PreintegrationExample): initial.insert(V(i+1), noisy_state_i.velocity()) i += 1 - # add prior on end + # add priors on end # self.addPrior(num_poses - 1, graph) initial.print_("Initial values:") @@ -140,7 +140,8 @@ class ImuFactorExample(PreintegrationExample): if compute_covariances: # Calculate and print marginal covariances marginals = gtsam.Marginals(graph, result) - print("Covariance on bias:\n", marginals.marginalCovariance(BIAS_KEY)) + print("Covariance on bias:\n", + marginals.marginalCovariance(BIAS_KEY)) for i in range(num_poses): print("Covariance on pose {}:\n{}\n".format( i, marginals.marginalCovariance(X(i)))) @@ -157,7 +158,7 @@ class ImuFactorExample(PreintegrationExample): gtsam.utils.plot.set_axes_equal(POSES_FIG+1) - print("Bias Values", result.atimuBias_ConstantBias(BIAS_KEY)) + print("Bias Values", result.atConstantBias(BIAS_KEY)) plt.ioff() plt.show() @@ -170,8 +171,10 @@ if __name__ == '__main__': choices=("zero_twist", "forward_twist", "loop_twist", "sick_twist")) parser.add_argument("--time", "-T", default=12, type=int, help="Total time in seconds") - 
parser.add_argument("--compute_covariances", default=False, action='store_true') + parser.add_argument("--compute_covariances", + default=False, action='store_true') parser.add_argument("--verbose", default=False, action='store_true') args = parser.parse_args() - ImuFactorExample(args.twist_scenario).run(args.time, args.compute_covariances, args.verbose) + ImuFactorExample(args.twist_scenario).run( + args.time, args.compute_covariances, args.verbose) diff --git a/cython/gtsam/examples/ImuFactorISAM2Example.py b/python/gtsam/examples/ImuFactorISAM2Example.py similarity index 88% rename from cython/gtsam/examples/ImuFactorISAM2Example.py rename to python/gtsam/examples/ImuFactorISAM2Example.py index 4187fe878..bb90b95bf 100644 --- a/cython/gtsam/examples/ImuFactorISAM2Example.py +++ b/python/gtsam/examples/ImuFactorISAM2Example.py @@ -18,9 +18,7 @@ from gtsam import (ISAM2, BetweenFactorConstantBias, Cal3_S2, PinholeCameraCal3_S2, Point3, Pose3, PriorFactorConstantBias, PriorFactorPose3, PriorFactorVector, Rot3, Values) -from gtsam import symbol_shorthand_B as B -from gtsam import symbol_shorthand_V as V -from gtsam import symbol_shorthand_X as X +from gtsam.symbol_shorthand import B, V, X from gtsam.utils import plot @@ -30,7 +28,7 @@ def vector3(x, y, z): g = 9.81 -kGravity = vector3(0, 0, -g) +n_gravity = vector3(0, 0, -g) def preintegration_parameters(): @@ -44,7 +42,7 @@ def preintegration_parameters(): PARAMS.setUse2ndOrderCoriolis(False) PARAMS.setOmegaCoriolis(vector3(0, 0, 0)) - BIAS_COVARIANCE = gtsam.noiseModel_Isotropic.Variance(6, 0.1) + BIAS_COVARIANCE = gtsam.noiseModel.Isotropic.Variance(6, 0.1) DELTA = Pose3(Rot3.Rodrigues(0, 0, 0), Point3(0.05, -0.10, 0.20)) @@ -61,8 +59,6 @@ def get_camera(radius): def get_scenario(radius, pose_0, angular_velocity, delta_t): """Create the set of ground-truth landmarks and poses""" - - angular_velocity_vector = vector3(0, -angular_velocity, 0) linear_velocity_vector = vector3(radius * angular_velocity, 0, 0) scenario = ConstantTwistScenario( @@ -97,18 +93,18 @@ def IMU_example(): # Add a prior on pose x0. This indirectly specifies where the origin is. 
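Note: the example diffs above and below follow one consistent renaming when moving from the Cython to the pybind11 wrapper: underscore-joined names become proper nested namespaces. A condensed Python sketch of the mapping, exercising only the new names (the old names are shown in comments):

```python
import numpy as np
import gtsam
from gtsam.symbol_shorthand import B, V, X  # was: gtsam.symbol_shorthand_B, ...

# was: gtsam.noiseModel_Isotropic / gtsam.noiseModel_Diagonal
prior_noise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1)
odom_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))

# was: gtsam.imuBias_ConstantBias
bias = gtsam.imuBias.ConstantBias()

# was: gtsam.utilities_extractPose2(result)
# poses = gtsam.utilities.extractPose2(result)
```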
# 30cm std on x,y,z 0.1 rad on roll,pitch,yaw - noise = gtsam.noiseModel_Diagonal.Sigmas( + noise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.1, 0.1, 0.1, 0.3, 0.3, 0.3])) graph.push_back(PriorFactorPose3(X(0), pose_0, noise)) # Add imu priors biasKey = B(0) - biasnoise = gtsam.noiseModel_Isotropic.Sigma(6, 0.1) - biasprior = PriorFactorConstantBias(biasKey, gtsam.imuBias_ConstantBias(), + biasnoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1) + biasprior = PriorFactorConstantBias(biasKey, gtsam.imuBias.ConstantBias(), biasnoise) graph.push_back(biasprior) - initialEstimate.insert(biasKey, gtsam.imuBias_ConstantBias()) - velnoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + initialEstimate.insert(biasKey, gtsam.imuBias.ConstantBias()) + velnoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) # Calculate with correct initial velocity n_velocity = vector3(0, angular_velocity * radius, 0) @@ -134,14 +130,14 @@ def IMU_example(): if i % 5 == 0: biasKey += 1 factor = BetweenFactorConstantBias( - biasKey - 1, biasKey, gtsam.imuBias_ConstantBias(), BIAS_COVARIANCE) + biasKey - 1, biasKey, gtsam.imuBias.ConstantBias(), BIAS_COVARIANCE) graph.add(factor) - initialEstimate.insert(biasKey, gtsam.imuBias_ConstantBias()) + initialEstimate.insert(biasKey, gtsam.imuBias.ConstantBias()) # Predict acceleration and gyro measurements in (actual) body frame nRb = scenario.rotation(t).matrix() bRn = np.transpose(nRb) - measuredAcc = scenario.acceleration_b(t) - np.dot(bRn, kGravity) + measuredAcc = scenario.acceleration_b(t) - np.dot(bRn, n_gravity) measuredOmega = scenario.omega_b(t) accum.integrateMeasurement(measuredAcc, measuredOmega, delta_t) diff --git a/cython/gtsam/examples/OdometryExample.py b/python/gtsam/examples/OdometryExample.py similarity index 94% rename from cython/gtsam/examples/OdometryExample.py rename to python/gtsam/examples/OdometryExample.py index e778e3f85..8b519ce9a 100644 --- a/cython/gtsam/examples/OdometryExample.py +++ b/python/gtsam/examples/OdometryExample.py @@ -21,8 +21,8 @@ import matplotlib.pyplot as plt import gtsam.utils.plot as gtsam_plot # Create noise models -ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) -PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) +ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) +PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) # Create an empty nonlinear factor graph graph = gtsam.NonlinearFactorGraph() diff --git a/cython/gtsam/examples/PlanarManipulatorExample.py b/python/gtsam/examples/PlanarManipulatorExample.py similarity index 98% rename from cython/gtsam/examples/PlanarManipulatorExample.py rename to python/gtsam/examples/PlanarManipulatorExample.py index e42ae09d7..9af4f7fcc 100644 --- a/cython/gtsam/examples/PlanarManipulatorExample.py +++ b/python/gtsam/examples/PlanarManipulatorExample.py @@ -167,13 +167,11 @@ class ThreeLinkArm(object): axes = fig.gca() sXl1 = Pose2(0, 0, math.radians(90)) - t = sXl1.translation() - p1 = np.array([t.x(), t.y()]) + p1 = sXl1.translation() gtsam_plot.plot_pose2_on_axes(axes, sXl1) def plot_line(p, g, color): - t = g.translation() - q = np.array([t.x(), t.y()]) + q = g.translation() line = np.append(p[np.newaxis], q[np.newaxis], axis=0) axes.plot(line[:, 0], line[:, 1], color) return q diff --git a/cython/gtsam/examples/PlanarSLAMExample.py b/python/gtsam/examples/PlanarSLAMExample.py similarity index 91% rename from cython/gtsam/examples/PlanarSLAMExample.py rename to 
python/gtsam/examples/PlanarSLAMExample.py index c84f0f834..5ffdf048d 100644 --- a/cython/gtsam/examples/PlanarSLAMExample.py +++ b/python/gtsam/examples/PlanarSLAMExample.py @@ -13,15 +13,15 @@ Author: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python) from __future__ import print_function -import gtsam import numpy as np -from gtsam import symbol_shorthand_L as L -from gtsam import symbol_shorthand_X as X + +import gtsam +from gtsam.symbol_shorthand import X, L # Create noise models -PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) -ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) -MEASUREMENT_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.2])) +PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) +ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) +MEASUREMENT_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.2])) # Create an empty nonlinear factor graph graph = gtsam.NonlinearFactorGraph() diff --git a/cython/gtsam/examples/Pose2SLAMExample.py b/python/gtsam/examples/Pose2SLAMExample.py similarity index 96% rename from cython/gtsam/examples/Pose2SLAMExample.py rename to python/gtsam/examples/Pose2SLAMExample.py index 680f2209f..2569f0953 100644 --- a/cython/gtsam/examples/Pose2SLAMExample.py +++ b/python/gtsam/examples/Pose2SLAMExample.py @@ -27,10 +27,9 @@ def vector3(x, y, z): """Create 3d double numpy array.""" return np.array([x, y, z], dtype=np.float) - # Create noise models -PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.3, 0.3, 0.1)) -ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.2, 0.2, 0.1)) +PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(vector3(0.3, 0.3, 0.1)) +ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(vector3(0.2, 0.2, 0.1)) # 1. 
Create a factor graph container and add factors to it graph = gtsam.NonlinearFactorGraph() diff --git a/cython/gtsam/examples/Pose2SLAMExample_g2o.py b/python/gtsam/examples/Pose2SLAMExample_g2o.py similarity index 96% rename from cython/gtsam/examples/Pose2SLAMExample_g2o.py rename to python/gtsam/examples/Pose2SLAMExample_g2o.py index 09114370d..b2ba9c5bc 100644 --- a/cython/gtsam/examples/Pose2SLAMExample_g2o.py +++ b/python/gtsam/examples/Pose2SLAMExample_g2o.py @@ -53,7 +53,7 @@ graph, initial = gtsam.readG2o(g2oFile, is3D) assert args.kernel == "none", "Supplied kernel type is not yet implemented" # Add prior on the pose having index (key) = 0 -priorModel = gtsam.noiseModel_Diagonal.Variances(vector3(1e-6, 1e-6, 1e-8)) +priorModel = gtsam.noiseModel.Diagonal.Variances(vector3(1e-6, 1e-6, 1e-8)) graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel)) params = gtsam.GaussNewtonParams() @@ -82,7 +82,7 @@ else: print ("Done!") if args.plot: - resultPoses = gtsam.utilities_extractPose2(result) + resultPoses = gtsam.utilities.extractPose2(result) for i in range(resultPoses.shape[0]): plot.plot_pose2(1, gtsam.Pose2(resultPoses[i, :])) plt.show() diff --git a/cython/gtsam/examples/Pose3SLAMExample_g2o.py b/python/gtsam/examples/Pose3SLAMExample_g2o.py similarity index 93% rename from cython/gtsam/examples/Pose3SLAMExample_g2o.py rename to python/gtsam/examples/Pose3SLAMExample_g2o.py index 3c1a54f7b..82b3bda98 100644 --- a/cython/gtsam/examples/Pose3SLAMExample_g2o.py +++ b/python/gtsam/examples/Pose3SLAMExample_g2o.py @@ -39,11 +39,11 @@ is3D = True graph, initial = gtsam.readG2o(g2oFile, is3D) # Add Prior on the first key -priorModel = gtsam.noiseModel_Diagonal.Variances(vector6(1e-6, 1e-6, 1e-6, +priorModel = gtsam.noiseModel.Diagonal.Variances(vector6(1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4)) print("Adding prior to g2o file ") -firstKey = initial.keys().at(0) +firstKey = initial.keys()[0] graph.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel)) params = gtsam.GaussNewtonParams() @@ -65,7 +65,7 @@ else: print ("Done!") if args.plot: - resultPoses = gtsam.utilities_allPose3s(result) + resultPoses = gtsam.utilities.allPose3s(result) for i in range(resultPoses.size()): plot.plot_pose3(1, resultPoses.atPose3(i)) plt.show() diff --git a/cython/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py b/python/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py similarity index 91% rename from cython/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py rename to python/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py index 02c696905..2b2c5f991 100644 --- a/cython/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py +++ b/python/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py @@ -24,9 +24,9 @@ is3D = True graph, initial = gtsam.readG2o(g2oFile, is3D) # Add prior on the first key. 
TODO: assumes first key ios z -priorModel = gtsam.noiseModel_Diagonal.Variances( +priorModel = gtsam.noiseModel.Diagonal.Variances( np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4])) -firstKey = initial.keys().at(0) +firstKey = initial.keys()[0] graph.add(gtsam.PriorFactorPose3(0, gtsam.Pose3(), priorModel)) # Initializing Pose3 - chordal relaxation" diff --git a/cython/gtsam/examples/PreintegrationExample.py b/python/gtsam/examples/PreintegrationExample.py similarity index 97% rename from cython/gtsam/examples/PreintegrationExample.py rename to python/gtsam/examples/PreintegrationExample.py index 958221ac9..b54919bec 100644 --- a/cython/gtsam/examples/PreintegrationExample.py +++ b/python/gtsam/examples/PreintegrationExample.py @@ -10,12 +10,11 @@ A script validating the Preintegration of IMU measurements import math +import gtsam import matplotlib.pyplot as plt import numpy as np -from mpl_toolkits.mplot3d import Axes3D - -import gtsam from gtsam.utils.plot import plot_pose3 +from mpl_toolkits.mplot3d import Axes3D IMU_FIG = 1 POSES_FIG = 2 @@ -68,7 +67,7 @@ class PreintegrationExample(object): else: accBias = np.array([0, 0.1, 0]) gyroBias = np.array([0, 0, 0]) - self.actualBias = gtsam.imuBias_ConstantBias(accBias, gyroBias) + self.actualBias = gtsam.imuBias.ConstantBias(accBias, gyroBias) self.runner = gtsam.ScenarioRunner( self.scenario, self.params, self.dt, self.actualBias) @@ -114,7 +113,7 @@ class PreintegrationExample(object): actualPose = self.scenario.pose(t) plot_pose3(POSES_FIG, actualPose, scale) t = actualPose.translation() - self.maxDim = max([abs(t.x()), abs(t.y()), abs(t.z()), self.maxDim]) + self.maxDim = max([max(np.abs(t)), self.maxDim]) ax = plt.gca() ax.set_xlim3d(-self.maxDim, self.maxDim) ax.set_ylim3d(-self.maxDim, self.maxDim) diff --git a/cython/gtsam/examples/README.md b/python/gtsam/examples/README.md similarity index 84% rename from cython/gtsam/examples/README.md rename to python/gtsam/examples/README.md index 99bce00e2..e998e4dcd 100644 --- a/cython/gtsam/examples/README.md +++ b/python/gtsam/examples/README.md @@ -1,17 +1,12 @@ -These examples are almost identical to the old handwritten python wrapper -examples. However, there are just some slight name changes, for example -`noiseModel.Diagonal` becomes `noiseModel_Diagonal` etc... -Also, instead of `gtsam.Symbol('b', 0)` we can simply say `gtsam.symbol_shorthand_B(0)` or `B(0)` if we use python aliasing. 
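The README paragraph deleted above was the last place documenting the old Cython-only spellings; the rest of this diff moves every example and test over to the pybind11 wrapper's nested namespaces. A minimal sketch of the new spellings, assuming only that the pybind11-built `gtsam` package is importable (all names are taken from substitutions visible in this diff):

```python
import numpy as np
import gtsam
# was: from gtsam import symbol_shorthand_X as X, symbol_shorthand_L as L
from gtsam.symbol_shorthand import X, L

# was: gtsam.noiseModel_Diagonal.Sigmas(...) / gtsam.noiseModel_Isotropic.Sigma(...)
prior_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
pixel_noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0)

# was: gtsam.imuBias_ConstantBias()
bias = gtsam.imuBias.ConstantBias()

print(X(0), L(0))
```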
- # Porting Progress | C++ Example Name | Ported | |-------------------------------------------------------|--------| | CameraResectioning | | | CreateSFMExampleData | | -| DiscreteBayesNet_FG | none of the required discrete functionality is exposed through cython | -| easyPoint2KalmanFilter | ExtendedKalmanFilter not exposed through cython | -| elaboratePoint2KalmanFilter | GaussianSequentialSolver not exposed through cython | +| DiscreteBayesNet_FG | none of the required discrete functionality is exposed through Python | +| easyPoint2KalmanFilter | ExtendedKalmanFilter not yet exposed through Python | +| elaboratePoint2KalmanFilter | GaussianSequentialSolver not yet exposed through Python | | ImuFactorExample2 | X | | ImuFactorsExample | | | ISAM2Example_SmartFactor | | @@ -25,7 +20,7 @@ Also, instead of `gtsam.Symbol('b', 0)` we can simply say `gtsam.symbol_shorthan | Pose2SLAMExample_g2o | X | | Pose2SLAMExample_graph | | | Pose2SLAMExample_graphviz | | -| Pose2SLAMExample_lago | lago not exposed through cython | +| Pose2SLAMExample_lago | lago not yet exposed through Python | | Pose2SLAMStressTest | | | Pose2SLAMwSPCG | | | Pose3SLAMExample_changeKeys | | @@ -47,11 +42,11 @@ Also, instead of `gtsam.Symbol('b', 0)` we can simply say `gtsam.symbol_shorthan | StereoVOExample | | | StereoVOExample_large | | | TimeTBB | | -| UGM_chain | discrete functionality not exposed | -| UGM_small | discrete functionality not exposed | +| UGM_chain | discrete functionality not yet exposed | +| UGM_small | discrete functionality not yet exposed | | VisualISAM2Example | X | | VisualISAMExample | X | Extra Examples (with no C++ equivalent) - PlanarManipulatorExample -- SFMData +- SFMData \ No newline at end of file diff --git a/cython/gtsam/examples/SFMExample.py b/python/gtsam/examples/SFMExample.py similarity index 91% rename from cython/gtsam/examples/SFMExample.py rename to python/gtsam/examples/SFMExample.py index e02def2f9..f0c4c82ba 100644 --- a/cython/gtsam/examples/SFMExample.py +++ b/python/gtsam/examples/SFMExample.py @@ -13,14 +13,15 @@ from __future__ import print_function import gtsam import matplotlib.pyplot as plt import numpy as np -from gtsam import symbol_shorthand_L as L -from gtsam import symbol_shorthand_X as X +from gtsam import symbol_shorthand +L = symbol_shorthand.L +X = symbol_shorthand.X + from gtsam.examples import SFMdata -from gtsam.gtsam import (Cal3_S2, DoglegOptimizer, +from gtsam import (Cal3_S2, DoglegOptimizer, GenericProjectionFactorCal3_S2, Marginals, NonlinearFactorGraph, PinholeCameraCal3_S2, Point3, - Pose3, PriorFactorPoint3, PriorFactorPose3, Rot3, - SimpleCamera, Values) + Pose3, PriorFactorPoint3, PriorFactorPose3, Rot3, Values) from gtsam.utils import plot @@ -56,7 +57,7 @@ def main(): K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0) # Define the camera observation noise model - measurement_noise = gtsam.noiseModel_Isotropic.Sigma(2, 1.0) # one pixel in u and v + measurement_noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0) # one pixel in u and v # Create the set of ground-truth landmarks points = SFMdata.createPoints() @@ -69,7 +70,7 @@ def main(): # Add a prior on pose x1. This indirectly specifies where the origin is. 
# 0.3 rad std on roll,pitch,yaw and 0.1m on x,y,z - pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) + pose_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) factor = PriorFactorPose3(X(0), poses[0], pose_noise) graph.push_back(factor) @@ -85,7 +86,7 @@ def main(): # Because the structure-from-motion problem has a scale ambiguity, the problem is still under-constrained # Here we add a prior on the position of the first landmark. This fixes the scale by indicating the distance # between the first camera and the first landmark. All other landmark positions are interpreted using this scale. - point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) factor = PriorFactorPoint3(L(0), points[0], point_noise) graph.push_back(factor) graph.print_('Factor Graph:\n') @@ -97,7 +98,7 @@ def main(): transformed_pose = pose.retract(0.1*np.random.randn(6,1)) initial_estimate.insert(X(i), transformed_pose) for j, point in enumerate(points): - transformed_point = Point3(point.vector() + 0.1*np.random.randn(3)) + transformed_point = point + 0.1*np.random.randn(3) initial_estimate.insert(L(j), transformed_point) initial_estimate.print_('Initial Estimates:\n') diff --git a/cython/gtsam/examples/SFMdata.py b/python/gtsam/examples/SFMdata.py similarity index 92% rename from cython/gtsam/examples/SFMdata.py rename to python/gtsam/examples/SFMdata.py index c586f7e52..6ac9c5726 100644 --- a/cython/gtsam/examples/SFMdata.py +++ b/python/gtsam/examples/SFMdata.py @@ -33,7 +33,8 @@ def createPoses(K): poses = [] for theta in angles: position = gtsam.Point3(radius*np.cos(theta), - radius*np.sin(theta), height) + radius*np.sin(theta), + height) camera = gtsam.PinholeCameraCal3_S2.Lookat(position, target, up, K) poses.append(camera.pose()) return poses diff --git a/cython/gtsam/examples/SimpleRotation.py b/python/gtsam/examples/SimpleRotation.py similarity index 96% rename from cython/gtsam/examples/SimpleRotation.py rename to python/gtsam/examples/SimpleRotation.py index 4e82d3778..0fef261f8 100644 --- a/cython/gtsam/examples/SimpleRotation.py +++ b/python/gtsam/examples/SimpleRotation.py @@ -10,10 +10,9 @@ This example will perform a relatively trivial optimization on a single variable with a single factor. 
""" -import gtsam import numpy as np -from gtsam import symbol_shorthand_X as X - +import gtsam +from gtsam.symbol_shorthand import X def main(): """ @@ -33,7 +32,7 @@ def main(): """ prior = gtsam.Rot2.fromAngle(np.deg2rad(30)) prior.print_('goal angle') - model = gtsam.noiseModel_Isotropic.Sigma(dim=1, sigma=np.deg2rad(1)) + model = gtsam.noiseModel.Isotropic.Sigma(dim=1, sigma=np.deg2rad(1)) key = X(1) factor = gtsam.PriorFactorRot2(key, prior, model) diff --git a/cython/gtsam/examples/VisualISAM2Example.py b/python/gtsam/examples/VisualISAM2Example.py similarity index 94% rename from cython/gtsam/examples/VisualISAM2Example.py rename to python/gtsam/examples/VisualISAM2Example.py index 49e6ca95c..bacf510ec 100644 --- a/cython/gtsam/examples/VisualISAM2Example.py +++ b/python/gtsam/examples/VisualISAM2Example.py @@ -17,8 +17,7 @@ import gtsam import gtsam.utils.plot as gtsam_plot import matplotlib.pyplot as plt import numpy as np -from gtsam import symbol_shorthand_L as L -from gtsam import symbol_shorthand_X as X +from gtsam.symbol_shorthand import L, X from gtsam.examples import SFMdata from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 @@ -64,7 +63,7 @@ def visual_ISAM2_example(): K = gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0) # Define the camera observation noise model - measurement_noise = gtsam.noiseModel_Isotropic.Sigma( + measurement_noise = gtsam.noiseModel.Isotropic.Sigma( 2, 1.0) # one pixel in u and v # Create the set of ground-truth landmarks @@ -110,12 +109,12 @@ def visual_ISAM2_example(): # at least twice before adding it to iSAM. if i == 0: # Add a prior on pose x0 - pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array( + pose_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array( [0.1, 0.1, 0.1, 0.3, 0.3, 0.3])) # 30cm std on x,y,z 0.1 rad on roll,pitch,yaw graph.push_back(gtsam.PriorFactorPose3(X(0), poses[0], pose_noise)) # Add a prior on landmark l0 - point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) graph.push_back(gtsam.PriorFactorPoint3( L(0), points[0], point_noise)) # add directly to graph @@ -123,7 +122,7 @@ def visual_ISAM2_example(): # Intentionally initialize the variables off from the ground truth for j, point in enumerate(points): initial_estimate.insert(L(j), gtsam.Point3( - point.x()-0.25, point.y()+0.20, point.z()+0.15)) + point[0]-0.25, point[1]+0.20, point[2]+0.15)) else: # Update iSAM with the new factors isam.update(graph, initial_estimate) diff --git a/cython/gtsam/examples/VisualISAMExample.py b/python/gtsam/examples/VisualISAMExample.py similarity index 77% rename from cython/gtsam/examples/VisualISAMExample.py rename to python/gtsam/examples/VisualISAMExample.py index 5cc37867b..f99d3f3e6 100644 --- a/cython/gtsam/examples/VisualISAMExample.py +++ b/python/gtsam/examples/VisualISAMExample.py @@ -15,13 +15,11 @@ from __future__ import print_function import numpy as np import gtsam from gtsam.examples import SFMdata -from gtsam.gtsam import (Cal3_S2, GenericProjectionFactorCal3_S2, - NonlinearFactorGraph, NonlinearISAM, Point3, Pose3, - PriorFactorPoint3, PriorFactorPose3, Rot3, - PinholeCameraCal3_S2, Values) -from gtsam import symbol_shorthand_L as L -from gtsam import symbol_shorthand_X as X - +from gtsam import (Cal3_S2, GenericProjectionFactorCal3_S2, + NonlinearFactorGraph, NonlinearISAM, Pose3, + PriorFactorPoint3, PriorFactorPose3, Rot3, + PinholeCameraCal3_S2, Values, Point3) +from gtsam.symbol_shorthand import X, L def main(): """ @@ -34,7 +32,8 @@ def main(): K = 
Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0) # Define the camera observation noise model - camera_noise = gtsam.noiseModel_Isotropic.Sigma(2, 1.0) # one pixel in u and v + camera_noise = gtsam.noiseModel.Isotropic.Sigma( + 2, 1.0) # one pixel in u and v # Create the set of ground-truth landmarks points = SFMdata.createPoints() @@ -55,11 +54,13 @@ def main(): # Add factors for each landmark observation for j, point in enumerate(points): measurement = camera.project(point) - factor = GenericProjectionFactorCal3_S2(measurement, camera_noise, X(i), L(j), K) + factor = GenericProjectionFactorCal3_S2( + measurement, camera_noise, X(i), L(j), K) graph.push_back(factor) # Intentionally initialize the variables off from the ground truth - noise = Pose3(r=Rot3.Rodrigues(-0.1, 0.2, 0.25), t=Point3(0.05, -0.10, 0.20)) + noise = Pose3(r=Rot3.Rodrigues(-0.1, 0.2, 0.25), + t=Point3(0.05, -0.10, 0.20)) initial_xi = pose.compose(noise) # Add an initial guess for the current pose @@ -71,12 +72,13 @@ def main(): # adding it to iSAM. if i == 0: # Add a prior on pose x0, with 0.3 rad std on roll,pitch,yaw and 0.1m x,y,z - pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) + pose_noise = gtsam.noiseModel.Diagonal.Sigmas( + np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) factor = PriorFactorPose3(X(0), poses[0], pose_noise) graph.push_back(factor) # Add a prior on landmark l0 - point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) factor = PriorFactorPoint3(L(0), points[0], point_noise) graph.push_back(factor) @@ -84,8 +86,8 @@ def main(): noise = np.array([-0.25, 0.20, 0.15]) for j, point in enumerate(points): # Intentionally initialize the variables off from the ground truth - initial_lj = points[j].vector() + noise - initial_estimate.insert(L(j), Point3(initial_lj)) + initial_lj = points[j] + noise + initial_estimate.insert(L(j), initial_lj) else: # Update iSAM with the new factors isam.update(graph, initial_estimate) diff --git a/cython/gtsam/examples/__init__.py b/python/gtsam/examples/__init__.py similarity index 100% rename from cython/gtsam/examples/__init__.py rename to python/gtsam/examples/__init__.py diff --git a/python/gtsam/gtsam.tpl b/python/gtsam/gtsam.tpl new file mode 100644 index 000000000..634a81e90 --- /dev/null +++ b/python/gtsam/gtsam.tpl @@ -0,0 +1,48 @@ +/** + * @file gtsam.cpp + * @brief The auto-generated wrapper C++ source code. + * @author Duy-Nguyen Ta, Fan Jiang, Matthew Sklar + * @date Aug. 18, 2020 + * + * ** THIS FILE IS AUTO-GENERATED, DO NOT MODIFY! ** + */ + +// Include relevant boost libraries required by GTSAM +{include_boost} + +#include +#include +#include +#include "gtsam/config.h" +#include "gtsam/base/serialization.h" +#include "gtsam/nonlinear/utilities.h" // for RedirectCout. 
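The new `gtsam.tpl` is the input consumed by the wrapper generator: fields such as `{include_boost}`, `{includes}`, `{module_name}`, and `{wrapped_namespace}` are filled in at build time, while the doubled braces escape the literal braces of the `PYBIND11_MODULE` body. A rough illustration of that substitution, assuming `str.format`-style semantics (the doubled braces suggest this, but the actual wrap tool may fill the template differently):

```python
# Hypothetical sketch only -- not the real code generator.
template = (
    'PYBIND11_MODULE({module_name}, m_) {{\n'
    '    m_.doc() = "pybind11 wrapper of {module_name}";\n'
    '{wrapped_namespace}\n'
    '}}\n'
)
print(template.format(module_name="gtsam",
                      wrapped_namespace="    // ...generated bindings..."))
```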
+ +// These are the included headers listed in `gtsam.i` +{includes} +#include + +// Export classes for serialization +{boost_class_export} + +// Holder type for pybind11 +{hoder_type} + +// Preamble for STL classes +// TODO(fan): make this automatic +#include "python/gtsam/preamble.h" + +using namespace std; + +namespace py = pybind11; + +PYBIND11_MODULE({module_name}, m_) {{ + m_.doc() = "pybind11 wrapper of {module_name}"; + +{wrapped_namespace} + +// Specializations for STL classes +// TODO(fan): make this automatic +#include "python/gtsam/specializations.h" + +}} + diff --git a/python/gtsam/imuBias.py b/python/gtsam/imuBias.py new file mode 100644 index 000000000..399cefb98 --- /dev/null +++ b/python/gtsam/imuBias.py @@ -0,0 +1,4 @@ +# This trick is to allow direct import of sub-modules +# without this, we can only do `from gtsam.gtsam.imuBias import X` +# with this trick, we can do `from gtsam.imuBias import X` +from .gtsam.imuBias import * diff --git a/python/gtsam/noiseModel.py b/python/gtsam/noiseModel.py new file mode 100644 index 000000000..6e1b43488 --- /dev/null +++ b/python/gtsam/noiseModel.py @@ -0,0 +1,4 @@ +# This trick is to allow direct import of sub-modules +# without this, we can only do `from gtsam.gtsam.noiseModel import X` +# with this trick, we can do `from gtsam.noiseModel import X` +from .gtsam.noiseModel import * \ No newline at end of file diff --git a/python/gtsam/preamble.h b/python/gtsam/preamble.h new file mode 100644 index 000000000..0a1199122 --- /dev/null +++ b/python/gtsam/preamble.h @@ -0,0 +1,13 @@ +// Please refer to: https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html +// These are required to save one copy operation on Python calls +#ifdef GTSAM_ALLOCATOR_TBB +PYBIND11_MAKE_OPAQUE(std::vector>); +PYBIND11_MAKE_OPAQUE(std::vector >); +PYBIND11_MAKE_OPAQUE(std::vector); +PYBIND11_MAKE_OPAQUE(std::vector>); +#else +PYBIND11_MAKE_OPAQUE(std::vector); +PYBIND11_MAKE_OPAQUE(std::vector >); +PYBIND11_MAKE_OPAQUE(std::vector); +PYBIND11_MAKE_OPAQUE(std::vector>); +#endif diff --git a/python/gtsam/specializations.h b/python/gtsam/specializations.h new file mode 100644 index 000000000..2bd6a98a1 --- /dev/null +++ b/python/gtsam/specializations.h @@ -0,0 +1,13 @@ +// Please refer to: https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html +// These are required to save one copy operation on Python calls +#ifdef GTSAM_ALLOCATOR_TBB +py::bind_vector > >(m_, "KeyVector"); +py::bind_vector > >(m_, "Point2Vector"); +py::bind_vector >(m_, "Pose3Vector"); +py::bind_vector > > >(m_, "BetweenFactorPose3s"); +#else +py::bind_vector >(m_, "KeyVector"); +py::bind_vector > >(m_, "Point2Vector"); +py::bind_vector >(m_, "Pose3Vector"); +py::bind_vector > > >(m_, "BetweenFactorPose3s"); +#endif diff --git a/python/gtsam/symbol_shorthand.py b/python/gtsam/symbol_shorthand.py new file mode 100644 index 000000000..748d36558 --- /dev/null +++ b/python/gtsam/symbol_shorthand.py @@ -0,0 +1,4 @@ +# This trick is to allow direct import of sub-modules +# without this, we can only do `from gtsam.gtsam.symbol_shorthand import X` +# with this trick, we can do `from gtsam.symbol_shorthand import X` +from .gtsam.symbol_shorthand import * \ No newline at end of file diff --git a/cython/gtsam/tests/testScenarioRunner.py b/python/gtsam/tests/testScenarioRunner.py similarity index 96% rename from cython/gtsam/tests/testScenarioRunner.py rename to python/gtsam/tests/testScenarioRunner.py index 97a97b0ec..2af16a794 100644 --- a/cython/gtsam/tests/testScenarioRunner.py +++ 
b/python/gtsam/tests/testScenarioRunner.py @@ -32,7 +32,7 @@ class TestScenarioRunner(GtsamTestCase): dt = 0.1 params = gtsam.PreintegrationParams.MakeSharedU(self.g) - bias = gtsam.imuBias_ConstantBias() + bias = gtsam.imuBias.ConstantBias() runner = gtsam.ScenarioRunner( scenario, params, dt, bias) diff --git a/cython/gtsam/tests/test_Cal3Unified.py b/python/gtsam/tests/test_Cal3Unified.py similarity index 100% rename from cython/gtsam/tests/test_Cal3Unified.py rename to python/gtsam/tests/test_Cal3Unified.py diff --git a/cython/gtsam/tests/test_FrobeniusFactor.py b/python/gtsam/tests/test_FrobeniusFactor.py similarity index 92% rename from cython/gtsam/tests/test_FrobeniusFactor.py rename to python/gtsam/tests/test_FrobeniusFactor.py index f3f5354bb..e808627f5 100644 --- a/cython/gtsam/tests/test_FrobeniusFactor.py +++ b/python/gtsam/tests/test_FrobeniusFactor.py @@ -13,7 +13,7 @@ import unittest import numpy as np from gtsam import (Rot3, SO3, SO4, FrobeniusBetweenFactorSO4, FrobeniusFactorSO4, - FrobeniusWormholeFactor, SOn) + ShonanFactor3, SOn) id = SO4() v1 = np.array([0, 0, 0, 0.1, 0, 0]) @@ -43,7 +43,7 @@ class TestFrobeniusFactorSO4(unittest.TestCase): """Test creation of a factor that calculates Shonan error.""" R1 = SO3.Expmap(v1[3:]) R2 = SO3.Expmap(v2[3:]) - factor = FrobeniusWormholeFactor(1, 2, Rot3(R1.between(R2).matrix()), p=4) + factor = ShonanFactor3(1, 2, Rot3(R1.between(R2).matrix()), p=4) I4 = SOn(4) Q1 = I4.retract(v1) Q2 = I4.retract(v2) diff --git a/cython/gtsam/tests/test_GaussianFactorGraph.py b/python/gtsam/tests/test_GaussianFactorGraph.py similarity index 94% rename from cython/gtsam/tests/test_GaussianFactorGraph.py rename to python/gtsam/tests/test_GaussianFactorGraph.py index 983825d8b..a29b0f263 100644 --- a/cython/gtsam/tests/test_GaussianFactorGraph.py +++ b/python/gtsam/tests/test_GaussianFactorGraph.py @@ -16,7 +16,7 @@ import unittest import gtsam import numpy as np -from gtsam import symbol_shorthand_X as X +from gtsam.symbol_shorthand import X from gtsam.utils.test_case import GtsamTestCase @@ -28,8 +28,8 @@ def create_graph(): x1 = X(1) x2 = X(2) - BETWEEN_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.ones(1)) - PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.ones(1)) + BETWEEN_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.ones(1)) + PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.ones(1)) graph.add(x1, np.eye(1), x0, -np.eye(1), np.ones(1), BETWEEN_NOISE) graph.add(x2, np.eye(1), x1, -np.eye(1), 2*np.ones(1), BETWEEN_NOISE) diff --git a/cython/gtsam/tests/test_JacobianFactor.py b/python/gtsam/tests/test_JacobianFactor.py similarity index 93% rename from cython/gtsam/tests/test_JacobianFactor.py rename to python/gtsam/tests/test_JacobianFactor.py index 04433492b..6e049ed47 100644 --- a/cython/gtsam/tests/test_JacobianFactor.py +++ b/python/gtsam/tests/test_JacobianFactor.py @@ -48,7 +48,7 @@ class TestJacobianFactor(GtsamTestCase): # the RHS b2 = np.array([-1., 1.5, 2., -1.]) sigmas = np.array([1., 1., 1., 1.]) - model4 = gtsam.noiseModel_Diagonal.Sigmas(sigmas) + model4 = gtsam.noiseModel.Diagonal.Sigmas(sigmas) combined = gtsam.JacobianFactor(x2, Ax2, l1, Al1, x1, Ax1, b2, model4) # eliminate the first variable (x2) in the combined factor, destructive @@ -66,7 +66,7 @@ class TestJacobianFactor(GtsamTestCase): [+0.00, -8.94427]]) d = np.array([2.23607, -1.56525]) expectedCG = gtsam.GaussianConditional( - x2, d, R11, l1, S12, x1, S13, gtsam.noiseModel_Unit.Create(2)) + x2, d, R11, l1, S12, x1, S13, gtsam.noiseModel.Unit.Create(2)) # check if the 
result matches self.gtsamAssertEquals(actualCG, expectedCG, 1e-4) @@ -82,7 +82,7 @@ class TestJacobianFactor(GtsamTestCase): # the RHS b1 = np.array([0.0, 0.894427]) - model2 = gtsam.noiseModel_Diagonal.Sigmas(np.array([1., 1.])) + model2 = gtsam.noiseModel.Diagonal.Sigmas(np.array([1., 1.])) expectedLF = gtsam.JacobianFactor(l1, Bl1, x1, Bx1, b1, model2) # check if the result matches the combined (reduced) factor diff --git a/cython/gtsam/tests/test_KalmanFilter.py b/python/gtsam/tests/test_KalmanFilter.py similarity index 95% rename from cython/gtsam/tests/test_KalmanFilter.py rename to python/gtsam/tests/test_KalmanFilter.py index 94c41df72..48a91b96c 100644 --- a/cython/gtsam/tests/test_KalmanFilter.py +++ b/python/gtsam/tests/test_KalmanFilter.py @@ -22,13 +22,13 @@ class TestKalmanFilter(GtsamTestCase): F = np.eye(2) B = np.eye(2) u = np.array([1.0, 0.0]) - modelQ = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1])) + modelQ = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1])) Q = 0.01 * np.eye(2) H = np.eye(2) z1 = np.array([1.0, 0.0]) z2 = np.array([2.0, 0.0]) z3 = np.array([3.0, 0.0]) - modelR = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1])) + modelR = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1])) R = 0.01 * np.eye(2) # Create the set of expected output TestValues diff --git a/cython/gtsam/tests/test_KarcherMeanFactor.py b/python/gtsam/tests/test_KarcherMeanFactor.py similarity index 96% rename from cython/gtsam/tests/test_KarcherMeanFactor.py rename to python/gtsam/tests/test_KarcherMeanFactor.py index 6976decc1..a315a506c 100644 --- a/cython/gtsam/tests/test_KarcherMeanFactor.py +++ b/python/gtsam/tests/test_KarcherMeanFactor.py @@ -18,7 +18,7 @@ import numpy as np from gtsam.utils.test_case import GtsamTestCase KEY = 0 -MODEL = gtsam.noiseModel_Unit.Create(3) +MODEL = gtsam.noiseModel.Unit.Create(3) def find_Karcher_mean_Rot3(rotations): @@ -59,8 +59,8 @@ class TestKarcherMean(GtsamTestCase): R12 = R.compose(R.compose(R)) graph.add(gtsam.BetweenFactorRot3(1, 2, R12, MODEL)) keys = gtsam.KeyVector() - keys.push_back(1) - keys.push_back(2) + keys.append(1) + keys.append(2) graph.add(gtsam.KarcherMeanFactorRot3(keys)) initial = gtsam.Values() diff --git a/cython/gtsam/tests/test_LocalizationExample.py b/python/gtsam/tests/test_LocalizationExample.py similarity index 94% rename from cython/gtsam/tests/test_LocalizationExample.py rename to python/gtsam/tests/test_LocalizationExample.py index 6ce65f087..8ae3583f0 100644 --- a/cython/gtsam/tests/test_LocalizationExample.py +++ b/python/gtsam/tests/test_LocalizationExample.py @@ -26,7 +26,7 @@ class TestLocalizationExample(GtsamTestCase): # Add two odometry factors # create a measurement for both factors (the same in this case) odometry = gtsam.Pose2(2.0, 0.0, 0.0) - odometryNoise = gtsam.noiseModel_Diagonal.Sigmas( + odometryNoise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.2, 0.2, 0.1])) # 20cm std on x,y, 0.1 rad on theta graph.add(gtsam.BetweenFactorPose2(0, 1, odometry, odometryNoise)) graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise)) @@ -37,7 +37,7 @@ class TestLocalizationExample(GtsamTestCase): groundTruth.insert(0, gtsam.Pose2(0.0, 0.0, 0.0)) groundTruth.insert(1, gtsam.Pose2(2.0, 0.0, 0.0)) groundTruth.insert(2, gtsam.Pose2(4.0, 0.0, 0.0)) - model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1, 10.])) + model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1, 10.])) for i in range(3): graph.add(gtsam.PriorFactorPose2(i, groundTruth.atPose2(i), model)) diff --git 
a/cython/gtsam/tests/test_NonlinearOptimizer.py b/python/gtsam/tests/test_NonlinearOptimizer.py similarity index 88% rename from cython/gtsam/tests/test_NonlinearOptimizer.py rename to python/gtsam/tests/test_NonlinearOptimizer.py index 985dc30a2..e9234a43b 100644 --- a/cython/gtsam/tests/test_NonlinearOptimizer.py +++ b/python/gtsam/tests/test_NonlinearOptimizer.py @@ -15,11 +15,11 @@ from __future__ import print_function import unittest import gtsam -from gtsam import (DoglegOptimizer, DoglegParams, GaussNewtonOptimizer, +from gtsam import (DoglegOptimizer, DoglegParams, + DummyPreconditionerParameters, GaussNewtonOptimizer, GaussNewtonParams, LevenbergMarquardtOptimizer, - LevenbergMarquardtParams, PCGSolverParameters, - DummyPreconditionerParameters, NonlinearFactorGraph, Ordering, - Point2, PriorFactorPoint2, Values) + LevenbergMarquardtParams, NonlinearFactorGraph, Ordering, + PCGSolverParameters, Point2, PriorFactorPoint2, Values) from gtsam.utils.test_case import GtsamTestCase KEY1 = 1 @@ -30,7 +30,7 @@ class TestScenario(GtsamTestCase): def test_optimize(self): """Do trivial test with three optimizer variants.""" fg = NonlinearFactorGraph() - model = gtsam.noiseModel_Unit.Create(2) + model = gtsam.noiseModel.Unit.Create(2) fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model)) # test error at minimum diff --git a/cython/gtsam/tests/test_OdometryExample.py b/python/gtsam/tests/test_OdometryExample.py similarity index 94% rename from cython/gtsam/tests/test_OdometryExample.py rename to python/gtsam/tests/test_OdometryExample.py index c8ea95588..72e532f20 100644 --- a/cython/gtsam/tests/test_OdometryExample.py +++ b/python/gtsam/tests/test_OdometryExample.py @@ -25,7 +25,7 @@ class TestOdometryExample(GtsamTestCase): # Add a Gaussian prior on pose x_1 priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior mean is at origin - priorNoise = gtsam.noiseModel_Diagonal.Sigmas( + priorNoise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.3, 0.3, 0.1])) # 30cm std on x,y, 0.1 rad on theta # add directly to graph graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise)) @@ -33,7 +33,7 @@ class TestOdometryExample(GtsamTestCase): # Add two odometry factors # create a measurement for both factors (the same in this case) odometry = gtsam.Pose2(2.0, 0.0, 0.0) - odometryNoise = gtsam.noiseModel_Diagonal.Sigmas( + odometryNoise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.2, 0.2, 0.1])) # 20cm std on x,y, 0.1 rad on theta graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise)) graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, odometryNoise)) diff --git a/cython/gtsam/tests/test_PlanarSLAMExample.py b/python/gtsam/tests/test_PlanarSLAMExample.py similarity index 93% rename from cython/gtsam/tests/test_PlanarSLAMExample.py rename to python/gtsam/tests/test_PlanarSLAMExample.py index ae813d35c..8cb3ad2ac 100644 --- a/cython/gtsam/tests/test_PlanarSLAMExample.py +++ b/python/gtsam/tests/test_PlanarSLAMExample.py @@ -32,13 +32,13 @@ class TestPlanarSLAM(GtsamTestCase): # Add prior # gaussian for prior priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin - priorNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) + priorNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) # add directly to graph graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise)) # Add odometry # general noisemodel for odometry - odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) 
graph.add(gtsam.BetweenFactorPose2( 1, 2, gtsam.Pose2(2.0, 0.0, 0.0), odometryNoise)) graph.add(gtsam.BetweenFactorPose2( @@ -49,7 +49,7 @@ class TestPlanarSLAM(GtsamTestCase): 4, 5, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise)) # Add pose constraint - model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) graph.add(gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2.0, 0.0, pi / 2), model)) # Initialize to noisy points diff --git a/cython/gtsam/tests/test_Pose2.py b/python/gtsam/tests/test_Pose2.py similarity index 100% rename from cython/gtsam/tests/test_Pose2.py rename to python/gtsam/tests/test_Pose2.py diff --git a/cython/gtsam/tests/test_Pose2SLAMExample.py b/python/gtsam/tests/test_Pose2SLAMExample.py similarity index 93% rename from cython/gtsam/tests/test_Pose2SLAMExample.py rename to python/gtsam/tests/test_Pose2SLAMExample.py index a79b6b18c..e47b9fbff 100644 --- a/cython/gtsam/tests/test_Pose2SLAMExample.py +++ b/python/gtsam/tests/test_Pose2SLAMExample.py @@ -32,13 +32,13 @@ class TestPose2SLAMExample(GtsamTestCase): # Add prior # gaussian for prior priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin - priorNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) + priorNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) # add directly to graph graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise)) # Add odometry # general noisemodel for odometry - odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) graph.add(gtsam.BetweenFactorPose2( 1, 2, gtsam.Pose2(2.0, 0.0, 0.0), odometryNoise)) graph.add(gtsam.BetweenFactorPose2( @@ -49,7 +49,7 @@ class TestPose2SLAMExample(GtsamTestCase): 4, 5, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise)) # Add pose constraint - model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) graph.add(gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2.0, 0.0, pi / 2), model)) # Initialize to noisy points diff --git a/cython/gtsam/tests/test_Pose3.py b/python/gtsam/tests/test_Pose3.py similarity index 100% rename from cython/gtsam/tests/test_Pose3.py rename to python/gtsam/tests/test_Pose3.py diff --git a/cython/gtsam/tests/test_Pose3SLAMExample.py b/python/gtsam/tests/test_Pose3SLAMExample.py similarity index 97% rename from cython/gtsam/tests/test_Pose3SLAMExample.py rename to python/gtsam/tests/test_Pose3SLAMExample.py index 1e9eaac67..fce171b55 100644 --- a/cython/gtsam/tests/test_Pose3SLAMExample.py +++ b/python/gtsam/tests/test_Pose3SLAMExample.py @@ -30,7 +30,7 @@ class TestPose3SLAMExample(GtsamTestCase): fg = gtsam.NonlinearFactorGraph() fg.add(gtsam.NonlinearEqualityPose3(0, p0)) delta = p0.between(p1) - covariance = gtsam.noiseModel_Diagonal.Sigmas( + covariance = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.05, 0.05, 0.05, 5. * pi / 180, 5. * pi / 180, 5. 
* pi / 180])) fg.add(gtsam.BetweenFactorPose3(0, 1, delta, covariance)) fg.add(gtsam.BetweenFactorPose3(1, 2, delta, covariance)) diff --git a/cython/gtsam/tests/test_PriorFactor.py b/python/gtsam/tests/test_PriorFactor.py similarity index 88% rename from cython/gtsam/tests/test_PriorFactor.py rename to python/gtsam/tests/test_PriorFactor.py index 66207b800..0582cf5d7 100644 --- a/cython/gtsam/tests/test_PriorFactor.py +++ b/python/gtsam/tests/test_PriorFactor.py @@ -23,14 +23,14 @@ class TestPriorFactor(GtsamTestCase): key = 5 priorPose3 = gtsam.Pose3() - model = gtsam.noiseModel_Unit.Create(6) + model = gtsam.noiseModel.Unit.Create(6) factor = gtsam.PriorFactorPose3(key, priorPose3, model) values.insert(key, priorPose3) self.assertEqual(factor.error(values), 0) key = 3 priorVector = np.array([0., 0., 0.]) - model = gtsam.noiseModel_Unit.Create(3) + model = gtsam.noiseModel.Unit.Create(3) factor = gtsam.PriorFactorVector(key, priorVector, model) values.insert(key, priorVector) self.assertEqual(factor.error(values), 0) @@ -45,14 +45,14 @@ class TestPriorFactor(GtsamTestCase): # define and add Pose3 prior key = 5 priorPose3 = gtsam.Pose3() - model = gtsam.noiseModel_Unit.Create(6) + model = gtsam.noiseModel.Unit.Create(6) graph.addPriorPose3(key, priorPose3, model) self.assertEqual(graph.size(), 1) # define and add Vector prior key = 3 priorVector = np.array([0., 0., 0.]) - model = gtsam.noiseModel_Unit.Create(3) + model = gtsam.noiseModel.Unit.Create(3) graph.addPriorVector(key, priorVector, model) self.assertEqual(graph.size(), 2) diff --git a/cython/gtsam/tests/test_SFMExample.py b/python/gtsam/tests/test_SFMExample.py similarity index 72% rename from cython/gtsam/tests/test_SFMExample.py rename to python/gtsam/tests/test_SFMExample.py index e8fa46186..47a3cbe3e 100644 --- a/cython/gtsam/tests/test_SFMExample.py +++ b/python/gtsam/tests/test_SFMExample.py @@ -15,8 +15,9 @@ import numpy as np import gtsam import gtsam.utils.visual_data_generator as generator from gtsam import symbol +from gtsam.noiseModel import Isotropic, Diagonal from gtsam.utils.test_case import GtsamTestCase - +from gtsam.symbol_shorthand import X, P class TestSFMExample(GtsamTestCase): @@ -34,29 +35,29 @@ class TestSFMExample(GtsamTestCase): graph = gtsam.NonlinearFactorGraph() # Add factors for all measurements - measurementNoise = gtsam.noiseModel_Isotropic.Sigma(2, measurementNoiseSigma) + measurementNoise = Isotropic.Sigma(2, measurementNoiseSigma) for i in range(len(data.Z)): for k in range(len(data.Z[i])): j = data.J[i][k] graph.add(gtsam.GenericProjectionFactorCal3_S2( data.Z[i][k], measurementNoise, - symbol(ord('x'), i), symbol(ord('p'), j), data.K)) + X(i), P(j), data.K)) - posePriorNoise = gtsam.noiseModel_Diagonal.Sigmas(poseNoiseSigmas) - graph.add(gtsam.PriorFactorPose3(symbol(ord('x'), 0), + posePriorNoise = Diagonal.Sigmas(poseNoiseSigmas) + graph.add(gtsam.PriorFactorPose3(X(0), truth.cameras[0].pose(), posePriorNoise)) - pointPriorNoise = gtsam.noiseModel_Isotropic.Sigma(3, pointNoiseSigma) - graph.add(gtsam.PriorFactorPoint3(symbol(ord('p'), 0), + pointPriorNoise = Isotropic.Sigma(3, pointNoiseSigma) + graph.add(gtsam.PriorFactorPoint3(P(0), truth.points[0], pointPriorNoise)) # Initial estimate initialEstimate = gtsam.Values() for i in range(len(truth.cameras)): pose_i = truth.cameras[i].pose() - initialEstimate.insert(symbol(ord('x'), i), pose_i) + initialEstimate.insert(X(i), pose_i) for j in range(len(truth.points)): point_j = truth.points[j] - initialEstimate.insert(symbol(ord('p'), j), 
point_j) + initialEstimate.insert(P(j), point_j) # Optimization optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate) @@ -66,16 +67,16 @@ class TestSFMExample(GtsamTestCase): # Marginalization marginals = gtsam.Marginals(graph, result) - marginals.marginalCovariance(symbol(ord('p'), 0)) - marginals.marginalCovariance(symbol(ord('x'), 0)) + marginals.marginalCovariance(P(0)) + marginals.marginalCovariance(X(0)) # Check optimized results, should be equal to ground truth for i in range(len(truth.cameras)): - pose_i = result.atPose3(symbol(ord('x'), i)) + pose_i = result.atPose3(X(i)) self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5) for j in range(len(truth.points)): - point_j = result.atPoint3(symbol(ord('p'), j)) + point_j = result.atPoint3(P(j)) self.gtsamAssertEquals(point_j, truth.points[j], 1e-5) if __name__ == "__main__": diff --git a/cython/gtsam/tests/test_SO4.py b/python/gtsam/tests/test_SO4.py similarity index 100% rename from cython/gtsam/tests/test_SO4.py rename to python/gtsam/tests/test_SO4.py diff --git a/cython/gtsam/tests/test_SOn.py b/python/gtsam/tests/test_SOn.py similarity index 100% rename from cython/gtsam/tests/test_SOn.py rename to python/gtsam/tests/test_SOn.py diff --git a/cython/gtsam/tests/test_Scenario.py b/python/gtsam/tests/test_Scenario.py similarity index 90% rename from cython/gtsam/tests/test_Scenario.py rename to python/gtsam/tests/test_Scenario.py index 09601fba5..fc5965829 100644 --- a/cython/gtsam/tests/test_Scenario.py +++ b/python/gtsam/tests/test_Scenario.py @@ -43,8 +43,11 @@ class TestScenario(GtsamTestCase): # R = v/w, so test if loop crests at 2*R R = v / w T30 = scenario.pose(T) + xyz = T30.rotation().xyz() + if xyz[0] < 0: + xyz = -xyz np.testing.assert_almost_equal( - np.array([math.pi, 0, math.pi]), T30.rotation().xyz()) + np.array([math.pi, 0, math.pi]), xyz) self.gtsamAssertEquals(gtsam.Point3( 0, 0, 2.0 * R), T30.translation(), 1e-9) diff --git a/python/gtsam/tests/test_ShonanAveraging.py b/python/gtsam/tests/test_ShonanAveraging.py new file mode 100644 index 000000000..4c423574d --- /dev/null +++ b/python/gtsam/tests/test_ShonanAveraging.py @@ -0,0 +1,139 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Unit tests for Shonan Rotation Averaging. 
+Author: Frank Dellaert +""" +# pylint: disable=invalid-name, no-name-in-module, no-member + +import unittest + +import gtsam +from gtsam import ShonanAveraging3, ShonanAveragingParameters3 +from gtsam.utils.test_case import GtsamTestCase + +DEFAULT_PARAMS = ShonanAveragingParameters3( + gtsam.LevenbergMarquardtParams.CeresDefaults()) + + +def fromExampleName(name: str, parameters=DEFAULT_PARAMS): + g2oFile = gtsam.findExampleDataFile(name) + return ShonanAveraging3(g2oFile, parameters) + + +class TestShonanAveraging(GtsamTestCase): + """Tests for Shonan Rotation Averaging.""" + + def setUp(self): + """Set up common variables.""" + self.shonan = fromExampleName("toyExample.g2o") + + def test_checkConstructor(self): + self.assertEqual(5, self.shonan.nrUnknowns()) + + D = self.shonan.denseD() + self.assertEqual((15, 15), D.shape) + + Q = self.shonan.denseQ() + self.assertEqual((15, 15), Q.shape) + + L = self.shonan.denseL() + self.assertEqual((15, 15), L.shape) + + def test_buildGraphAt(self): + graph = self.shonan.buildGraphAt(5) + self.assertEqual(7, graph.size()) + + def test_checkOptimality(self): + random = self.shonan.initializeRandomlyAt(4) + lambdaMin = self.shonan.computeMinEigenValue(random) + self.assertAlmostEqual(-414.87376657555996, + lambdaMin, places=3) # Regression test + self.assertFalse(self.shonan.checkOptimality(random)) + + def test_tryOptimizingAt3(self): + initial = self.shonan.initializeRandomlyAt(3) + self.assertFalse(self.shonan.checkOptimality(initial)) + result = self.shonan.tryOptimizingAt(3, initial) + self.assertTrue(self.shonan.checkOptimality(result)) + lambdaMin = self.shonan.computeMinEigenValue(result) + self.assertAlmostEqual(-5.427688831332745e-07, + lambdaMin, places=3) # Regression test + self.assertAlmostEqual(0, self.shonan.costAt(3, result), places=3) + SO3Values = self.shonan.roundSolution(result) + self.assertAlmostEqual(0, self.shonan.cost(SO3Values), places=3) + + def test_tryOptimizingAt4(self): + random = self.shonan.initializeRandomlyAt(4) + result = self.shonan.tryOptimizingAt(4, random) + self.assertTrue(self.shonan.checkOptimality(result)) + self.assertAlmostEqual(0, self.shonan.costAt(4, result), places=2) + lambdaMin = self.shonan.computeMinEigenValue(result) + self.assertAlmostEqual(-5.427688831332745e-07, + lambdaMin, places=3) # Regression test + SO3Values = self.shonan.roundSolution(result) + self.assertAlmostEqual(0, self.shonan.cost(SO3Values), places=3) + + def test_initializeWithDescent(self): + random = self.shonan.initializeRandomlyAt(3) + Qstar3 = self.shonan.tryOptimizingAt(3, random) + lambdaMin, minEigenVector = self.shonan.computeMinEigenVector(Qstar3) + initialQ4 = self.shonan.initializeWithDescent( + 4, Qstar3, minEigenVector, lambdaMin) + self.assertAlmostEqual(5, initialQ4.size()) + + def test_run(self): + initial = self.shonan.initializeRandomly() + result, lambdaMin = self.shonan.run(initial, 5, 10) + self.assertAlmostEqual(0, self.shonan.cost(result), places=2) + self.assertAlmostEqual(-5.427688831332745e-07, + lambdaMin, places=3) # Regression test + + def test_runKlausKarcher(self): + # Load 2D toy example + lmParams = gtsam.LevenbergMarquardtParams.CeresDefaults() + # lmParams.setVerbosityLM("SUMMARY") + g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt") + parameters = gtsam.ShonanAveragingParameters2(lmParams) + shonan = gtsam.ShonanAveraging2(g2oFile, parameters) + self.assertAlmostEqual(4, shonan.nrUnknowns()) + + # Check graph building + graph = shonan.buildGraphAt(2) + self.assertAlmostEqual(6, 
graph.size()) + initial = shonan.initializeRandomly() + result, lambdaMin = shonan.run(initial, 2, 10) + self.assertAlmostEqual(0.0008211, shonan.cost(result), places=5) + self.assertAlmostEqual(0, lambdaMin, places=9) # certificate! + + # Test alpha/beta/gamma prior weighting. + def test_PriorWeights(self): + lmParams = gtsam.LevenbergMarquardtParams.CeresDefaults() + params = ShonanAveragingParameters3(lmParams) + self.assertAlmostEqual(0, params.getAnchorWeight(), 1e-9) + self.assertAlmostEqual(1, params.getKarcherWeight(), 1e-9) + self.assertAlmostEqual(0, params.getGaugesWeight(), 1e-9) + alpha, beta, gamma = 100.0, 200.0, 300.0 + params.setAnchorWeight(alpha) + params.setKarcherWeight(beta) + params.setGaugesWeight(gamma) + self.assertAlmostEqual(alpha, params.getAnchorWeight(), 1e-9) + self.assertAlmostEqual(beta, params.getKarcherWeight(), 1e-9) + self.assertAlmostEqual(gamma, params.getGaugesWeight(), 1e-9) + params.setKarcherWeight(0) + shonan = fromExampleName("Klaus3.g2o", params) + + initial = gtsam.Values() + for i in range(3): + initial.insert(i, gtsam.Rot3()) + self.assertAlmostEqual(3.0756, shonan.cost(initial), places=3) + result, _lambdaMin = shonan.run(initial, 3, 3) + self.assertAlmostEqual(0.0015, shonan.cost(result), places=3) + + +if __name__ == '__main__': + unittest.main() diff --git a/cython/gtsam/tests/test_SimpleCamera.py b/python/gtsam/tests/test_SimpleCamera.py similarity index 82% rename from cython/gtsam/tests/test_SimpleCamera.py rename to python/gtsam/tests/test_SimpleCamera.py index a3654a5f1..efdfec561 100644 --- a/cython/gtsam/tests/test_SimpleCamera.py +++ b/python/gtsam/tests/test_SimpleCamera.py @@ -5,7 +5,7 @@ All Rights Reserved See LICENSE for the license information -PinholeCameraCal3_S2 unit tests. +SimpleCamera unit tests. 
Author: Frank Dellaert & Duy Nguyen Ta (Python) """ import math @@ -14,7 +14,7 @@ import unittest import numpy as np import gtsam -from gtsam import Cal3_S2, Point3, Pose2, Pose3, Rot3, PinholeCameraCal3_S2 +from gtsam import Cal3_S2, Point3, Pose2, Pose3, Rot3, SimpleCamera from gtsam.utils.test_case import GtsamTestCase K = Cal3_S2(625, 625, 0, 0, 0) @@ -23,14 +23,14 @@ class TestSimpleCamera(GtsamTestCase): def test_constructor(self): pose1 = Pose3(Rot3(np.diag([1, -1, -1])), Point3(0, 0, 0.5)) - camera = PinholeCameraCal3_S2(pose1, K) + camera = SimpleCamera(pose1, K) self.gtsamAssertEquals(camera.calibration(), K, 1e-9) self.gtsamAssertEquals(camera.pose(), pose1, 1e-9) def test_level2(self): # Create a level camera, looking in Y-direction pose2 = Pose2(0.4,0.3,math.pi/2.0) - camera = PinholeCameraCal3_S2.Level(K, pose2, 0.1) + camera = SimpleCamera.Level(K, pose2, 0.1) # expected x = Point3(1,0,0) diff --git a/cython/gtsam/tests/test_StereoVOExample.py b/python/gtsam/tests/test_StereoVOExample.py similarity index 92% rename from cython/gtsam/tests/test_StereoVOExample.py rename to python/gtsam/tests/test_StereoVOExample.py index 3f5f57522..cefc08aab 100644 --- a/cython/gtsam/tests/test_StereoVOExample.py +++ b/python/gtsam/tests/test_StereoVOExample.py @@ -28,11 +28,11 @@ class TestStereoVOExample(GtsamTestCase): # - No noise on measurements ## Create keys for variables - x1 = symbol(ord('x'),1) - x2 = symbol(ord('x'),2) - l1 = symbol(ord('l'),1) - l2 = symbol(ord('l'),2) - l3 = symbol(ord('l'),3) + x1 = symbol('x',1) + x2 = symbol('x',2) + l1 = symbol('l',1) + l2 = symbol('l',2) + l3 = symbol('l',3) ## Create graph container and add factors to it graph = gtsam.NonlinearFactorGraph() @@ -44,7 +44,7 @@ class TestStereoVOExample(GtsamTestCase): ## Create realistic calibration and measurement noise model # format: fx fy skew cx cy baseline K = gtsam.Cal3_S2Stereo(1000, 1000, 0, 320, 240, 0.2) - stereo_model = gtsam.noiseModel_Diagonal.Sigmas(np.array([1.0, 1.0, 1.0])) + stereo_model = gtsam.noiseModel.Diagonal.Sigmas(np.array([1.0, 1.0, 1.0])) ## Add measurements # pose 1 diff --git a/python/gtsam/tests/test_Triangulation.py b/python/gtsam/tests/test_Triangulation.py new file mode 100644 index 000000000..b43ad9b57 --- /dev/null +++ b/python/gtsam/tests/test_Triangulation.py @@ -0,0 +1,80 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Test Triangulation +Author: Frank Dellaert & Fan Jiang (Python) +""" +import unittest + +import numpy as np + +import gtsam as g +from gtsam.utils.test_case import GtsamTestCase +from gtsam import Cal3_S2, Cal3Bundler, Rot3, Pose3, \ + PinholeCameraCal3_S2, Point3, Point2Vector, Pose3Vector, triangulatePoint3 + +class TestVisualISAMExample(GtsamTestCase): + def test_TriangulationExample(self): + # Some common constants + sharedCal = Cal3_S2(1500, 1200, 0, 640, 480) + + # Looking along X-axis, 1 meter above ground plane (x-y) + upright = Rot3.Ypr(-np.pi / 2, 0., -np.pi / 2) + pose1 = Pose3(upright, Point3(0, 0, 1)) + camera1 = PinholeCameraCal3_S2(pose1, sharedCal) + + # create second camera 1 meter to the right of first camera + pose2 = pose1.compose(Pose3(Rot3(), Point3(1, 0, 0))) + camera2 = PinholeCameraCal3_S2(pose2, sharedCal) + + # landmark ~5 meters infront of camera + landmark = Point3(5, 0.5, 1.2) + + # 1. 
Project two landmarks into two cameras and triangulate + z1 = camera1.project(landmark) + z2 = camera2.project(landmark) + + # twoPoses + poses = Pose3Vector() + measurements = Point2Vector() + + poses.append(pose1) + poses.append(pose2) + measurements.append(z1) + measurements.append(z2) + + optimize = True + rank_tol = 1e-9 + + triangulated_landmark = triangulatePoint3(poses,sharedCal, measurements, rank_tol, optimize) + self.gtsamAssertEquals(landmark, triangulated_landmark,1e-9) + + # 2. Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814) + measurements = Point2Vector() + measurements.append(z1 - np.array([0.1, 0.5])) + measurements.append(z2 - np.array([-0.2, 0.3])) + + triangulated_landmark = triangulatePoint3(poses,sharedCal, measurements, rank_tol, optimize) + self.gtsamAssertEquals(landmark, triangulated_landmark,1e-2) + # + # # two Poses with Bundler Calibration + # bundlerCal = Cal3Bundler(1500, 0, 0, 640, 480) + # camera1 = PinholeCameraCal3Bundler(pose1, bundlerCal) + # camera2 = PinholeCameraCal3Bundler(pose2, bundlerCal) + # + # z1 = camera1.project(landmark) + # z2 = camera2.project(landmark) + # + # measurements = Point2Vector() + # measurements.append(z1) + # measurements.append(z2) + # + # triangulated_landmark = triangulatePoint3(poses,bundlerCal, measurements, rank_tol, optimize) + # self.gtsamAssertEquals(landmark, triangulated_landmark,1e-9) + +if __name__ == "__main__": + unittest.main() diff --git a/cython/gtsam/tests/test_Values.py b/python/gtsam/tests/test_Values.py similarity index 94% rename from cython/gtsam/tests/test_Values.py rename to python/gtsam/tests/test_Values.py index 20634a21c..dddd11c40 100644 --- a/cython/gtsam/tests/test_Values.py +++ b/python/gtsam/tests/test_Values.py @@ -15,8 +15,7 @@ import numpy as np import gtsam from gtsam import (Cal3_S2, Cal3Bundler, Cal3DS2, EssentialMatrix, Point2, - Point3, Pose2, Pose3, Rot2, Rot3, Unit3, Values, - imuBias_ConstantBias) + Point3, Pose2, Pose3, Rot2, Rot3, Unit3, Values, imuBias) from gtsam.utils.test_case import GtsamTestCase @@ -37,7 +36,7 @@ class TestValues(GtsamTestCase): values.insert(7, Cal3DS2()) values.insert(8, Cal3Bundler()) values.insert(9, E) - values.insert(10, imuBias_ConstantBias()) + values.insert(10, imuBias.ConstantBias()) # Special cases for Vectors and Matrices # Note that gtsam's Eigen Vectors and Matrices requires double-precision @@ -70,8 +69,8 @@ class TestValues(GtsamTestCase): self.gtsamAssertEquals(values.atCal3DS2(7), Cal3DS2(), tol) self.gtsamAssertEquals(values.atCal3Bundler(8), Cal3Bundler(), tol) self.gtsamAssertEquals(values.atEssentialMatrix(9), E, tol) - self.gtsamAssertEquals(values.atimuBias_ConstantBias( - 10), imuBias_ConstantBias(), tol) + self.gtsamAssertEquals(values.atConstantBias( + 10), imuBias.ConstantBias(), tol) # special cases for Vector and Matrix: actualVector = values.atVector(11) diff --git a/cython/gtsam/tests/test_VisualISAMExample.py b/python/gtsam/tests/test_VisualISAMExample.py similarity index 93% rename from cython/gtsam/tests/test_VisualISAMExample.py rename to python/gtsam/tests/test_VisualISAMExample.py index 99d7e6160..6eb05eeee 100644 --- a/cython/gtsam/tests/test_VisualISAMExample.py +++ b/python/gtsam/tests/test_VisualISAMExample.py @@ -46,11 +46,11 @@ class TestVisualISAMExample(GtsamTestCase): isam, result = visual_isam.step(data, isam, result, truth, currentPose) for i in range(len(truth.cameras)): - pose_i = result.atPose3(symbol(ord('x'), i)) + pose_i = result.atPose3(symbol('x', i)) 
self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5) for j in range(len(truth.points)): - point_j = result.atPoint3(symbol(ord('l'), j)) + point_j = result.atPoint3(symbol('l', j)) self.gtsamAssertEquals(point_j, truth.points[j], 1e-5) if __name__ == "__main__": diff --git a/cython/gtsam/tests/test_dataset.py b/python/gtsam/tests/test_dataset.py similarity index 87% rename from cython/gtsam/tests/test_dataset.py rename to python/gtsam/tests/test_dataset.py index 60fb9450d..87fc2ad54 100644 --- a/cython/gtsam/tests/test_dataset.py +++ b/python/gtsam/tests/test_dataset.py @@ -15,7 +15,7 @@ from __future__ import print_function import unittest import gtsam -from gtsam import BetweenFactorPose3, BetweenFactorPose3s +from gtsam import BetweenFactorPose3 from gtsam.utils.test_case import GtsamTestCase @@ -37,8 +37,8 @@ class TestDataset(GtsamTestCase): def test_parse3Dfactors(self): """Test parsing into data structure.""" factors = gtsam.parse3DFactors(self.pose3_example_g2o_file) - self.assertEqual(factors.size(), 6) - self.assertIsInstance(factors.at(0), BetweenFactorPose3) + self.assertEqual(len(factors), 6) + self.assertIsInstance(factors[0], BetweenFactorPose3) if __name__ == '__main__': diff --git a/cython/gtsam/tests/test_dsf_map.py b/python/gtsam/tests/test_dsf_map.py similarity index 100% rename from cython/gtsam/tests/test_dsf_map.py rename to python/gtsam/tests/test_dsf_map.py diff --git a/cython/gtsam/tests/test_initialize_pose3.py b/python/gtsam/tests/test_initialize_pose3.py similarity index 96% rename from cython/gtsam/tests/test_initialize_pose3.py rename to python/gtsam/tests/test_initialize_pose3.py index 3aa7e3470..6d7f66653 100644 --- a/cython/gtsam/tests/test_initialize_pose3.py +++ b/python/gtsam/tests/test_initialize_pose3.py @@ -24,7 +24,7 @@ class TestValues(GtsamTestCase): def setUp(self): - model = gtsam.noiseModel_Isotropic.Sigma(6, 0.1) + model = gtsam.noiseModel.Isotropic.Sigma(6, 0.1) # We consider a small graph: # symbolic FG @@ -64,9 +64,8 @@ class TestValues(GtsamTestCase): def test_orientations(self): pose3Graph = gtsam.InitializePose3.buildPose3graph(self.graph) - initial = gtsam.InitializePose3.computeOrientationsChordal(pose3Graph) - + # comparison is up to M_PI, that's why we add some multiples of 2*M_PI self.gtsamAssertEquals(initial.atRot3(x0), self.R0, 1e-6) self.gtsamAssertEquals(initial.atRot3(x1), self.R1, 1e-6) @@ -77,7 +76,7 @@ class TestValues(GtsamTestCase): g2oFile = gtsam.findExampleDataFile("pose3example-grid") is3D = True inputGraph, expectedValues = gtsam.readG2o(g2oFile, is3D) - priorModel = gtsam.noiseModel_Unit.Create(6) + priorModel = gtsam.noiseModel.Unit.Create(6) inputGraph.add(gtsam.PriorFactorPose3(0, Pose3(), priorModel)) initial = gtsam.InitializePose3.initialize(inputGraph) diff --git a/cython/gtsam/tests/test_logging_optimizer.py b/python/gtsam/tests/test_logging_optimizer.py similarity index 98% rename from cython/gtsam/tests/test_logging_optimizer.py rename to python/gtsam/tests/test_logging_optimizer.py index 2560a72a2..47eb32e7b 100644 --- a/cython/gtsam/tests/test_logging_optimizer.py +++ b/python/gtsam/tests/test_logging_optimizer.py @@ -21,7 +21,7 @@ from gtsam.utils.test_case import GtsamTestCase from gtsam.utils.logging_optimizer import gtsam_optimize KEY = 0 -MODEL = gtsam.noiseModel_Unit.Create(3) +MODEL = gtsam.noiseModel.Unit.Create(3) class TestOptimizeComet(GtsamTestCase): diff --git a/cython/gtsam/tests/__init__.py b/python/gtsam/utils/__init__.py similarity index 100% rename from 
cython/gtsam/tests/__init__.py rename to python/gtsam/utils/__init__.py diff --git a/cython/gtsam/utils/circlePose3.py b/python/gtsam/utils/circlePose3.py similarity index 79% rename from cython/gtsam/utils/circlePose3.py rename to python/gtsam/utils/circlePose3.py index 7012548f4..e1def9427 100644 --- a/cython/gtsam/utils/circlePose3.py +++ b/python/gtsam/utils/circlePose3.py @@ -1,9 +1,10 @@ import gtsam +import math import numpy as np -from math import pi, cos, sin +from math import pi -def circlePose3(numPoses=8, radius=1.0, symbolChar=0): +def circlePose3(numPoses=8, radius=1.0, symbolChar='\0'): """ circlePose3 generates a set of poses in a circle. This function returns those poses inside a gtsam.Values object, with sequential @@ -18,10 +19,6 @@ def circlePose3(numPoses=8, radius=1.0, symbolChar=0): Vehicle at p0 is looking towards y axis (X-axis points towards world y) """ - # Force symbolChar to be a single character - if type(symbolChar) is str: - symbolChar = ord(symbolChar[0]) - values = gtsam.Values() theta = 0.0 dtheta = 2 * pi / numPoses @@ -29,7 +26,7 @@ def circlePose3(numPoses=8, radius=1.0, symbolChar=0): np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., -1.]], order='F')) for i in range(numPoses): key = gtsam.symbol(symbolChar, i) - gti = gtsam.Point3(radius * cos(theta), radius * sin(theta), 0) + gti = gtsam.Point3(radius * math.cos(theta), radius * math.sin(theta), 0) oRi = gtsam.Rot3.Yaw( -theta) # negative yaw goes counterclockwise, with Z down ! gTi = gtsam.Pose3(gRo.compose(oRi), gti) diff --git a/cython/gtsam/utils/logging_optimizer.py b/python/gtsam/utils/logging_optimizer.py similarity index 100% rename from cython/gtsam/utils/logging_optimizer.py rename to python/gtsam/utils/logging_optimizer.py diff --git a/cython/gtsam/utils/plot.py b/python/gtsam/utils/plot.py similarity index 94% rename from cython/gtsam/utils/plot.py rename to python/gtsam/utils/plot.py index b55f5206a..0267da8c3 100644 --- a/cython/gtsam/utils/plot.py +++ b/python/gtsam/utils/plot.py @@ -109,7 +109,7 @@ def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None): # get rotation and translation (center) gRp = pose.rotation().matrix() # rotation from pose to global t = pose.translation() - origin = np.array([t.x(), t.y()]) + origin = t # draw the camera axes x_axis = origin + gRp[:, 0] * axis_length @@ -169,9 +169,9 @@ def plot_point3_on_axes(axes, point, linespec, P=None): linespec (string): String representing formatting options for Matplotlib. P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. 
""" - axes.plot([point.x()], [point.y()], [point.z()], linespec) + axes.plot([point[0]], [point[1]], [point[2]], linespec) if P is not None: - plot_covariance_ellipse_3d(axes, point.vector(), P) + plot_covariance_ellipse_3d(axes, point, P) def plot_point3(fignum, point, linespec, P=None, @@ -221,9 +221,8 @@ def plot_3d_points(fignum, values, linespec="g*", marginals=None, keys = values.keys() # Plot points and covariance matrices - for i in range(keys.size()): + for key in keys: try: - key = keys.at(i) point = values.atPoint3(key) if marginals is not None: covariance = marginals.marginalCovariance(key) @@ -253,7 +252,7 @@ def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1): """ # get rotation and translation (center) gRp = pose.rotation().matrix() # rotation from pose to global - origin = pose.translation().vector() + origin = pose.translation() # draw the camera axes x_axis = origin + gRp[:, 0] * axis_length @@ -319,19 +318,17 @@ def plot_trajectory(fignum, values, scale=1, marginals=None, title (string): The title of the plot. axis_labels (iterable[string]): List of axis labels to set. """ - pose3Values = gtsam.utilities_allPose3s(values) + pose3Values = gtsam.utilities.allPose3s(values) keys = gtsam.KeyVector(pose3Values.keys()) - lastIndex = None + lastKey = None - for i in range(keys.size()): - key = keys.at(i) + for key in keys: try: pose = pose3Values.atPose3(key) except: print("Warning: no Pose3 at key: {0}".format(key)) - if lastIndex is not None: - lastKey = keys.at(lastIndex) + if lastKey is not None: try: lastPose = pose3Values.atPose3(lastKey) except: @@ -346,11 +343,10 @@ def plot_trajectory(fignum, values, scale=1, marginals=None, fig = plot_pose3(fignum, lastPose, P=covariance, axis_length=scale, axis_labels=axis_labels) - lastIndex = i + lastKey = key # Draw final pose - if lastIndex is not None: - lastKey = keys.at(lastIndex) + if lastKey is not None: try: lastPose = pose3Values.atPose3(lastKey) if marginals: @@ -387,13 +383,12 @@ def plot_incremental_trajectory(fignum, values, start=0, fig = plt.figure(fignum) axes = fig.gca(projection='3d') - pose3Values = gtsam.utilities_allPose3s(values) + pose3Values = gtsam.utilities.allPose3s(values) keys = gtsam.KeyVector(pose3Values.keys()) - for i in range(start, keys.size()): - key = keys.at(i) + for key in keys[start:]: if values.exists(key): - pose_i = values.atPose3(keys.at(i)) + pose_i = values.atPose3(key) plot_pose3(fignum, pose_i, scale) # Update the plot space to encompass all plotted points diff --git a/cython/gtsam/utils/test_case.py b/python/gtsam/utils/test_case.py similarity index 80% rename from cython/gtsam/utils/test_case.py rename to python/gtsam/utils/test_case.py index 7df1e6ee9..3effd7f65 100644 --- a/cython/gtsam/utils/test_case.py +++ b/python/gtsam/utils/test_case.py @@ -21,7 +21,11 @@ class GtsamTestCase(unittest.TestCase): Keyword Arguments: tol {float} -- tolerance passed to 'equals', default 1e-9 """ - equal = actual.equals(expected, tol) + import numpy + if isinstance(expected, numpy.ndarray): + equal = numpy.allclose(actual, expected, atol=tol) + else: + equal = actual.equals(expected, tol) if not equal: raise self.failureException( "Values are not equal:\n{}!={}".format(actual, expected)) diff --git a/cython/gtsam/utils/visual_data_generator.py b/python/gtsam/utils/visual_data_generator.py similarity index 73% rename from cython/gtsam/utils/visual_data_generator.py rename to python/gtsam/utils/visual_data_generator.py index 5ce72fe68..32ccbc8fa 100644 --- 
a/cython/gtsam/utils/visual_data_generator.py +++ b/python/gtsam/utils/visual_data_generator.py @@ -1,9 +1,10 @@ from __future__ import print_function import numpy as np - +import math +from math import pi import gtsam -from gtsam import Cal3_S2, PinholeCameraCal3_S2, Point2, Point3, Pose3 +from gtsam import Point3, Pose3, PinholeCameraCal3_S2, Cal3_S2 class Options: @@ -30,8 +31,8 @@ class GroundTruth: def __init__(self, K=Cal3_S2(), nrCameras=3, nrPoints=4): self.K = K - self.cameras = [gtsam.Pose3()] * nrCameras - self.points = [gtsam.Point3(0, 0, 0)] * nrPoints + self.cameras = [Pose3()] * nrCameras + self.points = [Point3(0, 0, 0)] * nrPoints def print_(self, s=""): print(s) @@ -55,20 +56,20 @@ class Data: def __init__(self, K=Cal3_S2(), nrCameras=3, nrPoints=4): self.K = K - self.Z = [x[:] for x in [[Point2()] * nrPoints] * nrCameras] + self.Z = [x[:] for x in [[gtsam.Point2()] * nrPoints] * nrCameras] self.J = [x[:] for x in [[0] * nrPoints] * nrCameras] self.odometry = [Pose3()] * nrCameras # Set Noise parameters self.noiseModels = Data.NoiseModels() - self.noiseModels.posePrior = gtsam.noiseModel_Diagonal.Sigmas( + self.noiseModels.posePrior = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.001, 0.001, 0.001, 0.1, 0.1, 0.1])) - # noiseModels.odometry = gtsam.noiseModel_Diagonal.Sigmas( + # noiseModels.odometry = gtsam.noiseModel.Diagonal.Sigmas( # np.array([0.001,0.001,0.001,0.1,0.1,0.1])) - self.noiseModels.odometry = gtsam.noiseModel_Diagonal.Sigmas( + self.noiseModels.odometry = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.05, 0.05, 0.05, 0.2, 0.2, 0.2])) - self.noiseModels.pointPrior = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) - self.noiseModels.measurement = gtsam.noiseModel_Isotropic.Sigma(2, 1.0) + self.noiseModels.pointPrior = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) + self.noiseModels.measurement = gtsam.noiseModel.Isotropic.Sigma(2, 1.0) def generate_data(options): @@ -84,8 +85,8 @@ def generate_data(options): if options.triangle: # Create a triangle target, just 3 points on a plane r = 10 for j in range(len(truth.points)): - theta = j * 2 * np.pi / nrPoints - truth.points[j] = Point3(r * np.cos(theta), r * np.sin(theta), 0) + theta = j * 2 * pi / nrPoints + truth.points[j] = Point3(r * math.cos(theta), r * math.sin(theta), 0) else: # 3D landmarks as vertices of a cube truth.points = [ Point3(10, 10, 10), Point3(-10, 10, 10), @@ -98,12 +99,12 @@ def generate_data(options): height = 10 r = 40 for i in range(options.nrCameras): - theta = i * 2 * np.pi / options.nrCameras - t = gtsam.Point3(r * np.cos(theta), r * np.sin(theta), height) - truth.cameras[i] = gtsam.SimpleCamera.Lookat(t, - gtsam.Point3(0, 0, 0), - gtsam.Point3(0, 0, 1), - truth.K) + theta = i * 2 * pi / options.nrCameras + t = Point3(r * math.cos(theta), r * math.sin(theta), height) + truth.cameras[i] = PinholeCameraCal3_S2.Lookat(t, + Point3(0, 0, 0), + Point3(0, 0, 1), + truth.K) # Create measurements for j in range(nrPoints): # All landmarks seen in every frame diff --git a/cython/gtsam/utils/visual_isam.py b/python/gtsam/utils/visual_isam.py similarity index 88% rename from cython/gtsam/utils/visual_isam.py rename to python/gtsam/utils/visual_isam.py index b0ebe68c3..a8fed4b23 100644 --- a/cython/gtsam/utils/visual_isam.py +++ b/python/gtsam/utils/visual_isam.py @@ -25,7 +25,7 @@ def initialize(data, truth, options): newFactors = gtsam.NonlinearFactorGraph() initialEstimates = gtsam.Values() for i in range(2): - ii = symbol(ord('x'), i) + ii = symbol('x', i) if i == 0: if options.hardConstraint: # add hard 
constraint newFactors.add( @@ -41,10 +41,10 @@ def initialize(data, truth, options): # Add visual measurement factors from two first poses and initialize # observed landmarks for i in range(2): - ii = symbol(ord('x'), i) + ii = symbol('x', i) for k in range(len(data.Z[i])): j = data.J[i][k] - jj = symbol(ord('l'), j) + jj = symbol('l', j) newFactors.add( gtsam.GenericProjectionFactorCal3_S2(data.Z[i][ k], data.noiseModels.measurement, ii, jj, data.K)) @@ -59,8 +59,8 @@ def initialize(data, truth, options): # Add odometry between frames 0 and 1 newFactors.add( gtsam.BetweenFactorPose3( - symbol(ord('x'), 0), - symbol(ord('x'), 1), data.odometry[1], data.noiseModels.odometry)) + symbol('x', 0), + symbol('x', 1), data.odometry[1], data.noiseModels.odometry)) # Update ISAM if options.batchInitialization: # Do a full optimize for first two poses @@ -98,28 +98,28 @@ def step(data, isam, result, truth, currPoseIndex): odometry = data.odometry[prevPoseIndex] newFactors.add( gtsam.BetweenFactorPose3( - symbol(ord('x'), prevPoseIndex), - symbol(ord('x'), currPoseIndex), odometry, + symbol('x', prevPoseIndex), + symbol('x', currPoseIndex), odometry, data.noiseModels.odometry)) # Add visual measurement factors and initializations as necessary for k in range(len(data.Z[currPoseIndex])): zij = data.Z[currPoseIndex][k] j = data.J[currPoseIndex][k] - jj = symbol(ord('l'), j) + jj = symbol('l', j) newFactors.add( gtsam.GenericProjectionFactorCal3_S2( zij, data.noiseModels.measurement, - symbol(ord('x'), currPoseIndex), jj, data.K)) + symbol('x', currPoseIndex), jj, data.K)) # TODO: initialize with something other than truth if not result.exists(jj) and not initialEstimates.exists(jj): lmInit = truth.points[j] initialEstimates.insert(jj, lmInit) # Initial estimates for the new pose. 
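The visual_isam.py and visual_data_generator.py hunks above capture the two wrapped-API changes that recur throughout this patch: gtsam.symbol now takes the symbol character directly instead of an ord() value, and the noise-model factories move from the underscore-mangled noiseModel_* names into the gtsam.noiseModel namespace. A minimal sketch of the new call pattern, using only calls that appear in these hunks and assuming a built gtsam module is importable:

import numpy as np
import gtsam

# Keys: pass the symbol character directly, no ord() needed.
x0 = gtsam.symbol('x', 0)
x1 = gtsam.symbol('x', 1)

# Noise models: nested gtsam.noiseModel namespace instead of noiseModel_Diagonal etc.
odom_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.05, 0.05, 0.05, 0.2, 0.2, 0.2]))
meas_noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0)  # mirrors the measurement model above

# Factor construction itself is unchanged.
graph = gtsam.NonlinearFactorGraph()
graph.add(gtsam.BetweenFactorPose3(x0, x1, gtsam.Pose3(), odom_noise))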
- prevPose = result.atPose3(symbol(ord('x'), prevPoseIndex)) + prevPose = result.atPose3(symbol('x', prevPoseIndex)) initialEstimates.insert( - symbol(ord('x'), currPoseIndex), prevPose.compose(odometry)) + symbol('x', currPoseIndex), prevPose.compose(odometry)) # Update ISAM # figure(1)tic diff --git a/cython/gtsam_unstable/__init__.py b/python/gtsam_unstable/__init__.py similarity index 100% rename from cython/gtsam_unstable/__init__.py rename to python/gtsam_unstable/__init__.py diff --git a/cython/gtsam_unstable/examples/FixedLagSmootherExample.py b/python/gtsam_unstable/examples/FixedLagSmootherExample.py similarity index 84% rename from cython/gtsam_unstable/examples/FixedLagSmootherExample.py rename to python/gtsam_unstable/examples/FixedLagSmootherExample.py index 786701e0f..7d2cea8ae 100644 --- a/cython/gtsam_unstable/examples/FixedLagSmootherExample.py +++ b/python/gtsam_unstable/examples/FixedLagSmootherExample.py @@ -16,16 +16,6 @@ import numpy as np import gtsam import gtsam_unstable - -def _timestamp_key_value(key, value): - """ - - """ - return gtsam_unstable.FixedLagSmootherKeyTimestampMapValue( - key, value - ) - - def BatchFixedLagSmootherExample(): """ Runs a batch fixed smoother on an agent with two odometry @@ -45,21 +35,21 @@ def BatchFixedLagSmootherExample(): # Create a prior on the first pose, placing it at the origin prior_mean = gtsam.Pose2(0, 0, 0) - prior_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) + prior_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) X1 = 0 new_factors.push_back(gtsam.PriorFactorPose2(X1, prior_mean, prior_noise)) new_values.insert(X1, prior_mean) - new_timestamps.insert(_timestamp_key_value(X1, 0.0)) + new_timestamps.insert((X1, 0.0)) delta_time = 0.25 time = 0.25 while time <= 3.0: - previous_key = 1000 * (time - delta_time) - current_key = 1000 * time + previous_key = int(1000 * (time - delta_time)) + current_key = int(1000 * time) # assign current key to the current timestamp - new_timestamps.insert(_timestamp_key_value(current_key, time)) + new_timestamps.insert((current_key, time)) # Add a guess for this pose to the new values # Assume that the robot moves at 2 m/s. 
Position is time[s] * 2[m/s] @@ -69,14 +59,14 @@ def BatchFixedLagSmootherExample(): # Add odometry factors from two different sources with different error # stats odometry_measurement_1 = gtsam.Pose2(0.61, -0.08, 0.02) - odometry_noise_1 = gtsam.noiseModel_Diagonal.Sigmas( + odometry_noise_1 = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.1, 0.1, 0.05])) new_factors.push_back(gtsam.BetweenFactorPose2( previous_key, current_key, odometry_measurement_1, odometry_noise_1 )) odometry_measurement_2 = gtsam.Pose2(0.47, 0.03, 0.01) - odometry_noise_2 = gtsam.noiseModel_Diagonal.Sigmas( + odometry_noise_2 = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.05, 0.05, 0.05])) new_factors.push_back(gtsam.BetweenFactorPose2( previous_key, current_key, odometry_measurement_2, odometry_noise_2 diff --git a/cython/gtsam_unstable/examples/TimeOfArrivalExample.py b/python/gtsam_unstable/examples/TimeOfArrivalExample.py similarity index 98% rename from cython/gtsam_unstable/examples/TimeOfArrivalExample.py rename to python/gtsam_unstable/examples/TimeOfArrivalExample.py index 6ba06f0f2..59f008a05 100644 --- a/cython/gtsam_unstable/examples/TimeOfArrivalExample.py +++ b/python/gtsam_unstable/examples/TimeOfArrivalExample.py @@ -12,7 +12,7 @@ Author: Frank Dellaert # pylint: disable=invalid-name, no-name-in-module from gtsam import (LevenbergMarquardtOptimizer, LevenbergMarquardtParams, - NonlinearFactorGraph, Point3, Values, noiseModel_Isotropic) + NonlinearFactorGraph, Point3, Values, noiseModel) from gtsam_unstable import Event, TimeOfArrival, TOAFactor # units @@ -64,7 +64,7 @@ def create_graph(microphones, simulatedTOA): graph = NonlinearFactorGraph() # Create a noise model for the TOA error - model = noiseModel_Isotropic.Sigma(1, 0.5 * MS) + model = noiseModel.Isotropic.Sigma(1, 0.5 * MS) K = len(microphones) key = 0 diff --git a/cython/gtsam/utils/__init__.py b/python/gtsam_unstable/examples/__init__.py similarity index 100% rename from cython/gtsam/utils/__init__.py rename to python/gtsam_unstable/examples/__init__.py diff --git a/python/gtsam_unstable/gtsam_unstable.tpl b/python/gtsam_unstable/gtsam_unstable.tpl new file mode 100644 index 000000000..1d9dfaa40 --- /dev/null +++ b/python/gtsam_unstable/gtsam_unstable.tpl @@ -0,0 +1,44 @@ +/** + * @file gtsam.cpp + * @brief The auto-generated wrapper C++ source code. + * @author Duy-Nguyen Ta, Fan Jiang, Matthew Sklar + * @date Aug. 18, 2020 + * + * ** THIS FILE IS AUTO-GENERATED, DO NOT MODIFY! ** + */ + +// Include relevant boost libraries required by GTSAM +{include_boost} + +#include +#include +#include +#include "gtsam/base/serialization.h" +#include "gtsam/nonlinear/utilities.h" // for RedirectCout. 
+ +// These are the included headers listed in `gtsam_unstable.i` +{includes} +#include + +{boost_class_export} + +{hoder_type} + +#include "python/gtsam_unstable/preamble.h" + +using namespace std; + +namespace py = pybind11; + +PYBIND11_MODULE({module_name}, m_) {{ + m_.doc() = "pybind11 wrapper of {module_name}"; + + // Note here we need to import the dependent library + py::module::import("gtsam"); + +{wrapped_namespace} + +#include "python/gtsam_unstable/specializations.h" + +}} + diff --git a/cython/gtsam_unstable/examples/__init__.py b/python/gtsam_unstable/preamble.h similarity index 100% rename from cython/gtsam_unstable/examples/__init__.py rename to python/gtsam_unstable/preamble.h diff --git a/cython/gtsam_unstable/tests/__init__.py b/python/gtsam_unstable/specializations.h similarity index 100% rename from cython/gtsam_unstable/tests/__init__.py rename to python/gtsam_unstable/specializations.h diff --git a/python/gtsam_unstable/tests/__init__.py b/python/gtsam_unstable/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/cython/gtsam_unstable/tests/test_FixedLagSmootherExample.py b/python/gtsam_unstable/tests/test_FixedLagSmootherExample.py similarity index 88% rename from cython/gtsam_unstable/tests/test_FixedLagSmootherExample.py rename to python/gtsam_unstable/tests/test_FixedLagSmootherExample.py index 8d3af311f..c1ccd1ea1 100644 --- a/cython/gtsam_unstable/tests/test_FixedLagSmootherExample.py +++ b/python/gtsam_unstable/tests/test_FixedLagSmootherExample.py @@ -16,13 +16,6 @@ import gtsam import gtsam_unstable from gtsam.utils.test_case import GtsamTestCase - -def _timestamp_key_value(key, value): - return gtsam_unstable.FixedLagSmootherKeyTimestampMapValue( - key, value - ) - - class TestFixedLagSmootherExample(GtsamTestCase): ''' Tests the fixed lag smoother wrapper @@ -47,14 +40,14 @@ class TestFixedLagSmootherExample(GtsamTestCase): # Create a prior on the first pose, placing it at the origin prior_mean = gtsam.Pose2(0, 0, 0) - prior_noise = gtsam.noiseModel_Diagonal.Sigmas( + prior_noise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.3, 0.3, 0.1])) X1 = 0 new_factors.push_back( gtsam.PriorFactorPose2( X1, prior_mean, prior_noise)) new_values.insert(X1, prior_mean) - new_timestamps.insert(_timestamp_key_value(X1, 0.0)) + new_timestamps.insert((X1, 0.0)) delta_time = 0.25 time = 0.25 @@ -80,11 +73,11 @@ class TestFixedLagSmootherExample(GtsamTestCase): # and its two odometers measure the change. The smoothed # result is then compared to the ground truth while time <= 3.0: - previous_key = 1000 * (time - delta_time) - current_key = 1000 * time + previous_key = int(1000 * (time - delta_time)) + current_key = int(1000 * time) # assign current key to the current timestamp - new_timestamps.insert(_timestamp_key_value(current_key, time)) + new_timestamps.insert((current_key, time)) # Add a guess for this pose to the new values # Assume that the robot moves at 2 m/s. 
Position is time[s] * @@ -95,7 +88,7 @@ class TestFixedLagSmootherExample(GtsamTestCase): # Add odometry factors from two different sources with different # error stats odometry_measurement_1 = gtsam.Pose2(0.61, -0.08, 0.02) - odometry_noise_1 = gtsam.noiseModel_Diagonal.Sigmas( + odometry_noise_1 = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.1, 0.1, 0.05])) new_factors.push_back( gtsam.BetweenFactorPose2( @@ -105,7 +98,7 @@ class TestFixedLagSmootherExample(GtsamTestCase): odometry_noise_1)) odometry_measurement_2 = gtsam.Pose2(0.47, 0.03, 0.01) - odometry_noise_2 = gtsam.noiseModel_Diagonal.Sigmas( + odometry_noise_2 = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.05, 0.05, 0.05])) new_factors.push_back( gtsam.BetweenFactorPose2( diff --git a/python/requirements.txt b/python/requirements.txt new file mode 100644 index 000000000..481d27d8e --- /dev/null +++ b/python/requirements.txt @@ -0,0 +1,2 @@ +numpy>=1.11.0 +pyparsing>=2.4.2 diff --git a/cython/setup.py.in b/python/setup.py.in similarity index 70% rename from cython/setup.py.in rename to python/setup.py.in index 98a05c9f6..55431a9ad 100644 --- a/cython/setup.py.in +++ b/python/setup.py.in @@ -1,25 +1,20 @@ import os import sys + try: from setuptools import setup, find_packages except ImportError: from distutils.core import setup, find_packages -packages = find_packages() - +packages = find_packages(where=".") +print("PACKAGES: ", packages) package_data = { - package: - [f for f in os.listdir(package.replace('.', os.path.sep)) if os.path.splitext(f)[1] in ('.so', '.pyd')] - for package in packages + '': [ + './*.so', + './*.dll', + ] } -cython_install_requirements = open("${CYTHON_INSTALL_REQUIREMENTS_FILE}").readlines() - -install_requires = [line.strip() \ - for line in cython_install_requirements \ - if len(line.strip()) > 0 and not line.strip().startswith('#') -] - # Cleaner to read in the contents rather than copy them over. 
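The fixed-lag smoother example and test above drop the _timestamp_key_value helper: keys become plain Python ints and key/timestamp pairs are inserted as tuples. A minimal sketch of that bookkeeping follows; the construction of the timestamp map itself is outside these hunks, so the gtsam_unstable.FixedLagSmootherKeyTimestampMap name below is an assumption inferred from the deleted FixedLagSmootherKeyTimestampMapValue helper, not something this patch shows:

import gtsam_unstable

# Assumed container type; only its insert((key, time)) usage appears in the patch.
new_timestamps = gtsam_unstable.FixedLagSmootherKeyTimestampMap()

delta_time = 0.25
time = 0.50
previous_key = int(1000 * (time - delta_time))  # keys must now be integers
current_key = int(1000 * time)
new_timestamps.insert((current_key, time))      # plain (key, timestamp) tuple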
readme_contents = open("${PROJECT_SOURCE_DIR}/README.md").read() @@ -33,8 +28,6 @@ setup( license='Simplified BSD license', keywords='slam sam robotics localization mapping optimization', long_description=readme_contents, - long_description_content_type='text/markdown', - python_requires='>=2.7', # https://pypi.org/pypi?%3Aaction=list_classifiers classifiers=[ 'Development Status :: 5 - Production/Stable', @@ -48,8 +41,9 @@ setup( 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', ], - packages=packages, package_data=package_data, - install_requires=install_requires + test_suite="gtsam.tests", + install_requires=["numpy"], + zip_safe=False, ) diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index 2616ab103..dc19801a2 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ -541,7 +541,6 @@ TEST(NonlinearOptimizer, subclass_solver) { } /* ************************************************************************* */ -#include TEST( NonlinearOptimizer, logfile ) { NonlinearFactorGraph fg(example::createReallyNonlinearFactorGraph()); diff --git a/timing/timeFrobeniusFactor.cpp b/timing/timeShonanFactor.cpp similarity index 92% rename from timing/timeFrobeniusFactor.cpp rename to timing/timeShonanFactor.cpp index 924213a33..207d54a4d 100644 --- a/timing/timeFrobeniusFactor.cpp +++ b/timing/timeShonanFactor.cpp @@ -10,8 +10,8 @@ * -------------------------------------------------------------------------- */ /** - * @file timeFrobeniusFactor.cpp - * @brief time FrobeniusFactor with BAL file + * @file timeShonanFactor.cpp + * @brief time ShonanFactor with BAL file * @author Frank Dellaert * @date 2019 */ @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include @@ -42,7 +42,7 @@ static SharedNoiseModel gNoiseModel = noiseModel::Unit::Create(2); int main(int argc, char* argv[]) { // primitive argument parsing: if (argc > 3) { - throw runtime_error("Usage: timeFrobeniusFactor [g2oFile]"); + throw runtime_error("Usage: timeShonanFactor [g2oFile]"); } string g2oFile; @@ -70,7 +70,7 @@ int main(int argc, char* argv[]) { const auto &keys = m.keys(); const Rot3 &Rij = m.measured(); const auto &model = m.noiseModel(); - graph.emplace_shared( + graph.emplace_shared( keys[0], keys[1], Rij, 4, model, G); } diff --git a/update_wrap.sh b/update_wrap.sh new file mode 100755 index 000000000..61f55e32e --- /dev/null +++ b/update_wrap.sh @@ -0,0 +1,13 @@ +#!/bin/bash +REF=${1-master} # branch or tag; defaults to 'master' if parameter 1 not present +REMOTE=wrap # just a name to identify the remote +REPO=git@github.com:borglab/wrap.git # replace this with your repository URL +FOLDER=wrap # where to mount the subtree + +git remote add $REMOTE --no-tags $REPO +if [[ -d $FOLDER ]]; then # update the existing subtree + git subtree pull $REMOTE $REF --prefix=$FOLDER --squash -m "Merging '$REF' into '$FOLDER'" +else # add the subtree + git subtree add $REMOTE $REF --prefix=$FOLDER --squash -m "Merging '$REF' into '$FOLDER'" +fi +git remote remove $REMOTE \ No newline at end of file diff --git a/wrap/.github/workflows/ci.yml b/wrap/.github/workflows/ci.yml new file mode 100644 index 000000000..2e38bc3dd --- /dev/null +++ b/wrap/.github/workflows/ci.yml @@ -0,0 +1,52 @@ +name: Python CI + +on: [push, pull_request] + +jobs: + build: + name: ${{ matrix.name }} 🐍 ${{ matrix.python_version }} + runs-on: ${{ matrix.os }} + + env: + PYTHON_VERSION: ${{ matrix.python_version }} + strategy: + fail-fast: false + matrix: 
+ # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. + name: [ + ubuntu-18.04 + ] + + python_version: [3] + include: + - name: ubuntu-18.04 + os: ubuntu-18.04 + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (Linux) + if: runner.os == 'Linux' + run: | + sudo apt-get -y update + + sudo apt install cmake build-essential pkg-config libpython-dev python-numpy libboost-all-dev + - name: Install (macOS) + if: runner.os == 'macOS' + run: | + brew install cmake ninja boost + - name: Build (Linux) + if: runner.os == 'Linux' + run: | + sudo pip$PYTHON_VERSION install -r requirements.txt + cd tests + python$PYTHON_VERSION test_pybind_wrapper.py + python$PYTHON_VERSION test_matlab_wrapper.py + - name: Build (macOS) + if: runner.os == 'macOS' + run: | + pip$PYTHON_VERSION install -r requirements.txt + cd tests + python$PYTHON_VERSION test_pybind_wrapper.py + python$PYTHON_VERSION test_matlab_wrapper.py \ No newline at end of file diff --git a/wrap/.gitignore b/wrap/.gitignore new file mode 100644 index 000000000..38da6d9d1 --- /dev/null +++ b/wrap/.gitignore @@ -0,0 +1,2 @@ +__pycache__/ +.vscode/ diff --git a/wrap/Argument.cpp b/wrap/Argument.cpp deleted file mode 100644 index f85aed72e..000000000 --- a/wrap/Argument.cpp +++ /dev/null @@ -1,316 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Argument.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "Argument.h" -#include "Class.h" - -#include - -#include -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -Argument Argument::expandTemplate(const TemplateSubstitution& ts) const { - Argument instArg = *this; - instArg.type = ts.tryToSubstitite(type); - return instArg; -} - -/* ************************************************************************* */ -ArgumentList ArgumentList::expandTemplate( - const TemplateSubstitution& ts) const { - ArgumentList instArgList; - for(const Argument& arg: *this) { - Argument instArg = arg.expandTemplate(ts); - instArgList.push_back(instArg); - } - return instArgList; -} - -/* ************************************************************************* */ -string Argument::matlabClass(const string& delim) const { - string result; - for(const string& ns: type.namespaces()) - result += ns + delim; - if (type.name() == "string" || type.name() == "unsigned char" - || type.name() == "char") - return result + "char"; - if (type.name() == "Vector" || type.name() == "Matrix") - return result + "double"; - if (type.name() == "int" || type.name() == "size_t") - return result + "numeric"; - if (type.name() == "bool") - return result + "logical"; - return result + type.name(); -} - -/* ************************************************************************* */ -void Argument::matlab_unwrap(FileWriter& file, const string& matlabName) const { - file.oss << " "; - - string cppType = type.qualifiedName("::"); - string matlabUniqueType = type.qualifiedName(); - bool isNotScalar = 
!type.isScalar(); - - // We cannot handle scalar non const references - if (!isNotScalar && is_ref && !is_const) { - throw std::runtime_error("Cannot unwrap a scalar non-const reference"); - } - - if (is_ptr && type.category != Qualified::EIGEN) - // A pointer: emit an "unwrap_shared_ptr" call which returns a pointer - file.oss << "boost::shared_ptr<" << cppType << "> " << name - << " = unwrap_shared_ptr< "; - else if (is_ref && isNotScalar && type.category != Qualified::EIGEN) - // A reference: emit an "unwrap_shared_ptr" call and de-reference the pointer - file.oss << cppType << "& " << name << " = *unwrap_shared_ptr< "; - else - // Not a pointer, or a reference to a scalar type. Therefore, emit an "unwrap" call - // unwrap is specified in matlab.h as a series of template specializations - // that know how to unpack the expected MATLAB object - // example: double tol = unwrap< double >(in[2]); - // example: Vector v = unwrap< Vector >(in[1]); - file.oss << cppType << " " << name << " = unwrap< "; - - file.oss << cppType << " >(" << matlabName; - if( (is_ptr || is_ref) && isNotScalar && type.category != Qualified::EIGEN) - file.oss << ", \"ptr_" << matlabUniqueType << "\""; - file.oss << ");" << endl; -} - -/* ************************************************************************* */ -void Argument::proxy_check(FileWriter& proxyFile, const string& s) const { - proxyFile.oss << "isa(" << s << ",'" << matlabClass(".") << "')"; - if (type.name() == "Vector") - proxyFile.oss << " && size(" << s << ",2)==1"; -} - -/* ************************************************************************* */ -void Argument::emit_cython_pxd( - FileWriter& file, const std::string& className, - const std::vector& templateArgs) const { - string cythonType = type.pxdClassName(); - if (cythonType == "This") cythonType = className; - else if (type.isEigen()) - cythonType = "const " + cythonType + "&"; - else if (type.match(templateArgs)) - cythonType = type.name(); - - // add modifier - if (!type.isEigen()) { - if (is_ptr) cythonType = "shared_ptr[" + cythonType + "]&"; - if (is_ref) cythonType = cythonType + "&"; - if (is_const) cythonType = "const " + cythonType; - } - - file.oss << cythonType << " " << name; -} - -/* ************************************************************************* */ -void Argument::emit_cython_pyx(FileWriter& file) const { - file.oss << type.pyxArgumentType() << " " << name; -} - -/* ************************************************************************* */ -std::string Argument::pyx_convertEigenTypeAndStorageOrder() const { - if (!type.isEigen()) - return ""; - return name + " = " + name + ".astype(float, order=\'F\', copy=False)"; -} - -/* ************************************************************************* */ -std::string Argument::pyx_asParam() const { - string cythonType = type.pxdClassName(); - string cythonVar; - if (type.isNonBasicType()) { - cythonVar = name + "." 
+ type.shared_pxd_obj_in_pyx(); - if (!is_ptr) cythonVar = "deref(" + cythonVar + ")"; - } else if (type.isEigen()) { - cythonVar = "<" + cythonType + ">" + "(Map[" + cythonType + "](" + name + "))"; - } else { - cythonVar = name; - } - return cythonVar; -} - -/* ************************************************************************* */ -string ArgumentList::types() const { - string str; - bool first = true; - for(Argument arg: *this) { - if (!first) - str += ","; - str += arg.type.name(); - first = false; - } - return str; -} - -/* ************************************************************************* */ -string ArgumentList::signature() const { - string sig; - bool cap = false; - - for(Argument arg: *this) { - for(char ch: arg.type.name()) - if (isupper(ch)) { - sig += ch; - //If there is a capital letter, we don't want to read it below - cap = true; - } - if (!cap) - sig += arg.type.name()[0]; - //Reset to default - cap = false; - } - - return sig; -} - -/* ************************************************************************* */ -string ArgumentList::names() const { - string str; - bool first = true; - for(Argument arg: *this) { - if (!first) - str += ","; - str += arg.name; - first = false; - } - return str; -} - -/* ************************************************************************* */ -bool ArgumentList::allScalar() const { - for(Argument arg: *this) - if (!arg.type.isScalar()) - return false; - return true; -} - -/* ************************************************************************* */ -void ArgumentList::matlab_unwrap(FileWriter& file, int start) const { - int index = start; - for(Argument arg: *this) { - stringstream buf; - buf << "in[" << index << "]"; - arg.matlab_unwrap(file, buf.str()); - index++; - } -} - -/* ************************************************************************* */ -void ArgumentList::emit_prototype(FileWriter& file, const string& name) const { - file.oss << name << "("; - bool first = true; - for(Argument arg: *this) { - if (!first) - file.oss << ", "; - file.oss << arg.type.name() << " " << arg.name; - first = false; - } - file.oss << ")"; -} - -/* ************************************************************************* */ -void ArgumentList::emit_cython_pxd( - FileWriter& file, const std::string& className, - const std::vector& templateArgs) const { - for (size_t j = 0; j(__params[" + std::to_string(j) + "])\n"; - return s; -} - -/* ************************************************************************* */ -void ArgumentList::proxy_check(FileWriter& proxyFile) const { - // Check nr of arguments - proxyFile.oss << "if length(varargin) == " << size(); - if (size() > 0) - proxyFile.oss << " && "; - // ...and their type.names - bool first = true; - for (size_t i = 0; i < size(); i++) { - if (!first) - proxyFile.oss << " && "; - string s = "varargin{" + boost::lexical_cast(i + 1) + "}"; - (*this)[i].proxy_check(proxyFile, s); - first = false; - } - proxyFile.oss << "\n"; -} - -/* ************************************************************************* */ - diff --git a/wrap/Argument.h b/wrap/Argument.h deleted file mode 100644 index c08eb0be9..000000000 --- a/wrap/Argument.h +++ /dev/null @@ -1,242 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Argument.h - * @brief arguments to constructors and methods - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#pragma once - -#include "TemplateSubstitution.h" -#include "FileWriter.h" -#include "ReturnValue.h" - -namespace wrap { - -/// Argument class -struct Argument { - Qualified type; - std::string name; - bool is_const, is_ref, is_ptr; - - Argument() : - is_const(false), is_ref(false), is_ptr(false) { - } - - Argument(const Qualified& t, const std::string& n) : - type(t), name(n), is_const(false), is_ref(false), is_ptr(false) { - } - - bool isSameSignature(const Argument& other) const { - return type == other.type - && is_const == other.is_const && is_ref == other.is_ref - && is_ptr == other.is_ptr; - } - - bool operator==(const Argument& other) const { - return type == other.type && name == other.name - && is_const == other.is_const && is_ref == other.is_ref - && is_ptr == other.is_ptr; - } - - Argument expandTemplate(const TemplateSubstitution& ts) const; - - /// return MATLAB class for use in isa(x,class) - std::string matlabClass(const std::string& delim = "") const; - - /// MATLAB code generation, MATLAB to C++ - void matlab_unwrap(FileWriter& file, const std::string& matlabName) const; - - /** - * emit checking argument to MATLAB proxy - * @param proxyFile output stream - */ - void proxy_check(FileWriter& proxyFile, const std::string& s) const; - - /** - * emit arguments for cython pxd - * @param file output stream - */ - void emit_cython_pxd(FileWriter& file, const std::string& className, - const std::vector& templateArgs) const; - void emit_cython_pyx(FileWriter& file) const; - std::string pyx_asParam() const; - std::string pyx_convertEigenTypeAndStorageOrder() const; - - friend std::ostream& operator<<(std::ostream& os, const Argument& arg) { - os << (arg.is_const ? "const " : "") << arg.type << (arg.is_ptr ? "*" : "") - << (arg.is_ref ? 
"&" : ""); - return os; - } - -}; - -/// Argument list is just a container with Arguments -struct ArgumentList: public std::vector { - - /// create a comma-separated string listing all argument types (not used) - std::string types() const; - - /// create a short "signature" string - std::string signature() const; - - /// create a comma-separated string listing all argument names, used in m-files - std::string names() const; - - /// Check if all arguments scalar - bool allScalar() const; - - ArgumentList expandTemplate(const TemplateSubstitution& ts) const; - - bool isSameSignature(const ArgumentList& other) const { - for(size_t i = 0; i& templateArgs) const; - void emit_cython_pyx(FileWriter& file) const; - std::string pyx_asParams() const; - std::string pyx_paramsList() const; - std::string pyx_castParamsToPythonType(const std::string& indent) const; - std::string pyx_convertEigenTypeAndStorageOrder(const std::string& indent) const; - - /** - * emit checking arguments to MATLAB proxy - * @param proxyFile output stream - */ - void proxy_check(FileWriter& proxyFile) const; - - /// Output stream operator - friend std::ostream& operator<<(std::ostream& os, - const ArgumentList& argList) { - os << "("; - if (argList.size() > 0) - os << argList.front(); - if (argList.size() > 1) - for (size_t i = 1; i < argList.size(); i++) - os << ", " << argList[i]; - os << ")"; - return os; - } - -}; - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ArgumentGrammar: public classic::grammar { - - wrap::Argument& result_; ///< successful parse will be placed in here - TypeGrammar argument_type_g; ///< Type parser for Argument::type - - /// Construct type grammar and specify where result is placed - ArgumentGrammar(wrap::Argument& result) : - result_(result), argument_type_g(result.type) { - } - - /// Definition of type grammar - template - struct definition: BasicRules { - - typedef classic::rule Rule; - - Rule argument_p; - - definition(ArgumentGrammar const& self) { - using namespace classic; - - // NOTE: allows for pointers to all types - // Slightly more permissive than before on basis/eigen type qualification - // Also, currently parses Point2*&, can't make it work otherwise :-( - argument_p = !str_p("const")[assign_a(self.result_.is_const, T)] // - >> self.argument_type_g // - >> !ch_p('*')[assign_a(self.result_.is_ptr, T)] - >> !ch_p('&')[assign_a(self.result_.is_ref, T)] - >> BasicRules::name_p[assign_a(self.result_.name)]; - } - - Rule const& start() const { - return argument_p; - } - - }; -}; -// ArgumentGrammar - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ArgumentListGrammar: public classic::grammar { - - wrap::ArgumentList& result_; ///< successful parse will be placed in here - - /// Construct type grammar and specify where result is placed - ArgumentListGrammar(wrap::ArgumentList& result) : - result_(result) { - } - - /// Definition of type grammar - template - struct definition { - - const Argument arg0; ///< used to reset arg - Argument arg; ///< temporary argument for use during parsing - ArgumentGrammar argument_g; ///< single Argument parser - - classic::rule argument_p, argumentList_p; - - definition(ArgumentListGrammar const& self) : - argument_g(arg) { - using namespace classic; - - argument_p = argument_g // - [classic::push_back_a(self.result_, 
arg)] // - [assign_a(arg, arg0)]; - - argumentList_p = '(' >> !argument_p >> *(',' >> argument_p) >> ')'; - } - - classic::rule const& start() const { - return argumentList_p; - } - - }; -}; -// ArgumentListGrammar - -/* ************************************************************************* */ - -}// \namespace wrap - diff --git a/wrap/CMakeLists.txt b/wrap/CMakeLists.txt deleted file mode 100644 index c04a44edb..000000000 --- a/wrap/CMakeLists.txt +++ /dev/null @@ -1,45 +0,0 @@ -# Build/install Wrap - -set(WRAP_BOOST_LIBRARIES - Boost::system - Boost::filesystem - Boost::thread -) - -# Allow for disabling serialization to handle errors related to Clang's linker -option(GTSAM_WRAP_SERIALIZATION "If enabled, allows for wrapped objects to be saved via boost.serialization" ON) - -# Build the executable itself -file(GLOB wrap_srcs "*.cpp") -file(GLOB wrap_headers "*.h") -list(REMOVE_ITEM wrap_srcs ${CMAKE_CURRENT_SOURCE_DIR}/wrap.cpp) -add_library(wrap_lib STATIC ${wrap_srcs} ${wrap_headers}) -target_include_directories(wrap_lib PUBLIC - $ -) -if (NOT GTSAM_WRAP_SERIALIZATION) - target_compile_definitions(wrap_lib PUBLIC -DWRAP_DISABLE_SERIALIZE) -endif() - -# Apply build flags: -gtsam_apply_build_flags(wrap_lib) - -target_link_libraries(wrap_lib ${WRAP_BOOST_LIBRARIES}) -gtsam_assign_source_folders(${wrap_srcs} ${wrap_headers}) -add_executable(wrap wrap.cpp) -target_link_libraries(wrap PRIVATE wrap_lib) - -# Set folder in Visual Studio -file(RELATIVE_PATH relative_path "${PROJECT_SOURCE_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}") -set_target_properties(wrap_lib wrap PROPERTIES FOLDER "${relative_path}") - -# Install wrap binary and export target -install(TARGETS wrap EXPORT GTSAM-exports DESTINATION ${CMAKE_INSTALL_BINDIR}) -list(APPEND GTSAM_EXPORTED_TARGETS wrap) -set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE) - -# Install matlab header -install(FILES matlab.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/wrap) - -# Build tests -add_subdirectory(tests) diff --git a/wrap/Class.cpp b/wrap/Class.cpp deleted file mode 100644 index 65ce9eab7..000000000 --- a/wrap/Class.cpp +++ /dev/null @@ -1,897 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Class.cpp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "Class.h" -#include "utilities.h" -#include "Argument.h" -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include // std::ostream_iterator -//#include // on Linux GCC: fails with error regarding needing C++0x std flags -//#include // same failure as above -#include // works on Linux GCC -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -void Class::assignParent(const Qualified& parent) { - parentClass.reset(parent); -} - -/* ************************************************************************* */ -boost::optional Class::qualifiedParent() const { - boost::optional result = boost::none; - if (parentClass) - result = parentClass->qualifiedName("::"); - return result; -} - -/* ************************************************************************* */ -static void handleException(const out_of_range& oor, - const Class::Methods& methods) { - cerr << "Class::method: key not found: " << oor.what() << ", methods are:\n"; - using boost::adaptors::map_keys; - ostream_iterator out_it(cerr, "\n"); - boost::copy(methods | map_keys, out_it); -} - -/* ************************************************************************* */ -// Method& Class::mutableMethod(Str key) { -// try { -// return methods_.at(key); -// } catch (const out_of_range& oor) { -// handleException(oor, methods_); -// throw runtime_error("Internal error in wrap"); -// } -// } - -/* ************************************************************************* */ -const Method& Class::method(Str key) const { - try { - return methods_.at(key); - } catch (const out_of_range& oor) { - handleException(oor, methods_); - throw runtime_error("Internal error in wrap"); - } -} - -/* ************************************************************************* */ -void Class::matlab_proxy(Str toolboxPath, Str wrapperName, - const TypeAttributesTable& typeAttributes, FileWriter& wrapperFile, - vector& functionNames) const { - - // Create namespace folders - createNamespaceStructure(namespaces(), toolboxPath); - - // open destination classFile - string classFile = matlabName(toolboxPath); - FileWriter proxyFile(classFile, verbose_, "%"); - - // get the name of actual matlab object - const string matlabQualName = qualifiedName("."); - const string matlabUniqueName = qualifiedName(); - const string cppName = qualifiedName("::"); - - // emit class proxy code - // we want our class to inherit the handle class for memory purposes - const string parent = - parentClass ? parentClass->qualifiedName(".") : "handle"; - comment_fragment(proxyFile); - proxyFile.oss << "classdef " << name() << " < " << parent << endl; - proxyFile.oss << " properties\n"; - proxyFile.oss << " ptr_" << matlabUniqueName << " = 0\n"; - proxyFile.oss << " end\n"; - proxyFile.oss << " methods\n"; - - // Constructor - proxyFile.oss << " function obj = " << name() << "(varargin)\n"; - // Special pointer constructors - one in MATLAB to create an object and - // assign a pointer returned from a C++ function. In turn this MATLAB - // constructor calls a special C++ function that just adds the object to - // its collector. 
This allows wrapped functions to return objects in - // other wrap modules - to add these to their collectors the pointer is - // passed from one C++ module into matlab then back into the other C++ - // module. - pointer_constructor_fragments(proxyFile, wrapperFile, wrapperName, - functionNames); - wrapperFile.oss << "\n"; - - // Regular constructors - boost::optional cppBaseName = qualifiedParent(); - for (size_t i = 0; i < constructor.nrOverloads(); i++) { - ArgumentList args = constructor.argumentList(i); - const int id = (int) functionNames.size(); - constructor.proxy_fragment(proxyFile, wrapperName, (bool) parentClass, id, - args); - const string wrapFunctionName = constructor.wrapper_fragment(wrapperFile, - cppName, matlabUniqueName, cppBaseName, id, args); - wrapperFile.oss << "\n"; - functionNames.push_back(wrapFunctionName); - } - proxyFile.oss << " else\n"; - proxyFile.oss << " error('Arguments do not match any overload of " - << matlabQualName << " constructor');\n"; - proxyFile.oss << " end\n"; - if (parentClass) - proxyFile.oss << " obj = obj@" << parentClass->qualifiedName(".") - << "(uint64(" << ptr_constructor_key << "), base_ptr);\n"; - proxyFile.oss << " obj.ptr_" << matlabUniqueName << " = my_ptr;\n"; - proxyFile.oss << " end\n\n"; - - // Deconstructor - { - const int id = (int) functionNames.size(); - deconstructor.proxy_fragment(proxyFile, wrapperName, matlabUniqueName, id); - proxyFile.oss << "\n"; - const string functionName = deconstructor.wrapper_fragment(wrapperFile, - cppName, matlabUniqueName, id); - wrapperFile.oss << "\n"; - functionNames.push_back(functionName); - } - proxyFile.oss - << " function display(obj), obj.print(''); end\n %DISPLAY Calls print on the object\n"; - proxyFile.oss - << " function disp(obj), obj.display; end\n %DISP Calls print on the object\n"; - - // Methods - for(const Methods::value_type& name_m: methods_) { - const Method& m = name_m.second; - m.proxy_wrapper_fragments(proxyFile, wrapperFile, cppName, matlabQualName, - matlabUniqueName, wrapperName, typeAttributes, functionNames); - proxyFile.oss << "\n"; - wrapperFile.oss << "\n"; - } - if (hasSerialization) - serialization_fragments(proxyFile, wrapperFile, wrapperName, functionNames); - - proxyFile.oss << " end\n"; - proxyFile.oss << "\n"; - proxyFile.oss << " methods(Static = true)\n"; - - // Static methods - for(const StaticMethods::value_type& name_m: static_methods) { - const StaticMethod& m = name_m.second; - m.proxy_wrapper_fragments(proxyFile, wrapperFile, cppName, matlabQualName, - matlabUniqueName, wrapperName, typeAttributes, functionNames); - proxyFile.oss << "\n"; - wrapperFile.oss << "\n"; - } - if (hasSerialization) - deserialization_fragments(proxyFile, wrapperFile, wrapperName, - functionNames); - - proxyFile.oss << " end\n"; - proxyFile.oss << "end\n"; - - // Close file - proxyFile.emit(true); -} - -/* ************************************************************************* */ -void Class::pointer_constructor_fragments(FileWriter& proxyFile, - FileWriter& wrapperFile, Str wrapperName, - vector& functionNames) const { - - const string matlabUniqueName = qualifiedName(); - const string cppName = qualifiedName("::"); - - const int collectorInsertId = (int) functionNames.size(); - const string collectorInsertFunctionName = matlabUniqueName - + "_collectorInsertAndMakeBase_" - + boost::lexical_cast(collectorInsertId); - functionNames.push_back(collectorInsertFunctionName); - - int upcastFromVoidId; - string upcastFromVoidFunctionName; - if (isVirtual) { - 
upcastFromVoidId = (int) functionNames.size(); - upcastFromVoidFunctionName = matlabUniqueName + "_upcastFromVoid_" - + boost::lexical_cast(upcastFromVoidId); - functionNames.push_back(upcastFromVoidFunctionName); - } - - // MATLAB constructor that assigns pointer to matlab object then calls c++ - // function to add the object to the collector. - if (isVirtual) { - proxyFile.oss - << " if (nargin == 2 || (nargin == 3 && strcmp(varargin{3}, 'void')))"; - } else { - proxyFile.oss << " if nargin == 2"; - } - proxyFile.oss << " && isa(varargin{1}, 'uint64') && varargin{1} == uint64(" - << ptr_constructor_key << ")\n"; - if (isVirtual) { - proxyFile.oss << " if nargin == 2\n"; - proxyFile.oss << " my_ptr = varargin{2};\n"; - proxyFile.oss << " else\n"; - proxyFile.oss << " my_ptr = " << wrapperName << "(" - << upcastFromVoidId << ", varargin{2});\n"; - proxyFile.oss << " end\n"; - } else { - proxyFile.oss << " my_ptr = varargin{2};\n"; - } - if (!parentClass) // If this class has a base class, we'll get a base class pointer back - proxyFile.oss << " "; - else - proxyFile.oss << " base_ptr = "; - proxyFile.oss << wrapperName << "(" << collectorInsertId << ", my_ptr);\n"; // Call collector insert and get base class ptr - - // C++ function to add pointer from MATLAB to collector. The pointer always - // comes from a C++ return value; this mechanism allows the object to be added - // to a collector in a different wrap module. If this class has a base class, - // a new pointer to the base class is allocated and returned. - wrapperFile.oss << "void " << collectorInsertFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - wrapperFile.oss << "{\n"; - wrapperFile.oss << " mexAtExit(&_deleteAllObjects);\n"; - // Typedef boost::shared_ptr - wrapperFile.oss << " typedef boost::shared_ptr<" << cppName << "> Shared;\n"; - wrapperFile.oss << "\n"; - // Get self pointer passed in - wrapperFile.oss - << " Shared *self = *reinterpret_cast (mxGetData(in[0]));\n"; - // Add to collector - wrapperFile.oss << " collector_" << matlabUniqueName << ".insert(self);\n"; - // If we have a base class, return the base class pointer (MATLAB will call the base class collectorInsertAndMakeBase to add this to the collector and recurse the heirarchy) - boost::optional cppBaseName = qualifiedParent(); - if (cppBaseName) { - wrapperFile.oss << "\n"; - wrapperFile.oss << " typedef boost::shared_ptr<" << *cppBaseName - << "> SharedBase;\n"; - wrapperFile.oss - << " out[0] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, mxREAL);\n"; - wrapperFile.oss - << " *reinterpret_cast(mxGetData(out[0])) = new SharedBase(*self);\n"; - } - wrapperFile.oss << "}\n"; - - // If this is a virtual function, C++ function to dynamic upcast it from a - // shared_ptr. This mechanism allows automatic dynamic creation of the - // real underlying derived-most class when a C++ method returns a virtual - // base class. 
- if (isVirtual) - wrapperFile.oss << "\n" - "void " << upcastFromVoidFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[]) {\n" - " mexAtExit(&_deleteAllObjects);\n" - " typedef boost::shared_ptr<" << cppName - << "> Shared;\n" - " boost::shared_ptr *asVoid = *reinterpret_cast**> (mxGetData(in[0]));\n" - " out[0] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, mxREAL);\n" - " Shared *self = new Shared(boost::static_pointer_cast<" << cppName - << ">(*asVoid));\n" - " *reinterpret_cast(mxGetData(out[0])) = self;\n" - "}\n"; -} - -/* ************************************************************************* */ -Class Class::expandTemplate(const TemplateSubstitution& ts) const { - Class inst = *this; - inst.methods_ = expandMethodTemplate(methods_, ts); - inst.static_methods = expandMethodTemplate(static_methods, ts); - inst.constructor = constructor.expandTemplate(ts); - inst.deconstructor.name = inst.name(); - return inst; -} - -/* ************************************************************************* */ -vector Class::expandTemplate(Str templateArg, - const vector& instantiations) const { - vector result; - for(const Qualified& instName: instantiations) { - Qualified expandedClass = (Qualified) (*this); - expandedClass.expand(instName.name()); - const TemplateSubstitution ts(templateArg, instName, expandedClass); - Class inst = expandTemplate(ts); - inst.name_ = expandedClass.name(); - inst.templateArgs.clear(); - inst.typedefName = qualifiedName("::") + "<" + instName.qualifiedName("::") - + ">"; - inst.templateInstTypeList.push_back(instName); - inst.templateClass = *this; - result.push_back(inst); - } - return result; -} - -/* ************************************************************************* */ -vector Class::expandTemplate(Str templateArg, - const vector& integers) const { - vector result; - for(int i: integers) { - Qualified expandedClass = (Qualified) (*this); - stringstream ss; ss << i; - string instName = ss.str(); - expandedClass.expand(instName); - const TemplateSubstitution ts(templateArg, instName, expandedClass); - Class inst = expandTemplate(ts); - inst.name_ = expandedClass.name(); - inst.templateArgs.clear(); - inst.typedefName = qualifiedName("::") + "<" + instName + ">"; - result.push_back(inst); - } - return result; -} - -/* ************************************************************************* */ -void Class::addMethod(bool verbose, bool is_const, Str methodName, - const ArgumentList& argumentList, - const ReturnValue& returnValue, const Template& tmplate) { - // Check if templated - if (tmplate.valid()) { - try { - templateMethods_[methodName].addOverload(methodName, argumentList, - returnValue, is_const, - tmplate.argName(), verbose); - } catch (const std::runtime_error& e) { - throw std::runtime_error("Class::addMethod: error adding " + name_ + - "::" + methodName + "\n" + e.what()); - } - // Create method to expand - // For all values of the template argument, create a new method - for (const Qualified& instName : tmplate.argValues()) { - const TemplateSubstitution ts(tmplate.argName(), instName, *this); - // substitute template in arguments - ArgumentList expandedArgs = argumentList.expandTemplate(ts); - // do the same for return type - ReturnValue expandedRetVal = returnValue.expandTemplate(ts); - // Now stick in new overload stack with expandedMethodName key - // but note we use the same, unexpanded methodName in overload - string expandedMethodName = methodName + instName.name(); - try { - 
methods_[expandedMethodName].addOverload(methodName, expandedArgs, - expandedRetVal, is_const, - instName, verbose); - } catch (const std::runtime_error& e) { - throw std::runtime_error("Class::addMethod: error adding " + name_ + - "::" + expandedMethodName + "\n" + e.what()); - } - } - } else { - try { - // just add overload - methods_[methodName].addOverload(methodName, argumentList, returnValue, - is_const, boost::none, verbose); - nontemplateMethods_[methodName].addOverload(methodName, argumentList, - returnValue, is_const, - boost::none, verbose); - } catch (const std::runtime_error& e) { - throw std::runtime_error("Class::addMethod: error adding " + name_ + - "::" + methodName + "\n" + e.what()); - } - } -} - -/* ************************************************************************* */ -void Class::erase_serialization(Methods& methods) { - Methods::iterator it = methods.find("serializable"); - if (it != methods.end()) { -#ifndef WRAP_DISABLE_SERIALIZE - isSerializable = true; -#else - // cout << "Ignoring serializable() flag in class " << name << endl; -#endif - methods.erase(it); - } - - it = methods.find("serialize"); - if (it != methods.end()) { -#ifndef WRAP_DISABLE_SERIALIZE - isSerializable = true; - hasSerialization = true; -#else - // cout << "Ignoring serialize() flag in class " << name << endl; -#endif - methods.erase(it); - } -} - -void Class::erase_serialization() { - erase_serialization(methods_); - erase_serialization(nontemplateMethods_); -} - -/* ************************************************************************* */ -void Class::verifyAll(vector& validTypes, bool& hasSerialiable) const { - - hasSerialiable |= isSerializable; - - // verify all of the function arguments - //TODO:verifyArguments(validTypes, constructor.args_list); - verifyArguments(validTypes, static_methods); - verifyArguments(validTypes, methods_); - - // verify function return types - verifyReturnTypes(validTypes, static_methods); - verifyReturnTypes(validTypes, methods_); - - // verify parents - boost::optional parent = qualifiedParent(); - if (parent - && find(validTypes.begin(), validTypes.end(), *parent) - == validTypes.end()) - throw DependencyMissing(*parent, qualifiedName("::")); -} - -/* ************************************************************************* */ -void Class::appendInheritedMethods(const Class& cls, - const vector& classes) { - - if (cls.parentClass) { - - // Find parent - for(const Class& parent: classes) { - // We found a parent class for our parent, TODO improve ! - if (parent.name() == cls.parentClass->name()) { - methods_.insert(parent.methods_.begin(), parent.methods_.end()); - appendInheritedMethods(parent, classes); - } - } - } -} - -/* ************************************************************************* */ -void Class::removeInheritedNontemplateMethods(vector& classes) { - if (!parentClass) return; - // Find parent - auto parentIt = std::find_if(classes.begin(), classes.end(), - [&](const Class& cls) { return cls.name() == parentClass->name(); }); - if (parentIt == classes.end()) return; // ignore if parent not found - Class& parent = *parentIt; - - // Only check nontemplateMethods_ - for(const string& methodName: nontemplateMethods_ | boost::adaptors::map_keys) { - // check if the method exists in its parent - // Check against parent's methods_ because all the methods of grand - // parent and grand-grand-parent, etc. are already included there - // This is to avoid looking into higher level grand parents... 
-    auto it = parent.methods_.find(methodName);
-    if (it == parent.methods_.end()) continue; // if not: ignore!
-
-    Method& parentMethod = it->second;
-    Method& method = nontemplateMethods_[methodName];
-    // check if they have the same modifiers (const/static/templateArgs)
-    if (!method.isSameModifiers(parentMethod)) continue; // if not: ignore
-
-    // check and remove duplicate overloads
-    auto methodOverloads = boost::combine(method.returnVals_, method.argLists_);
-    auto parentMethodOverloads = boost::combine(parentMethod.returnVals_, parentMethod.argLists_);
-    auto result = boost::remove_if(
-        methodOverloads,
-        [&](boost::tuple<ReturnValue&, ArgumentList&> const& overload) {
-          bool found = std::find_if(
-              parentMethodOverloads.begin(),
-              parentMethodOverloads.end(),
-              [&](boost::tuple<ReturnValue&, ArgumentList&> const&
-                      parentOverload) {
-                return overload.get<0>() == parentOverload.get<0>() &&
-                    overload.get<1>().isSameSignature(parentOverload.get<1>());
-              }) != parentMethodOverloads.end();
-          return found;
-        });
-    // remove all duplicate overloads
-    method.returnVals_.erase(boost::get<0>(result.get_iterator_tuple()),
-        method.returnVals_.end());
-    method.argLists_.erase(boost::get<1>(result.get_iterator_tuple()),
-        method.argLists_.end());
-  }
-  // [Optional] remove the entire method if it has no overload
-  for (auto it = nontemplateMethods_.begin(), ite = nontemplateMethods_.end(); it != ite;)
-    if (it->second.nrOverloads() == 0) it = nontemplateMethods_.erase(it); else ++it;
-}
-
-/* ************************************************************************* */
-string Class::getTypedef() const {
-  string result;
-  for(Str namesp: namespaces()) {
-    result += ("namespace " + namesp + " { ");
-  }
-  result += ("typedef " + typedefName + " " + name() + ";");
-  for (size_t i = 0; i < namespaces().size(); ++i) {
-    result += " }";
-  }
-  return result;
-}
-
-/* ************************************************************************* */
-void Class::comment_fragment(FileWriter& proxyFile) const {
-  proxyFile.oss << "%class " << name() << ", see Doxygen page for details\n";
-  proxyFile.oss
-      << "%at http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html\n";
-
-  constructor.comment_fragment(proxyFile);
-
-  if (!methods_.empty())
-    proxyFile.oss << "%\n%-------Methods-------\n";
-  for(const Methods::value_type& name_m: methods_)
-    name_m.second.comment_fragment(proxyFile);
-
-  if (!static_methods.empty())
-    proxyFile.oss << "%\n%-------Static Methods-------\n";
-  for(const StaticMethods::value_type& name_m: static_methods)
-    name_m.second.comment_fragment(proxyFile);
-
-  if (hasSerialization) {
-    proxyFile.oss << "%\n%-------Serialization Interface-------\n";
-    proxyFile.oss << "%string_serialize() : returns string\n";
-    proxyFile.oss << "%string_deserialize(string serialized) : returns "
-        << name() << "\n";
-  }
-
-  proxyFile.oss << "%\n";
-}
-
-/* ************************************************************************* */
-void Class::serialization_fragments(FileWriter& proxyFile,
-    FileWriter& wrapperFile, Str wrapperName,
-    vector<string>& functionNames) const {
-
-//void Point3_string_serialize_17(int nargout, mxArray *out[], int nargin, const mxArray *in[])
-//{
-//  typedef boost::shared_ptr<Point3> Shared;
-//  checkArguments("string_serialize",nargout,nargin-1,0);
-//  Shared obj = unwrap_shared_ptr<Point3>(in[0], "ptr_Point3");
-//  ostringstream out_archive_stream;
-//  boost::archive::text_oarchive out_archive(out_archive_stream);
-//  out_archive << *obj;
-//  out[0] = wrap< string >(out_archive_stream.str());
-//}
-
-  int serialize_id = functionNames.size();
-  const string matlabQualName = qualifiedName(".");
-  const string matlabUniqueName = qualifiedName();
-  const string cppClassName = qualifiedName("::");
-  const string wrapFunctionNameSerialize = matlabUniqueName
-      + "_string_serialize_" + boost::lexical_cast<string>(serialize_id);
-  functionNames.push_back(wrapFunctionNameSerialize);
-
-  // call
-  //void Point3_string_serialize_17(int nargout, mxArray *out[], int nargin, const mxArray *in[])
-  wrapperFile.oss << "void " << wrapFunctionNameSerialize
-      << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n";
-  wrapperFile.oss << "{\n";
-  wrapperFile.oss << " typedef boost::shared_ptr<" << cppClassName
-      << "> Shared;" << endl;
-
-  // check arguments - for serialize, no arguments
-  // example: checkArguments("string_serialize",nargout,nargin-1,0);
-  wrapperFile.oss
-      << " checkArguments(\"string_serialize\",nargout,nargin-1,0);\n";
-
-  // get class pointer
-  // example: Shared obj = unwrap_shared_ptr<Point3>(in[0], "ptr_Point3");
-  wrapperFile.oss << " Shared obj = unwrap_shared_ptr<" << cppClassName
-      << ">(in[0], \"ptr_" << matlabUniqueName << "\");" << endl;
-
-  // Serialization boilerplate
-  wrapperFile.oss << " ostringstream out_archive_stream;\n";
-  wrapperFile.oss
-      << " boost::archive::text_oarchive out_archive(out_archive_stream);\n";
-  wrapperFile.oss << " out_archive << *obj;\n";
-  wrapperFile.oss << " out[0] = wrap< string >(out_archive_stream.str());\n";
-
-  // finish
-  wrapperFile.oss << "}\n";
-
-  // Generate code for matlab function
-// function varargout string_serialize(this, varargin)
-// % STRING_SERIALIZE usage: string_serialize() : returns string
-// % Doxygen can be found at http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html
-// if length(varargin) == 0
-// varargout{1} = geometry_wrapper(15, this, varargin{:});
-// else
-// error('Arguments do not match any overload of function Point3.string_serialize');
-// end
-// end
-
-  proxyFile.oss
-      << " function varargout = string_serialize(this, varargin)\n";
-  proxyFile.oss
-      << " % STRING_SERIALIZE usage: string_serialize() : returns string\n";
-  proxyFile.oss
-      << " % Doxygen can be found at http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html\n";
-  proxyFile.oss << " if length(varargin) == 0\n";
-  proxyFile.oss << " varargout{1} = " << wrapperName << "("
-      << boost::lexical_cast<string>(serialize_id) << ", this, varargin{:});\n";
-  proxyFile.oss << " else\n";
-  proxyFile.oss
-      << " error('Arguments do not match any overload of function "
-      << matlabQualName << ".string_serialize');\n";
-  proxyFile.oss << " end\n";
-  proxyFile.oss << " end\n\n";
-
-  // Generate code for matlab save function
-// function sobj = saveobj(obj)
-// % SAVEOBJ Saves the object to a matlab-readable format
-// sobj = obj.string_serialize();
-// end
-
-  proxyFile.oss << " function sobj = saveobj(obj)\n";
-  proxyFile.oss
-      << " % SAVEOBJ Saves the object to a matlab-readable format\n";
-  proxyFile.oss << " sobj = obj.string_serialize();\n";
-  proxyFile.oss << " end\n";
-}
-
-/* ************************************************************************* */
-void Class::deserialization_fragments(FileWriter& proxyFile,
-    FileWriter& wrapperFile, Str wrapperName,
-    vector<string>& functionNames) const {
-  //void Point3_string_deserialize_18(int nargout, mxArray *out[], int nargin, const mxArray *in[])
-  //{
-  // typedef boost::shared_ptr<Point3> Shared;
-  // checkArguments("Point3.string_deserialize",nargout,nargin,1);
-  // string serialized = unwrap< string >(in[0]);
-  // istringstream in_archive_stream(serialized);
-  // boost::archive::text_iarchive in_archive(in_archive_stream);
-  // Shared output(new Point3(0,0,0));
-  // in_archive >> *output;
-  // out[0] = wrap_shared_ptr(output,"Point3", false);
-  //}
-  int deserialize_id = functionNames.size();
-  const string matlabQualName = qualifiedName(".");
-  const string matlabUniqueName = qualifiedName();
-  const string cppClassName = qualifiedName("::");
-  const string wrapFunctionNameDeserialize = matlabUniqueName
-      + "_string_deserialize_" + boost::lexical_cast<string>(deserialize_id);
-  functionNames.push_back(wrapFunctionNameDeserialize);
-
-  // call
-  wrapperFile.oss << "void " << wrapFunctionNameDeserialize
-      << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n";
-  wrapperFile.oss << "{\n";
-  wrapperFile.oss << " typedef boost::shared_ptr<" << cppClassName
-      << "> Shared;" << endl;
-
-  // check arguments - for deserialize, 1 string argument
-  wrapperFile.oss << " checkArguments(\"" << matlabUniqueName
-      << ".string_deserialize\",nargout,nargin,1);\n";
-
-  // string argument with deserialization boilerplate
-  wrapperFile.oss << " string serialized = unwrap< string >(in[0]);\n";
-  wrapperFile.oss << " istringstream in_archive_stream(serialized);\n";
-  wrapperFile.oss
-      << " boost::archive::text_iarchive in_archive(in_archive_stream);\n";
-  wrapperFile.oss << " Shared output(new " << cppClassName << "());\n";
-  wrapperFile.oss << " in_archive >> *output;\n";
-  wrapperFile.oss << " out[0] = wrap_shared_ptr(output,\"" << matlabQualName
-      << "\", false);\n";
-  wrapperFile.oss << "}\n";
-
-  // Generate matlab function
-// function varargout = string_deserialize(varargin)
-// % STRING_DESERIALIZE usage: string_deserialize() : returns Point3
-// % Doxygen can be found at http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html
-// if length(varargin) == 1
-// varargout{1} = geometry_wrapper(18, varargin{:});
-// else
-// error('Arguments do not match any overload of function Point3.string_deserialize');
-// end
-// end
-
-  proxyFile.oss << " function varargout = string_deserialize(varargin)\n";
-  proxyFile.oss
-      << " % STRING_DESERIALIZE usage: string_deserialize() : returns "
-      << matlabQualName << "\n";
-  proxyFile.oss
-      << " % Doxygen can be found at http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html\n";
-  proxyFile.oss << " if length(varargin) == 1\n";
-  proxyFile.oss << " varargout{1} = " << wrapperName << "("
-      << boost::lexical_cast<string>(deserialize_id) << ", varargin{:});\n";
-  proxyFile.oss << " else\n";
-  proxyFile.oss
-      << " error('Arguments do not match any overload of function "
-      << matlabQualName << ".string_deserialize');\n";
-  proxyFile.oss << " end\n";
-  proxyFile.oss << " end\n\n";
-
-  // Generate matlab load function
-// function obj = loadobj(sobj)
-// % LOADOBJ Saves the object to a matlab-readable format
-// obj = Point3.string_deserialize(sobj);
-// end
-
-  proxyFile.oss << " function obj = loadobj(sobj)\n";
-  proxyFile.oss
-      << " % LOADOBJ Saves the object to a matlab-readable format\n";
-  proxyFile.oss << " obj = " << matlabQualName
-      << ".string_deserialize(sobj);\n";
-  proxyFile.oss << " end" << endl;
-}
-
-/* ************************************************************************* */
-string Class::getSerializationExport() const {
-  //BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsamSharedDiagonal");
-  return "BOOST_CLASS_EXPORT_GUID(" + qualifiedName("::") + ", \""
-      + qualifiedName() + "\");";
-}
-
-/* ************************************************************************* */
-void Class::python_wrapper(FileWriter& wrapperFile) const {
-  wrapperFile.oss << "class_<" << name() << ">(\"" << name() << "\")\n";
-  constructor.python_wrapper(wrapperFile, name());
-  for(const StaticMethod& m: static_methods | boost::adaptors::map_values)
-    m.python_wrapper(wrapperFile, name());
-  for(const Method& m: methods_ | boost::adaptors::map_values)
-    m.python_wrapper(wrapperFile, name());
-  wrapperFile.oss << ";\n\n";
-}
-
-/* ************************************************************************* */
-void Class::emit_cython_pxd(FileWriter& pxdFile) const {
-  pxdFile.oss << "cdef extern from \"" << includeFile << "\"";
-  string ns = qualifiedNamespaces("::");
-  if (!ns.empty())
-    pxdFile.oss << " namespace \"" << ns << "\"";
-  pxdFile.oss << ":" << endl;
-  pxdFile.oss << " cdef cppclass " << pxdClassName() << " \"" << qualifiedName("::") << "\"";
-  if (templateArgs.size()>0) {
-    pxdFile.oss << "[";
-    for(size_t i = 0; i<templateArgs.size(); ++i) {
-      pxdFile.oss << templateArgs[i];
-      if (i < templateArgs.size()-1) pxdFile.oss << ",";
-    }
-    pxdFile.oss << "]";
-  }
-  if (getParent()) pxdFile.oss << "(" << getParent()->pxdClassName() << ")";
-  pxdFile.oss << ":\n";
-
-  constructor.emit_cython_pxd(pxdFile, *this);
-  if (constructor.nrOverloads()>0) pxdFile.oss << "\n";
-
-  for(const StaticMethod& m: static_methods | boost::adaptors::map_values)
-    m.emit_cython_pxd(pxdFile, *this);
-  if (static_methods.size()>0) pxdFile.oss << "\n";
-
-  for(const Method& m: nontemplateMethods_ | boost::adaptors::map_values)
-    m.emit_cython_pxd(pxdFile, *this);
-
-  for(const TemplateMethod& m: templateMethods_ | boost::adaptors::map_values)
-    m.emit_cython_pxd(pxdFile, *this);
-  size_t numMethods = constructor.nrOverloads() + static_methods.size() +
-      methods_.size() + templateMethods_.size();
-  if (numMethods == 0)
-    pxdFile.oss << " pass\n";
-}
-/* ************************************************************************* */
-void Class::emit_cython_wrapper_pxd(FileWriter& pxdFile) const {
-  pxdFile.oss << "\ncdef class " << pyxClassName();
-  if (getParent())
-    pxdFile.oss << "(" << getParent()->pyxClassName() << ")";
-  pxdFile.oss << ":\n";
-  pxdFile.oss << " cdef " << shared_pxd_class_in_pyx() << " "
-      << shared_pxd_obj_in_pyx() << "\n";
-  // cyCreateFromShared
-  pxdFile.oss << " @staticmethod\n";
-  pxdFile.oss << " cdef " << pyxClassName() << " cyCreateFromShared(const "
-      << shared_pxd_class_in_pyx() << "& other)\n";
-  for(const StaticMethod& m: static_methods | boost::adaptors::map_values)
-    m.emit_cython_wrapper_pxd(pxdFile, *this);
-  if (static_methods.size()>0) pxdFile.oss << "\n";
-}
-
-/* ************************************************************************* */
-void Class::pyxInitParentObj(FileWriter& pyxFile, const std::string& pyObj,
-    const std::string& cySharedObj,
-    const std::vector<Class>& allClasses) const {
-  if (parentClass) {
-    pyxFile.oss << pyObj << "." << parentClass->shared_pxd_obj_in_pyx() << " = "
-        << "<" << parentClass->shared_pxd_class_in_pyx() << ">("
-        << cySharedObj << ")\n";
-    // Find the parent class with name "parentClass" and point its cython obj
-    // to the same pointer
-    auto parent_it = find_if(allClasses.begin(), allClasses.end(),
-        [this](const Class& cls) {
-          return cls.pxdClassName() ==
-              this->parentClass->pxdClassName();
-        });
-    if (parent_it == allClasses.end()) {
-      cerr << "Can't find parent class: " << parentClass->pxdClassName();
-      throw std::runtime_error("Parent class not found!");
-    }
-    parent_it->pyxInitParentObj(pyxFile, pyObj, cySharedObj, allClasses);
-  }
-}
-
-/* ************************************************************************* */
-void Class::pyxDynamicCast(FileWriter& pyxFile, const Class& curLevel,
-    const std::vector<Class>& allClasses) const {
-  std::string me = this->pyxClassName(), sharedMe = this->shared_pxd_class_in_pyx();
-  if (curLevel.parentClass) {
-    std::string parent = curLevel.parentClass->pyxClassName(),
-        parentObj = curLevel.parentClass->shared_pxd_obj_in_pyx(),
-        parentCythonClass = curLevel.parentClass->pxd_class_in_pyx();
-    pyxFile.oss << "def dynamic_cast_" << me << "_" << parent << "(" << parent
-        << " parent):\n";
-    pyxFile.oss << " try:\n";
-    pyxFile.oss << " return " << me << ".cyCreateFromShared(<" << sharedMe
-        << ">dynamic_pointer_cast[" << pxd_class_in_pyx() << ","
-        << parentCythonClass << "](parent." << parentObj
-        << "))\n";
-    pyxFile.oss << " except:\n";
-    pyxFile.oss << " raise TypeError('dynamic cast failed!')\n";
-    // Move up higher to one level: Find the parent class with name "parentClass"
-    auto parent_it = find_if(allClasses.begin(), allClasses.end(),
-        [&curLevel](const Class& cls) {
-          return cls.pxdClassName() ==
-              curLevel.parentClass->pxdClassName();
-        });
-    if (parent_it == allClasses.end()) {
-      cerr << "Can't find parent class: " << parentClass->pxdClassName();
-      throw std::runtime_error("Parent class not found!");
-    }
-    pyxDynamicCast(pyxFile, *parent_it, allClasses);
-  }
-}
-
-/* ************************************************************************* */
-void Class::emit_cython_pyx(FileWriter& pyxFile, const std::vector<Class>& allClasses) const {
-  pyxFile.oss << "cdef class " << pyxClassName();
-  if (parentClass) pyxFile.oss << "(" << parentClass->pyxClassName() << ")";
-  pyxFile.oss << ":\n";
-
-  // __init___
-  pyxFile.oss << " def __init__(self, *args, **kwargs):\n";
-  pyxFile.oss << " cdef list __params\n";
-  pyxFile.oss << " self." << shared_pxd_obj_in_pyx() << " = " << shared_pxd_class_in_pyx() << "()\n";
-  pyxFile.oss << " if len(args)==0 and len(kwargs)==1 and kwargs.has_key('cyCreateFromShared'):\n return\n";
-
-  // Constructors
-  constructor.emit_cython_pyx(pyxFile, *this);
-  pyxFile.oss << " if (self." << shared_pxd_obj_in_pyx() << ".use_count()==0):\n";
-  pyxFile.oss << " raise TypeError('" << pyxClassName()
-      << " construction failed!')\n";
-  pyxInitParentObj(pyxFile, " self", "self." + shared_pxd_obj_in_pyx(), allClasses);
-  pyxFile.oss << "\n";
-
-  // cyCreateFromShared
-  pyxFile.oss << " @staticmethod\n";
-  pyxFile.oss << " cdef " << pyxClassName() << " cyCreateFromShared(const "
-      << shared_pxd_class_in_pyx() << "& other):\n"
-      << " if other.get() == NULL:\n"
-      << " raise RuntimeError('Cannot create object from a nullptr!')\n"
-      << " cdef " << pyxClassName() << " return_value = " << pyxClassName() << "(cyCreateFromShared=True)\n"
-      << " return_value." << shared_pxd_obj_in_pyx() << " = other\n";
-  pyxInitParentObj(pyxFile, " return_value", "other", allClasses);
-  pyxFile.oss << " return return_value" << "\n\n";
-
-  for(const StaticMethod& m: static_methods | boost::adaptors::map_values)
-    m.emit_cython_pyx(pyxFile, *this);
-  if (static_methods.size()>0) pyxFile.oss << "\n";
-
-  for(const Method& m: methods_ | boost::adaptors::map_values)
-    m.emit_cython_pyx(pyxFile, *this);
-
-  pyxDynamicCast(pyxFile, *this, allClasses);
-
-  pyxFile.oss << "\n\n";
-}
-
-/* ************************************************************************* */
diff --git a/wrap/Class.h b/wrap/Class.h
deleted file mode 100644
index 3df37fe67..000000000
--- a/wrap/Class.h
+++ /dev/null
@@ -1,315 +0,0 @@
-/* ----------------------------------------------------------------------------
-
- * GTSAM Copyright 2010, Georgia Tech Research Corporation,
- * Atlanta, Georgia 30332-0415
- * All Rights Reserved
- * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
-
- * See LICENSE for the license information
-
- * -------------------------------------------------------------------------- */
-
-/**
- * @file Class.h
- * @brief describe the C++ class that is being wrapped
- * @author Frank Dellaert
- * @author Andrew Melim
- * @author Richard Roberts
- **/
-
-#pragma once
-
-#include "spirit.h"
-#include "Template.h"
-#include "Constructor.h"
-#include "Deconstructor.h"
-#include "Method.h"
-#include "StaticMethod.h"
-#include "TemplateMethod.h"
-#include "TypeAttributesTable.h"
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-variable"
-#endif
-#include
-#include
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
-
-namespace bl = boost::lambda;
-
-#include
-#include
-
-#include
-#include
-
-namespace wrap {
-
-/// Class has name, constructors, methods
-class Class: public Qualified {
-
-public:
-  typedef const std::string& Str;
-  typedef std::map<std::string, Method> Methods;
-  typedef std::map<std::string, StaticMethod> StaticMethods;
-  typedef std::map<std::string, TemplateMethod> TemplateMethods;
-
-private:
-
-  boost::optional<Qualified> parentClass; ///< The *single* parent
-  Methods methods_; ///< Class methods, including all expanded/instantiated template methods -- to be serialized to matlab and Python classes in Cython pyx
-  Methods nontemplateMethods_; ///< only nontemplate methods -- to be serialized into Cython pxd
-  TemplateMethods templateMethods_; ///< only template methods -- to be serialized into Cython pxd
-  // Method& mutableMethod(Str key);
-
-public:
-
-  StaticMethods static_methods; ///< Static methods
-
-  // Then the instance variables are set directly by the Module constructor
-  std::vector<std::string> templateArgs; ///< Template arguments
-  std::string typedefName; ///< The name to typedef *from*, if this class is actually a typedef, i.e. typedef [typedefName] [name]
-  std::vector<Qualified> templateInstTypeList; ///< the original typelist used to instantiate this class from a template.
-      ///< Empty if it's not an instantiation. Needed for template classes in Cython pxd.
-  boost::optional<Qualified> templateClass = boost::none; ///< qualified name of the original template class from which this class was instantiated.
-      ///< boost::none if not an instantiation. Needed for template classes in Cython pxd.
-  bool isVirtual; ///< Whether the class is part of a virtual inheritance chain
-  bool isSerializable; ///< Whether we can use boost.serialization to serialize the class - creates exports
-  bool hasSerialization; ///< Whether we should create the serialization functions
-  Constructor constructor; ///< Class constructors
-  Deconstructor deconstructor; ///< Deconstructor to deallocate C++ object
-  bool verbose_; ///< verbose flag
-  std::string includeFile;
-
-  /// Constructor creates an empty class
-  Class(bool verbose = true) :
-      parentClass(boost::none), isVirtual(false), isSerializable(false), hasSerialization(
-          false), deconstructor(verbose), verbose_(verbose) {
-  }
-
-  Class(const std::string& name, bool verbose = true)
-      : Qualified(name, Qualified::Category::CLASS),
-        parentClass(boost::none),
-        isVirtual(false),
-        isSerializable(false),
-        hasSerialization(false),
-        deconstructor(verbose),
-        verbose_(verbose) {}
-
-  void assignParent(const Qualified& parent);
-
-  boost::optional qualifiedParent() const;
-  boost::optional<Qualified> getParent() const { return parentClass; }
-
-  size_t nrMethods() const {
-    return methods_.size();
-  }
-
-  const Method& method(Str key) const;
-
-  bool exists(Str name) const {
-    return methods_.find(name) != methods_.end();
-  }
-
-  // And finally MATLAB code is emitted, methods below called by Module::matlab_code
-  void matlab_proxy(Str toolboxPath, Str wrapperName,
-      const TypeAttributesTable& typeAttributes, FileWriter& wrapperFile,
-      std::vector<std::string>& functionNames) const; ///< emit proxy class
-
-  Class expandTemplate(const TemplateSubstitution& ts) const;
-
-  std::vector<Class> expandTemplate(Str templateArg,
-      const std::vector<Qualified>& instantiations) const;
-
-  // Create new classes with integer template arguments
-  std::vector<Class> expandTemplate(Str templateArg,
-      const std::vector<int>& integers) const;
-
-  /// Add potentially overloaded, potentially templated method
-  void addMethod(bool verbose, bool is_const, Str methodName,
-      const ArgumentList& argumentList, const ReturnValue& returnValue,
-      const Template& tmplate);
-
-  /// Post-process classes for serialization markers
-  void erase_serialization(); // non-const !
-  void erase_serialization(Methods& methods); // non-const !
-
-  /// verify all of the function arguments
-  void verifyAll(std::vector<std::string>& functionNames,
-      bool& hasSerialiable) const;
-
-  void appendInheritedMethods(const Class& cls,
-      const std::vector<Class>& classes);
-
-  void removeInheritedNontemplateMethods(std::vector<Class>& classes);
-
-  /// The typedef line for this class, if this class is a typedef, otherwise returns an empty string.
-  std::string getTypedef() const;
-
-  /// Returns the string for an export flag
-  std::string getSerializationExport() const;
-
-  /// Creates a member function that performs serialization
-  void serialization_fragments(FileWriter& proxyFile, FileWriter& wrapperFile,
-      Str wrapperName, std::vector<std::string>& functionNames) const;
-
-  /// Creates a static member function that performs deserialization
-  void deserialization_fragments(FileWriter& proxyFile, FileWriter& wrapperFile,
-      Str wrapperName, std::vector<std::string>& functionNames) const;
-
-  // emit python wrapper
-  void python_wrapper(FileWriter& wrapperFile) const;
-
-  // emit cython wrapper
-  void emit_cython_pxd(FileWriter& pxdFile) const;
-  void emit_cython_wrapper_pxd(FileWriter& pxdFile) const;
-  void emit_cython_pyx(FileWriter& pyxFile,
-      const std::vector<Class>& allClasses) const;
-  void pyxInitParentObj(FileWriter& pyxFile, const std::string& pyObj,
-      const std::string& cySharedObj,
-      const std::vector<Class>& allClasses) const;
-  void pyxDynamicCast(FileWriter& pyxFile, const Class& curLevel,
-      const std::vector<Class>& allClasses) const;
-
-  friend std::ostream& operator<<(std::ostream& os, const Class& cls) {
-    os << "class " << cls.name() << "{\n";
-    os << cls.constructor << ";\n";
-    for(const StaticMethod& m: cls.static_methods | boost::adaptors::map_values)
-      os << m << ";\n";
-    for(const Method& m: cls.methods_ | boost::adaptors::map_values)
-      os << m << ";\n";
-    os << "};" << std::endl;
-    return os;
-  }
-
-private:
-
-  void pointer_constructor_fragments(FileWriter& proxyFile,
-      FileWriter& wrapperFile, Str wrapperName,
-      std::vector<std::string>& functionNames) const;
-
-  void comment_fragment(FileWriter& proxyFile) const;
-};
-
-/* ************************************************************************* */
-// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html
-struct ClassGrammar: public classic::grammar<ClassGrammar> {
-
-  Class& cls_; ///< successful parse will be placed in here
-  Template& template_; ///< result needs to be visible outside
-
-  /// Construct type grammar and specify where result is placed
-  ClassGrammar(Class& cls, Template& t) :
-      cls_(cls), template_(t) {
-  }
-
-  /// Definition of type grammar
-  template<typename ScannerT>
-  struct definition: BasicRules<ScannerT> {
-
-    using BasicRules<ScannerT>::name_p;
-    using BasicRules<ScannerT>::className_p;
-    using BasicRules<ScannerT>::comments_p;
-
-    // NOTE: allows for pointers to all types
-    ArgumentList args;
-    ArgumentListGrammar argumentList_g;
-
-    Constructor constructor0, constructor;
-
-    ReturnValue retVal0, retVal;
-    ReturnValueGrammar returnValue_g;
-
-    Template methodTemplate;
-    TemplateGrammar methodTemplate_g, classTemplate_g;
-
-    std::string methodName;
-    bool isConst, T, F;
-
-    // Parent class
-    Qualified possibleParent;
-    TypeGrammar classParent_g;
-
-    classic::rule<ScannerT> constructor_p, methodName_p, method_p,
-        staticMethodName_p, static_method_p, templateList_p, classParent_p,
-        functions_p, class_p;
-
-    definition(ClassGrammar const& self) :
-        argumentList_g(args), returnValue_g(retVal), //
-        methodTemplate_g(methodTemplate), classTemplate_g(self.template_), //
-        T(true), F(false), classParent_g(possibleParent) {
-
-      using namespace classic;
-      bool verbose = false; // TODO
-
-      // ConstructorGrammar
-      constructor_p = (className_p >> argumentList_g >> ';' >> !comments_p) //
-          [bl::bind(&Constructor::push_back, bl::var(constructor),
-              bl::var(args))] //
-          [clear_a(args)];
-
-      // MethodGrammar
-      methodName_p = lexeme_d[(upper_p | lower_p) >> *(alnum_p | '_')];
-
-      // gtsam::Values retract(const gtsam::VectorValues& delta) const;
-      method_p =
!methodTemplate_g - >> (returnValue_g >> methodName_p[assign_a(methodName)] - >> argumentList_g >> !str_p("const")[assign_a(isConst, T)] >> ';' - >> *comments_p) // - [bl::bind(&Class::addMethod, bl::var(self.cls_), verbose, - bl::var(isConst), bl::var(methodName), bl::var(args), - bl::var(retVal), bl::var(methodTemplate))] // - [assign_a(retVal, retVal0)][clear_a(args)] // - [clear_a(methodTemplate)][assign_a(isConst, F)]; - - // StaticMethodGrammar - staticMethodName_p = lexeme_d[(upper_p | lower_p) >> *(alnum_p | '_')]; - - static_method_p = (str_p("static") >> returnValue_g - >> staticMethodName_p[assign_a(methodName)] >> argumentList_g >> ';' - >> *comments_p) // - [bl::bind(&StaticMethod::addOverload, - bl::var(self.cls_.static_methods)[bl::var(methodName)], - bl::var(methodName), bl::var(args), bl::var(retVal), boost::none, - verbose)] // - [assign_a(retVal, retVal0)][clear_a(args)]; - - // template - templateList_p = (str_p("template") >> '<' - >> name_p[push_back_a(self.cls_.templateArgs)] - >> *(',' >> name_p[push_back_a(self.cls_.templateArgs)]) >> '>'); - - // parse a full class - classParent_p = (':' >> classParent_g >> '{') // - [bl::bind(&Class::assignParent, bl::var(self.cls_), - bl::var(possibleParent))][clear_a(possibleParent)]; - - functions_p = constructor_p | method_p | static_method_p; - - // parse a full class - class_p = (!(classTemplate_g[push_back_a(self.cls_.templateArgs, - self.template_.argName())] | templateList_p) - >> !(str_p("virtual")[assign_a(self.cls_.isVirtual, T)]) - >> str_p("class") >> className_p[assign_a(self.cls_.name_)] - >> (classParent_p | '{') >> // - *(functions_p | comments_p) >> str_p("};")) // - [bl::bind(&Constructor::initializeOrCheck, bl::var(constructor), - bl::var(self.cls_.name_), boost::none, verbose)][assign_a( - self.cls_.constructor, constructor)] // - [assign_a(self.cls_.deconstructor.name, self.cls_.name_)] // - [assign_a(constructor, constructor0)]; - } - - classic::rule const& start() const { - return class_p; - } - - }; -}; -// ClassGrammar - -}// \namespace wrap - diff --git a/wrap/Constructor.cpp b/wrap/Constructor.cpp deleted file mode 100644 index 74719b289..000000000 --- a/wrap/Constructor.cpp +++ /dev/null @@ -1,160 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Constructor.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include -#include -#include - -#include - -#include "utilities.h" -#include "Constructor.h" -#include "Class.h" - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -string Constructor::matlab_wrapper_name(Str className) const { - string str = "new_" + className; - return str; -} - -/* ************************************************************************* */ -void Constructor::proxy_fragment(FileWriter& file, - const std::string& wrapperName, bool hasParent, - const int id, const ArgumentList args) const { - size_t nrArgs = args.size(); - // check for number of arguments... 
- file.oss << " elseif nargin == " << nrArgs; - if (nrArgs > 0) file.oss << " && "; - // ...and their types - bool first = true; - for (size_t i = 0; i < nrArgs; i++) { - if (!first) file.oss << " && "; - file.oss << "isa(varargin{" << i + 1 << "},'" << args[i].matlabClass(".") - << "')"; - first = false; - } - // emit code for calling constructor - if (hasParent) - file.oss << "\n [ my_ptr, base_ptr ] = "; - else - file.oss << "\n my_ptr = "; - file.oss << wrapperName << "(" << id; - // emit constructor arguments - for (size_t i = 0; i < nrArgs; i++) { - file.oss << ", "; - file.oss << "varargin{" << i + 1 << "}"; - } - file.oss << ");\n"; -} - -/* ************************************************************************* */ -string Constructor::wrapper_fragment(FileWriter& file, Str cppClassName, - Str matlabUniqueName, - boost::optional cppBaseClassName, - int id, const ArgumentList& al) const { - const string wrapFunctionName = - matlabUniqueName + "_constructor_" + boost::lexical_cast(id); - - file.oss << "void " << wrapFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])" - << endl; - file.oss << "{\n"; - file.oss << " mexAtExit(&_deleteAllObjects);\n"; - // Typedef boost::shared_ptr - file.oss << " typedef boost::shared_ptr<" << cppClassName << "> Shared;\n"; - file.oss << "\n"; - - // Check to see if there will be any arguments and remove {} for consiseness - if (al.size() > 0) al.matlab_unwrap(file); // unwrap arguments - file.oss << " Shared *self = new Shared(new " << cppClassName << "(" - << al.names() << "));" << endl; - file.oss << " collector_" << matlabUniqueName << ".insert(self);\n"; - - if (verbose_) - file.oss << " std::cout << \"constructed \" << self << std::endl;" << endl; - file.oss - << " out[0] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, mxREAL);" - << endl; - file.oss << " *reinterpret_cast (mxGetData(out[0])) = self;" - << endl; - - // If we have a base class, return the base class pointer (MATLAB will call - // the base class collectorInsertAndMakeBase to add this to the collector and - // recurse the heirarchy) - if (cppBaseClassName) { - file.oss << "\n"; - file.oss << " typedef boost::shared_ptr<" << *cppBaseClassName - << "> SharedBase;\n"; - file.oss << " out[1] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, " - "mxREAL);\n"; - file.oss << " *reinterpret_cast(mxGetData(out[1])) = new " - "SharedBase(*self);\n"; - } - - file.oss << "}" << endl; - - return wrapFunctionName; -} - -/* ************************************************************************* */ -void Constructor::python_wrapper(FileWriter& wrapperFile, Str className) const { - wrapperFile.oss << " .def(\"" << name_ << "\", &" << className - << "::" << name_ << ");\n"; -} - -/* ************************************************************************* */ -bool Constructor::hasDefaultConstructor() const { - for (size_t i = 0; i < nrOverloads(); i++) { - if (argumentList(i).size() == 0) return true; - } - return false; -} - -/* ************************************************************************* */ -void Constructor::emit_cython_pxd(FileWriter& pxdFile, const Class& cls) const { - for (size_t i = 0; i < nrOverloads(); i++) { - ArgumentList args = argumentList(i); - - // generate the constructor - pxdFile.oss << " " << cls.pxdClassName() << "("; - args.emit_cython_pxd(pxdFile, cls.pxdClassName(), cls.templateArgs); - pxdFile.oss << ") " << "except +\n"; - } -} - -/* ************************************************************************* */ -void 
Constructor::emit_cython_pyx(FileWriter& pyxFile, const Class& cls) const { - for (size_t i = 0; i < nrOverloads(); i++) { - ArgumentList args = argumentList(i); - pyxFile.oss << " try:\n"; - pyxFile.oss << pyx_resolveOverloadParams(args, true, 3); - pyxFile.oss - << argumentList(i).pyx_convertEigenTypeAndStorageOrder(" "); - - pyxFile.oss << " self." << cls.shared_pxd_obj_in_pyx() << " = " - << cls.shared_pxd_class_in_pyx() << "(new " << cls.pxd_class_in_pyx() - << "(" << args.pyx_asParams() << "))\n"; - pyxFile.oss << " except (AssertionError, ValueError):\n"; - pyxFile.oss << " pass\n"; - } -} - -/* ************************************************************************* */ diff --git a/wrap/Constructor.h b/wrap/Constructor.h deleted file mode 100644 index 172cd24a4..000000000 --- a/wrap/Constructor.h +++ /dev/null @@ -1,99 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Constructor.h - * @brief class describing a constructor + code generation - * @author Frank Dellaert - * @author Richard Roberts - **/ - -#pragma once - -#include "OverloadedFunction.h" -#include -#include -#include - -namespace wrap { - -// Forward declaration -class Class; - -// Constructor class -struct Constructor: public OverloadedFunction { - - typedef const std::string& Str; - - /// Constructor creates an empty class - Constructor(bool verbose = false) { - verbose_ = verbose; - } - - Constructor expandTemplate(const TemplateSubstitution& ts) const { - Constructor inst = *this; - inst.argLists_ = expandArgumentListsTemplate(ts); - inst.name_ = ts.expandedClassName(); - return inst; - } - - /// return true if the default constructor exists - bool hasDefaultConstructor() const; - - // MATLAB code generation - // toolboxPath is main toolbox directory, e.g., ../matlab - // classFile is class proxy file, e.g., ../matlab/@Point2/Point2.m - - /// wrapper name - std::string matlab_wrapper_name(Str className) const; - - void comment_fragment(FileWriter& proxyFile) const { - if (nrOverloads() > 0) - proxyFile.oss << "%\n%-------Constructors-------\n"; - for (size_t i = 0; i < nrOverloads(); i++) { - proxyFile.oss << "%"; - argumentList(i).emit_prototype(proxyFile, name_); - proxyFile.oss << "\n"; - } - } - - /** - * Create fragment to select constructor in proxy class, e.g., - * if nargin == 2, obj.self = new_Pose3_RP(varargin{1},varargin{2}); end - */ - void proxy_fragment(FileWriter& file, Str wrapperName, bool hasParent, - const int id, const ArgumentList args) const; - - /// cpp wrapper - std::string wrapper_fragment(FileWriter& file, Str cppClassName, - Str matlabUniqueName, boost::optional cppBaseClassName, int id, - const ArgumentList& al) const; - - /// constructor function - void generate_construct(FileWriter& file, Str cppClassName, - std::vector& args_list) const; - - // emit python wrapper - void python_wrapper(FileWriter& wrapperFile, Str className) const; - - // emit cython wrapper - void emit_cython_pxd(FileWriter& pxdFile, const Class& cls) const; - void emit_cython_pyx(FileWriter& pyxFile, const Class& cls) const; - - friend std::ostream& operator<<(std::ostream& os, const Constructor& m) { - for (size_t i = 0; i < m.nrOverloads(); 
i++) - os << m.name_ << m.argLists_[i]; - return os; - } - -}; - -} // \namespace wrap diff --git a/wrap/Deconstructor.cpp b/wrap/Deconstructor.cpp deleted file mode 100644 index 7bb366e3f..000000000 --- a/wrap/Deconstructor.cpp +++ /dev/null @@ -1,73 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Deconstructor.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include -#include - -#include - -#include "utilities.h" -#include "Deconstructor.h" - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -string Deconstructor::matlab_wrapper_name(const string& className) const { - string str = "delete_" + className; - return str; -} - -/* ************************************************************************* */ -void Deconstructor::proxy_fragment(FileWriter& file, - const std::string& wrapperName, - const std::string& matlabUniqueName, int id) const { - - file.oss << " function delete(obj)\n"; - file.oss << " " << wrapperName << "(" << id << ", obj.ptr_" << matlabUniqueName << ");\n"; - file.oss << " end\n"; -} - -/* ************************************************************************* */ -string Deconstructor::wrapper_fragment(FileWriter& file, - const string& cppClassName, - const string& matlabUniqueName, - int id) const { - - const string matlabName = matlab_wrapper_name(matlabUniqueName); - - const string wrapFunctionName = matlabUniqueName + "_deconstructor_" + boost::lexical_cast(id); - - file.oss << "void " << wrapFunctionName << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])" << endl; - file.oss << "{" << endl; - file.oss << " typedef boost::shared_ptr<" << cppClassName << "> Shared;" << endl; - //Deconstructor takes 1 arg, the mxArray obj - file.oss << " checkArguments(\"" << matlabName << "\",nargout,nargin," << "1" << ");" << endl; - file.oss << " Shared *self = *reinterpret_cast(mxGetData(in[0]));\n"; - file.oss << " Collector_" << matlabUniqueName << "::iterator item;\n"; - file.oss << " item = collector_" << matlabUniqueName << ".find(self);\n"; - file.oss << " if(item != collector_" << matlabUniqueName << ".end()) {\n"; - file.oss << " delete self;\n"; - file.oss << " collector_" << matlabUniqueName << ".erase(item);\n"; - file.oss << " }\n"; - file.oss << "}" << endl; - - return wrapFunctionName; -} - -/* ************************************************************************* */ diff --git a/wrap/Deconstructor.h b/wrap/Deconstructor.h deleted file mode 100644 index ee2f4ea19..000000000 --- a/wrap/Deconstructor.h +++ /dev/null @@ -1,61 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Deconstructor.h - * @brief class describing a constructor + code generation - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#pragma once - -#include -#include - -#include "Argument.h" - -namespace wrap { - -// Deconstructor class -struct Deconstructor { - - /// Deconstructor creates an empty class - Deconstructor(bool verbose = true) : - verbose_(verbose) { - } - - // Then the instance variables are set directly by the Module deconstructor - std::string name; - bool verbose_; - - // MATLAB code generation - // toolboxPath is main toolbox directory, e.g., ../matlab - // classFile is class proxy file, e.g., ../matlab/@Point2/Point2.m - - /// wrapper name - std::string matlab_wrapper_name(const std::string& className) const; - - /// m-file - void proxy_fragment(FileWriter& file, - const std::string& wrapperName, - const std::string& matlabUniqueName, int id) const; - - /// cpp wrapper - std::string wrapper_fragment(FileWriter& file, - const std::string& cppClassName, - const std::string& matlabUniqueName, - int id) const; -}; - -} // \namespace wrap - diff --git a/wrap/FileWriter.cpp b/wrap/FileWriter.cpp deleted file mode 100644 index c07de0eb0..000000000 --- a/wrap/FileWriter.cpp +++ /dev/null @@ -1,52 +0,0 @@ -/** - * @file FileWriter.cpp - * - * @date Jan 15, 2012 - * @author Alex Cunningham - */ - -#include "FileWriter.h" -#include "utilities.h" - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -FileWriter::FileWriter(const string& filename, bool verbose, - const string& comment_str) : - verbose_(verbose), filename_(filename), comment_str_(comment_str) { -} - -/* ************************************************************************* */ -void FileWriter::emit(bool add_header, bool force_overwrite) const { - if (verbose_) - cerr << "generating " << filename_ << " "; - // read in file if it exists - string existing_contents; - bool file_exists = true; - try { - existing_contents = file_contents(filename_.c_str(), add_header); - } catch (const CantOpenFile& ) { - file_exists = false; - } - - // Only write a file if it is new, an update, or overwrite is forced - string new_contents = oss.str(); - if (force_overwrite || !file_exists || existing_contents != new_contents) { - // Binary to use LF line endings instead of CRLF - ofstream ofs(filename_.c_str(), ios::binary); - if (!ofs) - throw CantOpenFile(filename_); - - // dump in stringstream - ofs << new_contents; - ofs.close(); - } - if (verbose_) - cerr << " ...no update" << endl; -} -/* ************************************************************************* */ - diff --git a/wrap/FileWriter.h b/wrap/FileWriter.h deleted file mode 100644 index 12e033fdf..000000000 --- a/wrap/FileWriter.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * @file FileWriter.h - * - * @brief Wrapper for writing files and avoiding overwriting existing files - * This class wraps a stream object and will check that the file is - * actually different to write the new generated file. 
- * - * @date Jan 15, 2012 - * @author Alex Cunningham - */ - -#pragma once - -#include - -namespace wrap { - -class FileWriter { -protected: - bool verbose_; - std::string filename_; - std::string comment_str_; - -public: - std::ostringstream oss; ///< Primary stream for operating on the file - - /** Create a writer with a filename and delimiter for the header comment */ - FileWriter(const std::string& filename, bool verbose, const std::string& comment_str); - - /** Writes the contents of the stringstream to the file, checking if actually new */ - void emit(bool add_header, bool force=false) const; - -}; - -} // \namespace wrap diff --git a/wrap/ForwardDeclaration.h b/wrap/ForwardDeclaration.h deleted file mode 100644 index 190387ecc..000000000 --- a/wrap/ForwardDeclaration.h +++ /dev/null @@ -1,36 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Class.h - * @brief describe the C++ class that is being wrapped - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#pragma once - -#include - -namespace wrap { - - class Class; - - struct ForwardDeclaration { - Class cls; - bool isVirtual; - ForwardDeclaration() : isVirtual(false) {} - explicit ForwardDeclaration(const std::string& s) : cls(s), isVirtual(false) {} - std::string name() const { return cls.qualifiedName("::"); } - }; - -} diff --git a/wrap/FullyOverloadedFunction.cpp b/wrap/FullyOverloadedFunction.cpp deleted file mode 100644 index 4db4c8713..000000000 --- a/wrap/FullyOverloadedFunction.cpp +++ /dev/null @@ -1,34 +0,0 @@ -#include "FullyOverloadedFunction.h" - -using namespace std; - -namespace wrap { -const std::array FullyOverloadedFunction::pythonKeywords{ - {"print", "lambda"}}; - -/* ************************************************************************* */ -std::string FullyOverloadedFunction::pyx_functionCall( - const std::string& caller, - const std::string& funcName, size_t iOverload) const { - - string ret; - if (!returnVals_[iOverload].isPair && !returnVals_[iOverload].type1.isPtr && - returnVals_[iOverload].type1.isNonBasicType()) { - ret = returnVals_[iOverload].type1.make_shared_pxd_class_in_pyx() + "("; - } - - // actual function call ... - if (!caller.empty()) ret += caller + "."; - ret += funcName; - if (templateArgValue_) ret += "[" + templateArgValue_->pxd_class_in_pyx() + "]"; - //... with argument list - ret += "(" + argumentList(iOverload).pyx_asParams() + ")"; - - if (!returnVals_[iOverload].isPair && !returnVals_[iOverload].type1.isPtr && - returnVals_[iOverload].type1.isNonBasicType()) - ret += ")"; - - return ret; -} - -} diff --git a/wrap/FullyOverloadedFunction.h b/wrap/FullyOverloadedFunction.h deleted file mode 100644 index 6b40f6a70..000000000 --- a/wrap/FullyOverloadedFunction.h +++ /dev/null @@ -1,147 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file FullyOverloadedFunction.h - * @brief Function that can be fully overloaded: arguments and return values - * @author Frank Dellaert - * @date Nov 13, 2014 - **/ - -#pragma once - -#include "OverloadedFunction.h" -#include - -namespace wrap { - -/** - * Signature Overload (including return value) - */ -class SignatureOverloads: public ArgumentOverloads { - -public: - - std::vector returnVals_; - -public: - - const ReturnValue& returnValue(size_t i) const { - return returnVals_.at(i); - } - - void push_back(const ArgumentList& args, const ReturnValue& retVal) { - argLists_.push_back(args); - returnVals_.push_back(retVal); - } - - void verifyReturnTypes(const std::vector& validtypes, - const std::string& s) const { - for(const ReturnValue& retval: returnVals_) { - retval.type1.verify(validtypes, s); - if (retval.isPair) - retval.type2.verify(validtypes, s); - } - } - - // TODO use transform ? - std::vector expandReturnValuesTemplate( - const TemplateSubstitution& ts) const { - std::vector result; - for(const ReturnValue& retVal: returnVals_) { - ReturnValue instRetVal = retVal.expandTemplate(ts); - result.push_back(instRetVal); - } - return result; - } - - /// Expand templates, imperative ! - void expandTemplate(const TemplateSubstitution& ts) { - // substitute template in arguments - argLists_ = expandArgumentListsTemplate(ts); - // do the same for return types - returnVals_ = expandReturnValuesTemplate(ts); - } - - // emit a list of comments, one for each overload - void usage_fragment(FileWriter& proxyFile, const std::string& name) const { - unsigned int argLCount = 0; - for(ArgumentList argList: argLists_) { - argList.emit_prototype(proxyFile, name); - if (argLCount != nrOverloads() - 1) - proxyFile.oss << ", "; - else - proxyFile.oss << " : returns " << returnValue(0).returnType() - << std::endl; - argLCount++; - } - } - - // emit a list of comments, one for each overload - void comment_fragment(FileWriter& proxyFile, const std::string& name) const { - size_t i = 0; - for(ArgumentList argList: argLists_) { - proxyFile.oss << "%"; - argList.emit_prototype(proxyFile, name); - proxyFile.oss << " : returns " << returnVals_[i++].returnType() - << std::endl; - } - } - - friend std::ostream& operator<<(std::ostream& os, - const SignatureOverloads& overloads) { - for (size_t i = 0; i < overloads.nrOverloads(); i++) - os << overloads.returnVals_[i] << overloads.argLists_[i] << std::endl; - return os; - } - -}; - -class FullyOverloadedFunction: public Function, public SignatureOverloads { - -public: - - bool addOverload(const std::string& name, const ArgumentList& args, - const ReturnValue& retVal, boost::optional instName = - boost::none, bool verbose = false) { - bool first = initializeOrCheck(name, instName, verbose); - SignatureOverloads::push_back(args, retVal); - return first; - } - - // emit cython pyx function call - std::string pyx_functionCall(const std::string& caller, const std::string& funcName, - size_t iOverload) const; - - /// Cython: Rename functions which names are python keywords - static const std::array pythonKeywords; - static std::string pyRename(const std::string& name) { - if (std::find(pythonKeywords.begin(), pythonKeywords.end(), name) == - pythonKeywords.end()) - return name; - else - return name + "_"; - } -}; - -// Templated checking functions -// TODO: do this via polymorphism, use 
transform ? - -template -inline void verifyReturnTypes(const std::vector& validTypes, - const std::map& vt) { - typedef typename std::map::value_type NamedMethod; - for(const NamedMethod& namedMethod: vt) - namedMethod.second.verifyReturnTypes(validTypes); -} - -} // \namespace wrap - diff --git a/wrap/Function.cpp b/wrap/Function.cpp deleted file mode 100644 index 80b0adbbe..000000000 --- a/wrap/Function.cpp +++ /dev/null @@ -1,78 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Function.ccp - * @author Frank Dellaert - * @date Nov 13, 2014 - **/ - -#include "Function.h" -#include "utilities.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -bool Function::initializeOrCheck(const string& name, - boost::optional instName, bool verbose) { - - if (name.empty()) - throw runtime_error("Function::initializeOrCheck called with empty name"); - - // Check if this overload is give to the correct method - if (name_.empty()) { - name_ = name; - templateArgValue_ = instName; - verbose_ = verbose; - return true; - } else { - if (name_ != name || verbose_ != verbose - || ((bool) templateArgValue_ != (bool) instName) - || ((bool) templateArgValue_ && (bool) instName - && !(*templateArgValue_ == *instName))) - throw runtime_error( - "Function::initializeOrCheck called with different arguments"); - - return false; - } -} - -/* ************************************************************************* */ -void Function::emit_call(FileWriter& proxyFile, const ReturnValue& returnVal, - const string& wrapperName, int id) const { - returnVal.emit_matlab(proxyFile); - proxyFile.oss << wrapperName << "(" << id; - if (!isStatic()) - proxyFile.oss << ", this"; - proxyFile.oss << ", varargin{:});\n"; -} - -/* ************************************************************************* */ -void Function::emit_conditional_call(FileWriter& proxyFile, - const ReturnValue& returnVal, const ArgumentList& args, - const string& wrapperName, int id) const { - - // Check all arguments - args.proxy_check(proxyFile); - - // output call to C++ wrapper - proxyFile.oss << " "; - emit_call(proxyFile, returnVal, wrapperName, id); -} - -/* ************************************************************************* */ diff --git a/wrap/Function.h b/wrap/Function.h deleted file mode 100644 index c39b3231c..000000000 --- a/wrap/Function.h +++ /dev/null @@ -1,73 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Function.h - * @brief Base class for global functions and methods - * @author Frank Dellaert - * @date Nov 13, 2014 - **/ - -#pragma once - -#include "Argument.h" -#include - -namespace wrap { - -/// Function class -class Function { - -protected: - - std::string name_; ///< name of method - boost::optional templateArgValue_; ///< value of template argument if applicable - bool verbose_; - -public: - - /** - * @brief first time, fill in instance variables, otherwise check if same - * @return true if first time, false thereafter - */ - bool initializeOrCheck(const std::string& name, - boost::optional instName = boost::none, bool verbose = - false); - - std::string name() const { - return name_; - } - - /// Only Methods are non-static - virtual bool isStatic() const { - return true; - } - - std::string matlabName() const { - if (templateArgValue_) - return name_ + templateArgValue_->name(); - else - return name_; - } - - /// Emit function call to MATLAB (no argument checking) - void emit_call(FileWriter& proxyFile, const ReturnValue& returnVal, - const std::string& wrapperName, int id) const; - - /// Emit checking arguments and function call to MATLAB - void emit_conditional_call(FileWriter& proxyFile, - const ReturnValue& returnVal, const ArgumentList& args, - const std::string& wrapperName, int id) const; - -}; - -} // \namespace wrap - diff --git a/wrap/GlobalFunction.cpp b/wrap/GlobalFunction.cpp deleted file mode 100644 index 02ab19657..000000000 --- a/wrap/GlobalFunction.cpp +++ /dev/null @@ -1,227 +0,0 @@ -/** - * @file GlobalFunction.cpp - * - * @date Jul 22, 2012 - * @author Alex Cunningham - */ - -#include "GlobalFunction.h" -#include "Class.h" -#include "utilities.h" - -#include - -namespace wrap { - -using namespace std; - -/* ************************************************************************* */ -void GlobalFunction::addOverload(const Qualified& overload, - const ArgumentList& args, const ReturnValue& retVal, const std::string& _includeFile, - boost::optional instName, bool verbose) { - FullyOverloadedFunction::addOverload(overload.name(), args, retVal, instName, - verbose); - overloads.push_back(overload); - includeFile = _includeFile; -} - -/* ************************************************************************* */ -void GlobalFunction::matlab_proxy(const string& toolboxPath, - const string& wrapperName, const TypeAttributesTable& typeAttributes, - FileWriter& file, vector& functionNames) const { - - // cluster overloads with same namespace - // create new GlobalFunction structures around namespaces - same namespaces and names are overloads - // map of namespace to global function - typedef map GlobalFunctionMap; - GlobalFunctionMap grouped_functions; - for (size_t i = 0; i < overloads.size(); ++i) { - Qualified overload = overloads.at(i); - // use concatenated namespaces as key - string str_ns = qualifiedName("", overload.namespaces()); - const ReturnValue& ret = returnValue(i); - const ArgumentList& args = argumentList(i); - grouped_functions[str_ns].addOverload(overload, args, ret); - } - - size_t lastcheck = grouped_functions.size(); - for(const GlobalFunctionMap::value_type& p: grouped_functions) { - p.second.generateSingleFunction(toolboxPath, wrapperName, typeAttributes, - file, functionNames); - if (--lastcheck != 0) - file.oss << endl; - } -} - -/* 
************************************************************************* */ -void GlobalFunction::generateSingleFunction(const string& toolboxPath, - const string& wrapperName, const TypeAttributesTable& typeAttributes, - FileWriter& file, vector& functionNames) const { - - // create the folder for the namespace - const Qualified& overload1 = overloads.front(); - createNamespaceStructure(overload1.namespaces(), toolboxPath); - - // open destination mfunctionFileName - string mfunctionFileName = overload1.matlabName(toolboxPath); - FileWriter mfunctionFile(mfunctionFileName, verbose_, "%"); - - // get the name of actual matlab object - const string matlabQualName = overload1.qualifiedName("."); - const string matlabUniqueName = overload1.qualifiedName(""); - const string cppName = overload1.qualifiedName("::"); - - mfunctionFile.oss << "function varargout = " << name_ << "(varargin)\n"; - - for (size_t i = 0; i < nrOverloads(); ++i) { - const ArgumentList& args = argumentList(i); - const ReturnValue& returnVal = returnValue(i); - - const int id = functionNames.size(); - - // Output proxy matlab code - mfunctionFile.oss << " " << (i == 0 ? "" : "else"); - emit_conditional_call(mfunctionFile, returnVal, args, wrapperName, id); - - // Output C++ wrapper code - - const string wrapFunctionName = matlabUniqueName + "_" - + boost::lexical_cast(id); - - // call - file.oss << "void " << wrapFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - // start - file.oss << "{\n"; - - // check arguments - // NOTE: for static functions, there is no object passed - file.oss << " checkArguments(\"" << matlabUniqueName - << "\",nargout,nargin," << args.size() << ");\n"; - - // unwrap arguments, see Argument.cpp - args.matlab_unwrap(file, 0); // We start at 0 because there is no self object - - // call method with default type and wrap result - if (returnVal.type1.name() != "void") - returnVal.wrap_result(cppName + "(" + args.names() + ")", file, - typeAttributes); - else - file.oss << cppName + "(" + args.names() + ");\n"; - - // finish - file.oss << "}\n"; - - // Add to function list - functionNames.push_back(wrapFunctionName); - } - - mfunctionFile.oss << " else\n"; - mfunctionFile.oss - << " error('Arguments do not match any overload of function " - << matlabQualName << "');" << endl; - mfunctionFile.oss << " end" << endl; - - // Close file - mfunctionFile.emit(true); -} - -/* ************************************************************************* */ -void GlobalFunction::python_wrapper(FileWriter& wrapperFile) const { - wrapperFile.oss << "def(\"" << name_ << "\", " << name_ << ");\n"; -} - -/* ************************************************************************* */ -void GlobalFunction::emit_cython_pxd(FileWriter& file) const { - file.oss << "cdef extern from \"" << includeFile << "\" namespace \"" - << overloads[0].qualifiedNamespaces("::") - << "\":" << endl; - for (size_t i = 0; i < nrOverloads(); ++i) { - file.oss << " "; - returnVals_[i].emit_cython_pxd(file, "", vector()); - file.oss << pxdName() + " \"" + overloads[0].qualifiedName("::") + - "\"("; - argumentList(i).emit_cython_pxd(file, "", vector()); - file.oss << ")"; - file.oss << " except +"; - file.oss << "\n"; - } -} - -/* ************************************************************************* */ -void GlobalFunction::emit_cython_pyx_no_overload(FileWriter& file) const { - string funcName = pyxName(); - - // Function definition - file.oss << "def " << funcName; - - // modify name of function 
instantiation as python doesn't allow overloads - // e.g. template funcName(...) --> funcNameA, funcNameB, funcNameC - if (templateArgValue_) file.oss << templateArgValue_->pyxClassName(); - - // funtion arguments - file.oss << "("; - argumentList(0).emit_cython_pyx(file); - file.oss << "):\n"; - - /// Call cython corresponding function and return - file.oss << argumentList(0).pyx_convertEigenTypeAndStorageOrder(" "); - string ret = pyx_functionCall("", pxdName(), 0); - if (!returnVals_[0].isVoid()) { - file.oss << " cdef " << returnVals_[0].pyx_returnType() - << " ret = " << ret << "\n"; - file.oss << " return " << returnVals_[0].pyx_casting("ret") << "\n"; - } else { - file.oss << " " << ret << "\n"; - } -} - -/* ************************************************************************* */ -void GlobalFunction::emit_cython_pyx(FileWriter& file) const { - string funcName = pyxName(); - - size_t N = nrOverloads(); - if (N == 1) { - emit_cython_pyx_no_overload(file); - return; - } - - // Dealing with overloads.. - file.oss << "def " << funcName << "(*args, **kwargs):\n"; - for (size_t i = 0; i < N; ++i) { - file.oss << " success, results = " << funcName << "_" << i - << "(args, kwargs)\n"; - file.oss << " if success:\n return results\n"; - } - file.oss << " raise TypeError('Could not find the correct overload')\n"; - - for (size_t i = 0; i < N; ++i) { - ArgumentList args = argumentList(i); - file.oss << "def " + funcName + "_" + to_string(i) + "(args, kwargs):\n"; - file.oss << " cdef list __params\n"; - if (!returnVals_[i].isVoid()) { - file.oss << " cdef " << returnVals_[i].pyx_returnType() << " return_value\n"; - } - file.oss << " try:\n"; - file.oss << pyx_resolveOverloadParams(args, false, 2); // lazy: always return None even if it's a void function - - /// Call corresponding cython function - file.oss << argumentList(i).pyx_convertEigenTypeAndStorageOrder(" "); - // catch exception which indicates the parameters passed are incorrect. 
- file.oss << " except:\n"; - file.oss << " return False, None\n\n"; - - string call = pyx_functionCall("", pxdName(), i); - if (!returnVals_[i].isVoid()) { - file.oss << " return_value = " << call << "\n"; - file.oss << " return True, " << returnVals_[i].pyx_casting("return_value") << "\n"; - } else { - file.oss << " " << call << "\n"; - file.oss << " return True, None\n"; - } - } -} -/* ************************************************************************* */ - -} // \namespace wrap - diff --git a/wrap/GlobalFunction.h b/wrap/GlobalFunction.h deleted file mode 100644 index 099cefa70..000000000 --- a/wrap/GlobalFunction.h +++ /dev/null @@ -1,148 +0,0 @@ -/** - * @file GlobalFunction.h - * - * @brief Implements codegen for a global function wrapped in matlab - * - * @date Jul 22, 2012 - * @author Alex Cunningham - */ - -#pragma once - -#include "FullyOverloadedFunction.h" - -#ifdef __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-variable" -#endif -#include -#include -#ifdef __GNUC__ -#pragma GCC diagnostic pop -#endif - -namespace bl = boost::lambda; - -namespace wrap { - -struct GlobalFunction: public FullyOverloadedFunction { - - std::vector overloads; ///< Stack of qualified names - std::string includeFile; - - // adds an overloaded version of this function, - void addOverload(const Qualified& overload, const ArgumentList& args, - const ReturnValue& retVal, const std::string& _includeFile = "", boost::optional instName = - boost::none, bool verbose = false); - - void verifyArguments(const std::vector& validArgs) const { - SignatureOverloads::verifyArguments(validArgs, name_); - } - - void verifyReturnTypes(const std::vector& validtypes) const { - SignatureOverloads::verifyReturnTypes(validtypes, name_); - } - - // codegen function called from Module to build the cpp and matlab versions of the function - void matlab_proxy(const std::string& toolboxPath, - const std::string& wrapperName, const TypeAttributesTable& typeAttributes, - FileWriter& file, std::vector& functionNames) const; - - // emit python wrapper - void python_wrapper(FileWriter& wrapperFile) const; - - // function name in Cython pxd - std::string pxdName() const { return "pxd_" + pyRename(name_); } - // function name in Python pyx - std::string pyxName() const { - std::string result = ""; - for(size_t i=0; i= 1) { - result += (overloads[0].namespaces_[i] + "_"); - } - } - result += pyRename(name_); - return result; - } - - // emit cython wrapper - void emit_cython_pxd(FileWriter& pxdFile) const; - void emit_cython_pyx(FileWriter& pyxFile) const; - void emit_cython_pyx_no_overload(FileWriter& pyxFile) const; - -private: - - // Creates a single global function - all in same namespace - void generateSingleFunction(const std::string& toolboxPath, - const std::string& wrapperName, const TypeAttributesTable& typeAttributes, - FileWriter& file, std::vector& functionNames) const; - -}; - -typedef std::map GlobalFunctions; - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct GlobalFunctionGrammar: public classic::grammar { - - GlobalFunctions& global_functions_; ///< successful parse will be placed in here - std::vector& namespaces_; - std::string& includeFile; - - /// Construct type grammar and specify where result is placed - GlobalFunctionGrammar(GlobalFunctions& global_functions, - std::vector& namespaces, - std::string& includeFile) - : global_functions_(global_functions), - 
namespaces_(namespaces), - includeFile(includeFile) {} - - /// Definition of type grammar - template - struct definition: BasicRules { - -// using BasicRules::name_p; -// using BasicRules::className_p; - using BasicRules::comments_p; - - ArgumentList args; - ArgumentListGrammar argumentList_g; - - ReturnValue retVal0, retVal; - ReturnValueGrammar returnValue_g; - - Qualified globalFunction; - - classic::rule globalFunctionName_p, global_function_p; - - definition(GlobalFunctionGrammar const& self) : - argumentList_g(args), returnValue_g(retVal) { - - using namespace classic; - bool verbose = false; // TODO - - globalFunctionName_p = lexeme_d[(upper_p | lower_p) >> *(alnum_p | '_')]; - - // parse a global function - global_function_p = (returnValue_g >> globalFunctionName_p[assign_a( - globalFunction.name_)] >> - argumentList_g >> ';' >> *comments_p) // - [assign_a(globalFunction.namespaces_, self.namespaces_)] // - [bl::bind( - &GlobalFunction::addOverload, - bl::var(self.global_functions_)[bl::var(globalFunction.name_)], - bl::var(globalFunction), bl::var(args), bl::var(retVal), bl::var(self.includeFile), - boost::none, verbose)] // - [assign_a(retVal, retVal0)][clear_a(globalFunction)][clear_a(args)]; - } - - classic::rule const& start() const { - return global_function_p; - } - - }; -}; -// GlobalFunctionGrammar - -}// \namespace wrap - diff --git a/wrap/Method.cpp b/wrap/Method.cpp deleted file mode 100644 index 2a4b0b3af..000000000 --- a/wrap/Method.cpp +++ /dev/null @@ -1,205 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Method.ccp - * @author Frank Dellaert - * @author Richard Roberts - **/ - -#include "Method.h" -#include "Class.h" -#include "utilities.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -bool Method::addOverload(Str name, const ArgumentList& args, - const ReturnValue& retVal, bool is_const, - boost::optional instName, - bool verbose) { - bool first = MethodBase::addOverload(name, args, retVal, instName, verbose); - if (first) - is_const_ = is_const; - else if (is_const && !is_const_) - throw std::runtime_error( - "Method::addOverload: " + name + - " now designated as const whereas before it was not"); - else if (!is_const && is_const_) - throw std::runtime_error( - "Method::addOverload: " + name + - " now designated as non-const whereas before it was"); - return first; -} - -/* ************************************************************************* */ -void Method::proxy_header(FileWriter& proxyFile) const { - proxyFile.oss << " function varargout = " << matlabName() - << "(this, varargin)\n"; -} - -/* ************************************************************************* */ -string Method::wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, - const ArgumentList& args) const { - // check arguments - // extra argument obj -> nargin-1 is passed ! 
- // example: checkArguments("equals",nargout,nargin-1,2); - wrapperFile.oss << " checkArguments(\"" << matlabName() - << "\",nargout,nargin-1," << args.size() << ");\n"; - - // get class pointer - // example: auto obj = unwrap_shared_ptr< Test >(in[0], "Test"); - wrapperFile.oss << " auto obj = unwrap_shared_ptr<" << cppClassName - << ">(in[0], \"ptr_" << matlabUniqueName << "\");" << endl; - - // unwrap arguments, see Argument.cpp, we start at 1 as first is obj - args.matlab_unwrap(wrapperFile, 1); - - // call method and wrap result - // example: out[0]=wrap(obj->return_field(t)); - string expanded = "obj->" + name_; - if (templateArgValue_) - expanded += ("<" + templateArgValue_->qualifiedName("::") + ">"); - - return expanded; -} - -/* ************************************************************************* */ -void Method::emit_cython_pxd(FileWriter& file, const Class& cls) const { - for (size_t i = 0; i < nrOverloads(); ++i) { - file.oss << " "; - returnVals_[i].emit_cython_pxd(file, cls.pxdClassName(), cls.templateArgs); - const string renamed = pyRename(name_); - if (renamed != name_) { - file.oss << pyRename(name_) + " \"" + name_ + "\"" << "("; - } else { - file.oss << name_ << "("; - } - argumentList(i).emit_cython_pxd(file, cls.pxdClassName(), cls.templateArgs); - file.oss << ")"; - // if (is_const_) file.oss << " const"; - file.oss << " except +"; - file.oss << "\n"; - } -} - -/* ************************************************************************* */ -void Method::emit_cython_pyx_no_overload(FileWriter& file, - const Class& cls) const { - string funcName = pyRename(name_); - - // leverage python's special treatment for print - if (funcName == "print_") { - file.oss << " def __repr__(self):\n"; - file.oss << " strBuf = RedirectCout()\n"; - file.oss << " self.print_('')\n"; - file.oss << " return strBuf.str()\n"; - } - - // Function definition - file.oss << " def " << funcName; - - // modify name of function instantiation as python doesn't allow overloads - // e.g. template funcName(...) --> funcNameA, funcNameB, funcNameC - if (templateArgValue_) file.oss << templateArgValue_->pyxClassName(); - - // function arguments - file.oss << "(self"; - if (argumentList(0).size() > 0) file.oss << ", "; - argumentList(0).emit_cython_pyx(file); - file.oss << "):\n"; - - /// Call cython corresponding function and return - file.oss << argumentList(0).pyx_convertEigenTypeAndStorageOrder(" "); - string caller = "self." + cls.shared_pxd_obj_in_pyx() + ".get()"; - string ret = pyx_functionCall(caller, funcName, 0); - if (!returnVals_[0].isVoid()) { - file.oss << " cdef " << returnVals_[0].pyx_returnType() - << " ret = " << ret << "\n"; - file.oss << " return " << returnVals_[0].pyx_casting("ret") << "\n"; - } else { - file.oss << " " << ret << "\n"; - } -} - -/* ************************************************************************* */ -void Method::emit_cython_pyx(FileWriter& file, const Class& cls) const { - string funcName = pyRename(name_); - // For template function: modify name of function instantiation as python - // doesn't allow overloads - // e.g. template funcName(...) --> funcNameA, funcNameB, funcNameC - string instantiatedName = - (templateArgValue_) ? funcName + templateArgValue_->pyxClassName() : - funcName; - - size_t N = nrOverloads(); - // It's easy if there's no overload - if (N == 1) { - emit_cython_pyx_no_overload(file, cls); - return; - } - - // Dealing with overloads.. 
- file.oss << " def " << instantiatedName << "(self, *args, **kwargs):\n"; - file.oss << " cdef list __params\n"; - - // Define return values for all possible overloads - vector return_type; // every overload has a return type, possibly void - map return_value; // we only define one return value for every distinct type - size_t j = 1; - for (size_t i = 0; i < nrOverloads(); ++i) { - if (returnVals_[i].isVoid()) { - return_type.push_back("void"); - } else { - const string type = returnVals_[i].pyx_returnType(); - return_type.push_back(type); - if (return_value.count(type) == 0) { - const string value = "return_value_" + to_string(j++); - return_value[type] = value; - file.oss << " cdef " << type << " " << value << "\n"; - } - } - } - - for (size_t i = 0; i < nrOverloads(); ++i) { - ArgumentList args = argumentList(i); - file.oss << " try:\n"; - file.oss << pyx_resolveOverloadParams(args, false, 3); // lazy: always return None even if it's a void function - - /// Call corresponding cython function - file.oss << args.pyx_convertEigenTypeAndStorageOrder(" "); - string caller = "self." + cls.shared_pxd_obj_in_pyx() + ".get()"; - string call = pyx_functionCall(caller, funcName, i); - if (!returnVals_[i].isVoid()) { - const string type = return_type[i]; - const string value = return_value[type]; - file.oss << " " << value << " = " << call << "\n"; - file.oss << " return " << returnVals_[i].pyx_casting(value) - << "\n"; - } else { - file.oss << " " << call << "\n"; - file.oss << " return\n"; - } - file.oss << " except (AssertionError, ValueError):\n"; - file.oss << " pass\n"; - } - file.oss - << " raise TypeError('Incorrect arguments or types for method call.')\n\n"; -} -/* ************************************************************************* */ diff --git a/wrap/Method.h b/wrap/Method.h deleted file mode 100644 index 4d3c8d909..000000000 --- a/wrap/Method.h +++ /dev/null @@ -1,74 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Method.h - * @brief describes and generates code for methods - * @author Frank Dellaert - * @author Richard Roberts - **/ - -#pragma once - -#include "MethodBase.h" - -namespace wrap { - -/// Method class -class Method: public MethodBase { - -protected: - bool is_const_; - -public: - - typedef const std::string& Str; - - bool addOverload(Str name, const ArgumentList& args, - const ReturnValue& retVal, bool is_const, - boost::optional instName = boost::none, bool verbose = - false); - - bool isStatic() const override { - return false; - } - - virtual bool isConst() const { - return is_const_; - } - - bool isSameModifiers(const Method& other) const { - return is_const_ == other.is_const_ && - ((templateArgValue_ && other.templateArgValue_) || - (!templateArgValue_ && !other.templateArgValue_)); - } - - friend std::ostream& operator<<(std::ostream& os, const Method& m) { - for (size_t i = 0; i < m.nrOverloads(); i++) - os << m.returnVals_[i] << " " << m.name_ << m.argLists_[i]; - return os; - } - - void emit_cython_pxd(FileWriter& file, const Class& cls) const; - void emit_cython_pyx(FileWriter& file, const Class& cls) const; - void emit_cython_pyx_no_overload(FileWriter& file, const Class& cls) const; - -private: - - // Emit method header - void proxy_header(FileWriter& proxyFile) const override; - - std::string wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, const ArgumentList& args) const override; -}; - -} // \namespace wrap - diff --git a/wrap/MethodBase.cpp b/wrap/MethodBase.cpp deleted file mode 100644 index a2ed68780..000000000 --- a/wrap/MethodBase.cpp +++ /dev/null @@ -1,135 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file MethodBase.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "Method.h" -#include "Class.h" -#include "utilities.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -void MethodBase::proxy_wrapper_fragments( - FileWriter& proxyFile, FileWriter& wrapperFile, Str cppClassName, - Str matlabQualName, Str matlabUniqueName, Str wrapperName, - const TypeAttributesTable& typeAttributes, - vector& functionNames) const { - // emit header, e.g., function varargout = templatedMethod(this, varargin) - proxy_header(proxyFile); - - // Emit comments for documentation - string up_name = boost::to_upper_copy(matlabName()); - proxyFile.oss << " % " << up_name << " usage: "; - usage_fragment(proxyFile, matlabName()); - - // Emit URL to Doxygen page - proxyFile.oss << " % " - << "Doxygen can be found at " - "http://research.cc.gatech.edu/borg/sites/edu.borg/html/" - "index.html" << endl; - - // Handle special case of single overload with all numeric arguments - if (nrOverloads() == 1 && argumentList(0).allScalar()) { - // Output proxy matlab code - // TODO: document why is it OK to not check arguments in this case - proxyFile.oss << " "; - const int id = (int)functionNames.size(); - emit_call(proxyFile, returnValue(0), wrapperName, id); - - // Output C++ wrapper code - const string wrapFunctionName = wrapper_fragment( - wrapperFile, cppClassName, matlabUniqueName, 0, id, typeAttributes); - - // Add to function list - functionNames.push_back(wrapFunctionName); - } else { - // Check arguments for all overloads - for (size_t i = 0; i < nrOverloads(); ++i) { - // Output proxy matlab code - proxyFile.oss << " " << (i == 0 ? "" : "else"); - const int id = (int)functionNames.size(); - emit_conditional_call(proxyFile, returnValue(i), argumentList(i), - wrapperName, id); - - // Output C++ wrapper code - const string wrapFunctionName = wrapper_fragment( - wrapperFile, cppClassName, matlabUniqueName, i, id, typeAttributes); - - // Add to function list - functionNames.push_back(wrapFunctionName); - } - proxyFile.oss << " else\n"; - proxyFile.oss - << " error('Arguments do not match any overload of function " - << matlabQualName << "." 
<< name_ << "');" << endl; - proxyFile.oss << " end\n"; - } - - proxyFile.oss << " end\n"; -} - -/* ************************************************************************* */ -string MethodBase::wrapper_fragment( - FileWriter& wrapperFile, Str cppClassName, Str matlabUniqueName, - int overload, int id, const TypeAttributesTable& typeAttributes) const { - // generate code - - const string wrapFunctionName = - matlabUniqueName + "_" + name_ + "_" + boost::lexical_cast(id); - - const ArgumentList& args = argumentList(overload); - const ReturnValue& returnVal = returnValue(overload); - - // call - wrapperFile.oss - << "void " << wrapFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - // start - wrapperFile.oss << "{\n"; - - // get call - // for static methods: cppClassName::staticMethod - // for instance methods: obj->instanceMethod - string expanded = - wrapper_call(wrapperFile, cppClassName, matlabUniqueName, args); - - expanded += ("(" + args.names() + ")"); - if (returnVal.type1.name() != "void") - returnVal.wrap_result(expanded, wrapperFile, typeAttributes); - else - wrapperFile.oss << " " + expanded + ";\n"; - - // finish - wrapperFile.oss << "}\n"; - - return wrapFunctionName; -} - -/* ************************************************************************* */ -void MethodBase::python_wrapper(FileWriter& wrapperFile, Str className) const { - wrapperFile.oss << " .def(\"" << name_ << "\", &" << className - << "::" << name_ << ");\n"; -} - -/* ************************************************************************* */ diff --git a/wrap/MethodBase.h b/wrap/MethodBase.h deleted file mode 100644 index ee72a6a53..000000000 --- a/wrap/MethodBase.h +++ /dev/null @@ -1,70 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file MethodBase.h - * @brief describes and generates code for static methods - * @author Frank Dellaert - * @author Alex Cunningham - * @author Richard Roberts - **/ - -#pragma once - -#include "FullyOverloadedFunction.h" - -namespace wrap { - -// Forward declaration -class Class; - -/// MethodBase class -struct MethodBase : public FullyOverloadedFunction { - typedef const std::string& Str; - - // emit a list of comments, one for each overload - void comment_fragment(FileWriter& proxyFile) const { - SignatureOverloads::comment_fragment(proxyFile, matlabName()); - } - - void verifyArguments(const std::vector& validArgs) const { - SignatureOverloads::verifyArguments(validArgs, name_); - } - - void verifyReturnTypes(const std::vector& validtypes) const { - SignatureOverloads::verifyReturnTypes(validtypes, name_); - } - - // MATLAB code generation - // classPath is class directory, e.g., ../matlab/@Point2 - void proxy_wrapper_fragments(FileWriter& proxyFile, FileWriter& wrapperFile, - Str cppClassName, Str matlabQualName, - Str matlabUniqueName, Str wrapperName, - const TypeAttributesTable& typeAttributes, - std::vector& functionNames) const; - - // emit python wrapper - void python_wrapper(FileWriter& wrapperFile, Str className) const; - -protected: - virtual void proxy_header(FileWriter& proxyFile) const = 0; - - std::string wrapper_fragment( - FileWriter& wrapperFile, Str cppClassName, Str matlabUniqueName, - int overload, int id, - const TypeAttributesTable& typeAttributes) const; ///< cpp wrapper - - virtual std::string wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, - const ArgumentList& args) const = 0; -}; - -} // \namespace wrap diff --git a/wrap/Module.cpp b/wrap/Module.cpp deleted file mode 100644 index 780c6f8da..000000000 --- a/wrap/Module.cpp +++ /dev/null @@ -1,649 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Module.ccp - * @author Frank Dellaert - * @author Alex Cunningham - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "Module.h" -#include "FileWriter.h" -#include "TypeAttributesTable.h" -#include "utilities.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; -using namespace BOOST_SPIRIT_CLASSIC_NS; -namespace bl = boost::lambda; -namespace fs = boost::filesystem; - -/* ************************************************************************* */ -// We parse an interface file into a Module object. -// The grammar is defined using the boost/spirit combinatorial parser. -// For example, str_p("const") parses the string "const", and the >> -// operator creates a sequence parser. The grammar below, composed of rules -// and with start rule [class_p], doubles as the specs for our interface files. 
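To make the comment above concrete, here is a hedged, made-up fragment in the interface-file format this grammar parses; the class, function, and typedef names below are illustrative only and are not copied from the real gtsam.h:

```
// Made-up interface fragment: an include, a namespace, a class with
// constructors and methods, a template-instantiation typedef, and a
// global function. Names are examples, not from the real gtsam.h.
#include <gtsam/geometry/Pose2.h>

namespace gtsam {

class Pose2 {
  Pose2();
  Pose2(double x, double y, double theta);
  double x() const;
  Matrix matrix() const;
};

// instantiate a templated class under a new name
typedef gtsam::PriorFactor<gtsam::Pose2> PriorFactorPose2;

// a free function wrapped at namespace scope
Vector logVector(const gtsam::Pose2& pose);

}
```

Each construct corresponds to one of the rules defined below: include_p, class_p, templateSingleInstantiation_p, and global_function_g.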
-/* ************************************************************************* */ - -/* ************************************************************************* */ -// If a number of template arguments were given, generate a number of expanded -// class names, e.g., PriorFactor -> PriorFactorPose2, and add those classes -static void handle_possible_template(vector& classes, - vector& uninstantiatedClasses, - const Class& cls, const Template& t) { - uninstantiatedClasses.push_back(cls); - if (cls.templateArgs.empty() || t.empty()) { - classes.push_back(cls); - } else { - if (cls.templateArgs.size() != 1) - throw std::runtime_error( - "In-line template instantiations only handle a single template argument"); - string arg = cls.templateArgs.front(); - vector classInstantiations = - (t.nrValues() > 0) ? cls.expandTemplate(arg, t.argValues()) : - cls.expandTemplate(arg, t.intList()); - for(const Class& c: classInstantiations) - classes.push_back(c); - } -} - -static void push_typedef_pair(vector& typedefs, - const Qualified& oldType, - const Qualified& newType, - const string& includeFile) { - typedefs.push_back(TypedefPair(oldType, newType, includeFile)); -} - -/* ************************************************************************* */ -Module::Module(const std::string& moduleName, bool enable_verbose) -: name(moduleName), verbose(enable_verbose) -{ -} - -/* ************************************************************************* */ -Module::Module(const string& interfacePath, - const string& moduleName, bool enable_verbose) -: name(moduleName), verbose(enable_verbose) -{ - // read interface file - string interfaceFile = interfacePath + "/" + moduleName + ".h"; - string contents = file_contents(interfaceFile); - - // execute parsing - parseMarkup(contents); -} - -/* ************************************************************************* */ -void Module::parseMarkup(const std::string& data) { - // The parse imperatively :-( updates variables gradually during parse - // The one with postfix 0 are used to reset the variables after parse. - - //---------------------------------------------------------------------------- - // Grammar with actions that build the Class object. Actions are - // defined within the square brackets [] and are executed whenever a - // rule is successfully parsed. Define BOOST_SPIRIT_DEBUG to debug. 
- // The grammar is allows a very restricted C++ header - // lexeme_d turns off white space skipping - // http://www.boost.org/doc/libs/1_37_0/libs/spirit/classic/doc/directives.html - // ---------------------------------------------------------------------------- - - // Define Rule and instantiate basic rules - typedef rule Rule; - BasicRules basic; - - vector namespaces; // current namespace tag - string currentInclude; - - // parse a full class - Class cls0(verbose),cls(verbose); - Template classTemplate; - ClassGrammar class_g(cls,classTemplate); - Rule class_p = class_g // - [assign_a(cls.namespaces_, namespaces)] - [assign_a(cls.includeFile, currentInclude)][bl::bind( - &handle_possible_template, bl::var(classes), - bl::var(uninstantiatedClasses), bl::var(cls), - bl::var(classTemplate))][clear_a(classTemplate)] // - [assign_a(cls, cls0)]; - - // parse "gtsam::Pose2" and add to singleInstantiation.typeList - TemplateInstantiationTypedef singleInstantiation, singleInstantiation0; - TypeListGrammar<'<','>'> typelist_g(singleInstantiation.typeList); - - // typedef gtsam::RangeFactor RangeFactor2D; - TypeGrammar instantiationClass_g(singleInstantiation.class_); - Rule templateSingleInstantiation_p = - (str_p("typedef") >> instantiationClass_g >> - typelist_g >> - basic.className_p[assign_a(singleInstantiation.name_)] >> - ';') - [assign_a(singleInstantiation.namespaces_, namespaces)] - [push_back_a(templateInstantiationTypedefs, singleInstantiation)] - [assign_a(singleInstantiation, singleInstantiation0)]; - - Qualified oldType, newType; - TypeGrammar typedefOldClass_g(oldType), typedefNewClass_g(newType); - Rule typedef_p = - (str_p("typedef") >> typedefOldClass_g >> typedefNewClass_g >> - ';') - [assign_a(oldType.namespaces_, namespaces)] - [assign_a(newType.namespaces_, namespaces)] - [bl::bind(&push_typedef_pair, bl::var(typedefs), bl::var(oldType), - bl::var(newType), bl::var(currentInclude))]; - - // Create grammar for global functions - GlobalFunctionGrammar global_function_g(global_functions, namespaces, - currentInclude); - - Rule include_p = str_p("#include") >> ch_p('<') >> - (*(anychar_p - '>'))[push_back_a(includes)] - [assign_a(currentInclude)] >> - ch_p('>'); - -#ifdef __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wuninitialized" -#endif - - Rule namespace_def_p = - (str_p("namespace") - >> basic.namespace_p[push_back_a(namespaces)] - >> ch_p('{') - >> *(include_p | class_p | templateSingleInstantiation_p | typedef_p | global_function_g | namespace_def_p | basic.comments_p) - >> ch_p('}')) - [pop_a(namespaces)]; - -#ifdef __clang__ -#pragma clang diagnostic pop -#endif - - // parse forward declaration - ForwardDeclaration fwDec0, fwDec; - Class fwParentClass; - TypeGrammar className_g(fwDec.cls); - TypeGrammar classParent_g(fwParentClass); - Rule classParent_p = (':' >> classParent_g >> ';') // - [bl::bind(&Class::assignParent, bl::var(fwDec.cls), - bl::var(fwParentClass))][clear_a(fwParentClass)]; - - Rule forward_declaration_p = - !(str_p("virtual")[assign_a(fwDec.isVirtual, T)]) - >> str_p("class") >> className_g - >> (classParent_p | ';') - [push_back_a(forward_declarations, fwDec)] - [assign_a(cls,cls0)] // also clear class to avoid partial parse - [assign_a(fwDec, fwDec0)]; - - Rule module_content_p = basic.comments_p | include_p | class_p - | templateSingleInstantiation_p | forward_declaration_p - | global_function_g | namespace_def_p; - - Rule module_p = *module_content_p >> !end_p; - - // and parse contents - parse_info info = 
parse(data.c_str(), module_p, space_p); - if(!info.full) { - printf("parsing stopped at \n%.20s\n",info.stop); - cout << "Stopped in:\n" - "class '" << cls.name_ << "'" << endl; - throw ParseFailed((int)info.length); - } - - // Post-process classes for serialization markers - for(Class& cls: classes) - cls.erase_serialization(); - - for(Class& cls: uninstantiatedClasses) - cls.erase_serialization(); - - // Explicitly add methods to the classes from parents so it shows in documentation - for(Class& cls: classes) - cls.appendInheritedMethods(cls, classes); - - // - Remove inherited methods for Cython classes in the pxd, otherwise Cython can't decide which one to call. - // - Only inherited nontemplateMethods_ in uninstantiatedClasses need to be removed - // because that what we serialized to the pxd. - // - However, we check against the class parent's *methods_* to avoid looking into - // its grand parent and grand-grand parent, etc., because all those are already - // added in its direct parent. - // - So this must be called *after* the above code appendInheritedMethods!! - for(Class& cls: uninstantiatedClasses) - cls.removeInheritedNontemplateMethods(uninstantiatedClasses); - - // Expand templates - This is done first so that template instantiations are - // counted in the list of valid types, have their attributes and dependencies - // checked, etc. - expandedClasses = ExpandTypedefInstantiations(classes, - templateInstantiationTypedefs); - - // Dependency check list - vector validTypes = GenerateValidTypes(expandedClasses, - forward_declarations, typedefs); - - // Check that all classes have been defined somewhere - verifyArguments(validTypes, global_functions); - verifyReturnTypes(validTypes, global_functions); - - hasSerialiable = false; - for(const Class& cls: expandedClasses) - cls.verifyAll(validTypes,hasSerialiable); - - // Create type attributes table and check validity - typeAttributes.addClasses(expandedClasses); - typeAttributes.addForwardDeclarations(forward_declarations); - for (const TypedefPair& p: typedefs) - typeAttributes.addType(p.newType); - // add Eigen types as template arguments are also checked ? 
- vector eigen; - eigen.push_back(ForwardDeclaration("Vector")); - eigen.push_back(ForwardDeclaration("Matrix")); - typeAttributes.addForwardDeclarations(eigen); - typeAttributes.checkValidity(expandedClasses); -} - -/* ************************************************************************* */ -void Module::generate_matlab_wrapper(const string& toolboxPath) const { - - fs::create_directories(toolboxPath); - - // create the unified .cpp switch file - const string wrapperName = name + "_wrapper"; - string wrapperFileName = toolboxPath + "/" + wrapperName + ".cpp"; - FileWriter wrapperFile(wrapperFileName, verbose, "//"); - wrapperFile.oss << "#include \n"; - wrapperFile.oss << "#include \n"; - wrapperFile.oss << "\n"; - - // Include boost.serialization archive headers before other class headers - if (hasSerialiable) { - wrapperFile.oss << "#include \n"; - wrapperFile.oss << "#include \n"; - wrapperFile.oss << "#include \n\n"; - } - - // Generate includes while avoiding redundant includes - generateIncludes(wrapperFile); - - // create typedef classes - we put this at the top of the wrap file so that - // collectors and method arguments can use these typedefs - for(const Class& cls: expandedClasses) - if(!cls.typedefName.empty()) - wrapperFile.oss << cls.getTypedef() << "\n"; - wrapperFile.oss << "\n"; - - // Generate boost.serialization export flags (needs typedefs from above) - if (hasSerialiable) { - for(const Class& cls: expandedClasses) - if(cls.isSerializable) - wrapperFile.oss << cls.getSerializationExport() << "\n"; - wrapperFile.oss << "\n"; - } - - // Generate collectors and cleanup function to be called from mexAtExit - WriteCollectorsAndCleanupFcn(wrapperFile, name, expandedClasses); - - // generate RTTI registry (for returning derived-most types) - WriteRTTIRegistry(wrapperFile, name, expandedClasses); - - vector functionNames; // Function names stored by index for switch - - // create proxy class and wrapper code - for(const Class& cls: expandedClasses) - cls.matlab_proxy(toolboxPath, wrapperName, typeAttributes, wrapperFile, functionNames); - - // create matlab files and wrapper code for global functions - for(const GlobalFunctions::value_type& p: global_functions) - p.second.matlab_proxy(toolboxPath, wrapperName, typeAttributes, wrapperFile, functionNames); - - // finish wrapper file - wrapperFile.oss << "\n"; - finish_wrapper(wrapperFile, functionNames); - - wrapperFile.emit(true); -} - -/* ************************************************************************* */ -void Module::generate_cython_wrapper(const string& toolboxPath, const std::string& pxdImports) const { - fs::create_directories(toolboxPath); - string pxdFileName = toolboxPath + "/" + name + ".pxd"; - FileWriter pxdFile(pxdFileName, verbose, "#"); - pxdFile.oss << pxdImports << "\n"; - emit_cython_pxd(pxdFile); - string pyxFileName = toolboxPath + "/" + name + ".pyx"; - FileWriter pyxFile(pyxFileName, verbose, "#"); - emit_cython_pyx(pyxFile); -} - -/* ************************************************************************* */ -void Module::emit_cython_pxd(FileWriter& pxdFile) const { - // headers - pxdFile.oss << "from gtsam_eigency.core cimport *\n" - "from libcpp.string cimport string\n" - "from libcpp.vector cimport vector\n" - "from libcpp.pair cimport pair\n" - "from libcpp.set cimport set\n" - "from libcpp.map cimport map\n" - "from libcpp cimport bool\n\n"; - - // boost shared_ptr - pxdFile.oss << "cdef extern from \"boost/shared_ptr.hpp\" namespace \"boost\":\n" - " cppclass shared_ptr[T]:\n" - " 
shared_ptr()\n" - " shared_ptr(T*)\n" - " T* get()\n" - " long use_count() const\n" - " T& operator*()\n\n" - " cdef shared_ptr[T] dynamic_pointer_cast[T,U](const shared_ptr[U]& r)\n\n"; - - // gtsam alignment-friendly shared_ptr - pxdFile.oss << "cdef extern from \"gtsam/base/make_shared.h\" namespace \"gtsam\":\n" - " cdef shared_ptr[T] make_shared[T](const T& r)\n\n"; - - for(const TypedefPair& types: typedefs) - types.emit_cython_pxd(pxdFile); - - //... wrap all classes - for (const Class& cls : uninstantiatedClasses) { - cls.emit_cython_pxd(pxdFile); - - for (const Class& expCls : expandedClasses) { - bool matchingNonTemplated = !expCls.templateClass - && expCls.pxdClassName() == cls.pxdClassName(); - bool isTemplatedFromCls = expCls.templateClass - && expCls.templateClass->pxdClassName() == cls.pxdClassName(); - - // ctypedef for template instantiations - if (isTemplatedFromCls) { - pxdFile.oss << "\n"; - pxdFile.oss << "ctypedef " << expCls.templateClass->pxdClassName() - << "["; - for (size_t i = 0; i < expCls.templateInstTypeList.size(); ++i) - pxdFile.oss << expCls.templateInstTypeList[i].pxdClassName() - << ((i == expCls.templateInstTypeList.size() - 1) ? "" : ", "); - pxdFile.oss << "] " << expCls.pxdClassName() << "\n"; - } - - // Python wrapper class - if (isTemplatedFromCls || matchingNonTemplated) { - expCls.emit_cython_wrapper_pxd(pxdFile); - } - } - pxdFile.oss << "\n\n"; - } - - //... wrap global functions - for(const GlobalFunctions::value_type& p: global_functions) - p.second.emit_cython_pxd(pxdFile); - - pxdFile.emit(true); -} - -/* ************************************************************************* */ -void Module::emit_cython_pyx(FileWriter& pyxFile) const { - // directives... - // allow str to automatically coerce to std::string and back (for python3) - pyxFile.oss << "# cython: c_string_type=str, c_string_encoding=ascii\n\n"; - - // headers... - string pxdHeader = name; - pyxFile.oss << "cimport numpy as np\n" - "import numpy as npp\n" - "cimport " << pxdHeader << "\n" - "from ."<< pxdHeader << " cimport shared_ptr\n" - "from ."<< pxdHeader << " cimport dynamic_pointer_cast\n" - "from ."<< pxdHeader << " cimport make_shared\n"; - - pyxFile.oss << "# C helper function that copies all arguments into a positional list.\n" - "cdef list process_args(list keywords, tuple args, dict kwargs):\n" - " cdef str keyword\n" - " cdef int n = len(args), m = len(keywords)\n" - " cdef list params = list(args)\n" - " assert len(args)+len(kwargs) == m, 'Expected {} arguments'.format(m)\n" - " try:\n" - " return params + [kwargs[keyword] for keyword in keywords[n:]]\n" - " except:\n" - " raise ValueError('Epected arguments ' + str(keywords))\n"; - - // import all typedefs, e.g. from gtsam_wrapper cimport Key, so we don't need to say gtsam.Key - for(const Qualified& q: Qualified::BasicTypedefs) { - pyxFile.oss << "from " << pxdHeader << " cimport " << q.pxdClassName() << "\n"; - } - pyxFile.oss << "from gtsam_eigency.core cimport *\n" - "from libcpp cimport bool\n\n" - "from libcpp.pair cimport pair\n" - "from libcpp.string cimport string\n" - "from cython.operator cimport dereference as deref\n\n\n"; - - // all classes include all forward declarations - std::vector allClasses = expandedClasses; - for(const ForwardDeclaration& fd: forward_declarations) - allClasses.push_back(fd.cls); - - for(const Class& cls: expandedClasses) - cls.emit_cython_pyx(pyxFile, allClasses); - pyxFile.oss << "\n"; - - //... 
wrap global functions - for(const GlobalFunctions::value_type& p: global_functions) - p.second.emit_cython_pyx(pyxFile); - pyxFile.emit(true); -} - -/* ************************************************************************* */ -void Module::generateIncludes(FileWriter& file) const { - - // collect includes - vector all_includes(includes); - - // sort and remove duplicates - sort(all_includes.begin(), all_includes.end()); - vector::const_iterator last_include = unique(all_includes.begin(), all_includes.end()); - vector::const_iterator it = all_includes.begin(); - // add includes to file - for (; it != last_include; ++it) - file.oss << "#include <" << *it << ">" << endl; - file.oss << "\n"; -} - - -/* ************************************************************************* */ - void Module::finish_wrapper(FileWriter& file, const std::vector& functionNames) const { - file.oss << "void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - file.oss << "{\n"; - file.oss << " mstream mout;\n"; // Send stdout to MATLAB console - file.oss << " std::streambuf *outbuf = std::cout.rdbuf(&mout);\n\n"; - file.oss << " _" << name << "_RTTIRegister();\n\n"; - file.oss << " int id = unwrap(in[0]);\n\n"; - file.oss << " try {\n"; - file.oss << " switch(id) {\n"; - for(size_t id = 0; id < functionNames.size(); ++id) { - file.oss << " case " << id << ":\n"; - file.oss << " " << functionNames[id] << "(nargout, out, nargin-1, in+1);\n"; - file.oss << " break;\n"; - } - file.oss << " }\n"; - file.oss << " } catch(const std::exception& e) {\n"; - file.oss << " mexErrMsgTxt((\"Exception from gtsam:\\n\" + std::string(e.what()) + \"\\n\").c_str());\n"; - file.oss << " }\n"; - file.oss << "\n"; - file.oss << " std::cout.rdbuf(outbuf);\n"; // Restore cout - file.oss << "}\n"; - } - -/* ************************************************************************* */ -vector Module::ExpandTypedefInstantiations(const vector& classes, const vector instantiations) { - - vector expandedClasses = classes; - - for(const TemplateInstantiationTypedef& inst: instantiations) { - // Add the new class to the list - expandedClasses.push_back(inst.findAndExpand(classes)); - } - - // Remove all template classes - for(size_t i = 0; i < expandedClasses.size(); ++i) - if(!expandedClasses[i].templateArgs.empty()) { - expandedClasses.erase(expandedClasses.begin() + size_t(i)); - -- i; - } - - return expandedClasses; -} - -/* ************************************************************************* */ -vector Module::GenerateValidTypes(const vector& classes, const vector& forwardDeclarations, const vector& typedefs) { - vector validTypes; - for(const ForwardDeclaration& fwDec: forwardDeclarations) { - validTypes.push_back(fwDec.name()); - } - validTypes.push_back("void"); - validTypes.push_back("string"); - validTypes.push_back("int"); - validTypes.push_back("bool"); - validTypes.push_back("char"); - validTypes.push_back("unsigned char"); - validTypes.push_back("size_t"); - validTypes.push_back("double"); - validTypes.push_back("Vector"); - validTypes.push_back("Matrix"); - //Create a list of parsed classes for dependency checking - for(const Class& cls: classes) { - validTypes.push_back(cls.qualifiedName("::")); - } - for(const TypedefPair& p: typedefs) { - validTypes.push_back(p.newType.qualifiedName("::")); - } - - return validTypes; -} - -/* ************************************************************************* */ -void Module::WriteCollectorsAndCleanupFcn(FileWriter& wrapperFile, const std::string& 
moduleName, const std::vector& classes) { - // Generate all collectors - for(const Class& cls: classes) { - const string matlabUniqueName = cls.qualifiedName(), - cppName = cls.qualifiedName("::"); - wrapperFile.oss << "typedef std::set*> " - << "Collector_" << matlabUniqueName << ";\n"; - wrapperFile.oss << "static Collector_" << matlabUniqueName << - " collector_" << matlabUniqueName << ";\n"; - } - - // generate mexAtExit cleanup function - wrapperFile.oss << - "\nvoid _deleteAllObjects()\n" - "{\n" - " mstream mout;\n" // Send stdout to MATLAB console - " std::streambuf *outbuf = std::cout.rdbuf(&mout);\n\n" - " bool anyDeleted = false;\n"; - for(const Class& cls: classes) { - const string matlabUniqueName = cls.qualifiedName(); - const string cppName = cls.qualifiedName("::"); - const string collectorType = "Collector_" + matlabUniqueName; - const string collectorName = "collector_" + matlabUniqueName; - // The extra curly-braces around the for loops work around a limitation in MSVC (existing - // since 2005!) preventing more than 248 blocks. - wrapperFile.oss << - " { for(" << collectorType << "::iterator iter = " << collectorName << ".begin();\n" - " iter != " << collectorName << ".end(); ) {\n" - " delete *iter;\n" - " " << collectorName << ".erase(iter++);\n" - " anyDeleted = true;\n" - " } }\n"; - } - wrapperFile.oss << - " if(anyDeleted)\n" - " cout <<\n" - " \"WARNING: Wrap modules with variables in the workspace have been reloaded due to\\n\"\n" - " \"calling destructors, call 'clear all' again if you plan to now recompile a wrap\\n\"\n" - " \"module, so that your recompiled module is used instead of the old one.\" << endl;\n" - " std::cout.rdbuf(outbuf);\n" // Restore cout - "}\n\n"; -} - -/* ************************************************************************* */ -void Module::WriteRTTIRegistry(FileWriter& wrapperFile, const std::string& moduleName, const std::vector& classes) { - wrapperFile.oss << - "void _" << moduleName << "_RTTIRegister() {\n" - " const mxArray *alreadyCreated = mexGetVariablePtr(\"global\", \"gtsam_" + moduleName + "_rttiRegistry_created\");\n" - " if(!alreadyCreated) {\n" - " std::map types;\n"; - for(const Class& cls: classes) { - if(cls.isVirtual) - wrapperFile.oss << - " types.insert(std::make_pair(typeid(" << cls.qualifiedName("::") << ").name(), \"" << cls.qualifiedName(".") << "\"));\n"; - } - wrapperFile.oss << "\n"; - - wrapperFile.oss << - " mxArray *registry = mexGetVariable(\"global\", \"gtsamwrap_rttiRegistry\");\n" - " if(!registry)\n" - " registry = mxCreateStructMatrix(1, 1, 0, NULL);\n" - " typedef std::pair StringPair;\n" - " for(const StringPair& rtti_matlab: types) {\n" - " int fieldId = mxAddField(registry, rtti_matlab.first.c_str());\n" - " if(fieldId < 0)\n" - " mexErrMsgTxt(\"gtsam wrap: Error indexing RTTI types, inheritance will not work correctly\");\n" - " mxArray *matlabName = mxCreateString(rtti_matlab.second.c_str());\n" - " mxSetFieldByNumber(registry, 0, fieldId, matlabName);\n" - " }\n" - " if(mexPutVariable(\"global\", \"gtsamwrap_rttiRegistry\", registry) != 0)\n" - " mexErrMsgTxt(\"gtsam wrap: Error indexing RTTI types, inheritance will not work correctly\");\n" - " mxDestroyArray(registry);\n" - " \n" - " mxArray *newAlreadyCreated = mxCreateNumericMatrix(0, 0, mxINT8_CLASS, mxREAL);\n" - " if(mexPutVariable(\"global\", \"gtsam_" + moduleName + "_rttiRegistry_created\", newAlreadyCreated) != 0)\n" - " mexErrMsgTxt(\"gtsam wrap: Error indexing RTTI types, inheritance will not work correctly\");\n" - " 
mxDestroyArray(newAlreadyCreated);\n" - " }\n" - "}\n" - "\n"; -} - -/* ************************************************************************* */ -void Module::generate_python_wrapper(const string& toolboxPath) const { - - fs::create_directories(toolboxPath); - - // create the unified .cpp switch file - const string wrapperName = name + "_python"; - string wrapperFileName = toolboxPath + "/" + wrapperName + ".cpp"; - FileWriter wrapperFile(wrapperFileName, verbose, "//"); - wrapperFile.oss << "#include \n\n"; - wrapperFile.oss << "using namespace boost::python;\n"; - wrapperFile.oss << "BOOST_PYTHON_MODULE(" + name + ")\n"; - wrapperFile.oss << "{\n"; - - // write out classes - for(const Class& cls: expandedClasses) { - cls.python_wrapper(wrapperFile); - } - - // write out global functions - for(const GlobalFunctions::value_type& p: global_functions) - p.second.python_wrapper(wrapperFile); - - // finish wrapper file - wrapperFile.oss << "}\n"; - - wrapperFile.emit(true); -} - -/* ************************************************************************* */ diff --git a/wrap/Module.h b/wrap/Module.h deleted file mode 100644 index 2a8344551..000000000 --- a/wrap/Module.h +++ /dev/null @@ -1,95 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Module.h - * @brief describes module to be wrapped - * @author Frank Dellaert - * @author Richard Roberts - **/ - -#pragma once - -#include "Class.h" -#include "GlobalFunction.h" -#include "TemplateInstantiationTypedef.h" -#include "ForwardDeclaration.h" -#include "TypedefPair.h" - -#include -#include -#include - -namespace wrap { - -/** - * A module just has a name and a list of classes - */ -struct Module { - - // Filled during parsing: - std::string name; ///< module name - bool verbose; ///< verbose flag - std::vector classes; ///< list of classes - std::vector uninstantiatedClasses; ///< list of template classes after instantiated - std::vector templateInstantiationTypedefs; ///< list of template instantiations - std::vector forward_declarations; - std::vector includes; ///< Include statements - GlobalFunctions global_functions; - std::vector typedefs; - - // After parsing: - std::vector expandedClasses; - bool hasSerialiable; - TypeAttributesTable typeAttributes; - - /// constructor that parses interface file - Module(const std::string& interfacePath, const std::string& moduleName, - bool enable_verbose = true); - - /// Dummy constructor that does no parsing - use only for testing - Module(const std::string& moduleName, bool enable_verbose = true); - - /// non-const function that performs parsing - typically called by constructor - /// Throws exception on failure - void parseMarkup(const std::string& data); - - /// MATLAB code generation: - void generate_matlab_wrapper(const std::string& path) const; - - /// Cython code generation: - void generate_cython_wrapper(const std::string& path, const std::string& pxdImports = "") const; - void emit_cython_pxd(FileWriter& file) const; - void emit_cython_pyx(FileWriter& file) const; - - void generateIncludes(FileWriter& file) const; - - void finish_wrapper(FileWriter& file, - const std::vector& functionNames) const; - - /// Python 
code generation: - void generate_python_wrapper(const std::string& path) const; - -private: - static std::vector ExpandTypedefInstantiations( - const std::vector& classes, - const std::vector instantiations); - static std::vector GenerateValidTypes( - const std::vector& classes, - const std::vector& forwardDeclarations, - const std::vector& typedefs); - static void WriteCollectorsAndCleanupFcn(FileWriter& wrapperFile, - const std::string& moduleName, const std::vector& classes); - static void WriteRTTIRegistry(FileWriter& wrapperFile, - const std::string& moduleName, const std::vector& classes); -}; - -} // \namespace wrap diff --git a/wrap/OverloadedFunction.h b/wrap/OverloadedFunction.h deleted file mode 100644 index 6bcb72d94..000000000 --- a/wrap/OverloadedFunction.h +++ /dev/null @@ -1,140 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file OverloadedFunction.h - * @brief Function that can overload its arguments only - * @author Frank Dellaert - * @date Nov 13, 2014 - **/ - -#pragma once - -#include "Function.h" -#include "Argument.h" -#include -namespace wrap { - -/** - * ArgumentList Overloads - */ -class ArgumentOverloads { -public: - std::vector argLists_; - -public: - size_t nrOverloads() const { return argLists_.size(); } - - const ArgumentList& argumentList(size_t i) const { return argLists_.at(i); } - - void push_back(const ArgumentList& args) { argLists_.push_back(args); } - - std::vector expandArgumentListsTemplate( - const TemplateSubstitution& ts) const { - std::vector result; - for (const ArgumentList& argList : argLists_) { - ArgumentList instArgList = argList.expandTemplate(ts); - result.push_back(instArgList); - } - return result; - } - - /// Expand templates, imperative ! 
- virtual void ExpandTemplate(const TemplateSubstitution& ts) { - argLists_ = expandArgumentListsTemplate(ts); - } - - void verifyArguments(const std::vector& validArgs, - const std::string s) const { - for (const ArgumentList& argList : argLists_) { - for (Argument arg : argList) { - std::string fullType = arg.type.qualifiedName("::"); - if (find(validArgs.begin(), validArgs.end(), fullType) == - validArgs.end()) - throw DependencyMissing(fullType, "checking argument of " + s); - } - } - } - - friend std::ostream& operator<<(std::ostream& os, - const ArgumentOverloads& overloads) { - for (const ArgumentList& argList : overloads.argLists_) - os << argList << std::endl; - return os; - } - - std::string pyx_resolveOverloadParams(const ArgumentList& args, bool isVoid, - size_t indentLevel = 2) const { - std::string indent; - for (size_t i = 0; i < indentLevel; ++i) - indent += " "; - std::string s; - s += indent + "__params = process_args([" + args.pyx_paramsList() - + "], args, kwargs)\n"; - s += args.pyx_castParamsToPythonType(indent); - if (args.size() > 0) { - for (size_t i = 0; i < args.size(); ++i) { - // For python types we can do the assert after the assignment and save list accesses - if (args[i].type.isNonBasicType() || args[i].type.isEigen()) { - std::string param = args[i].name; - s += indent + "assert isinstance(" + param + ", " - + args[i].type.pyxArgumentType() + ")"; - if (args[i].type.isEigen()) { - s += " and " + param + ".ndim == " - + ((args[i].type.pyxClassName() == "Vector") ? "1" : "2"); - } - s += "\n"; - } - } - } - return s; - } -}; - -class OverloadedFunction : public Function, public ArgumentOverloads { -public: - bool addOverload(const std::string& name, const ArgumentList& args, - boost::optional instName = boost::none, - bool verbose = false) { - bool first = initializeOrCheck(name, instName, verbose); - ArgumentOverloads::push_back(args); - return first; - } - -private: -}; - -// Templated checking functions -// TODO: do this via polymorphism, use transform ? - -template -static std::map expandMethodTemplate( - const std::map& methods, const TemplateSubstitution& ts) { - std::map result; - typedef std::pair NamedMethod; - for (NamedMethod namedMethod : methods) { - F instMethod = namedMethod.second; - instMethod.expandTemplate(ts); - namedMethod.second = instMethod; - result.insert(namedMethod); - } - return result; -} - -template -inline void verifyArguments(const std::vector& validArgs, - const std::map& vt) { - typedef typename std::map::value_type NamedMethod; - for (const NamedMethod& namedMethod : vt) - namedMethod.second.verifyArguments(validArgs); -} - -} // \namespace wrap diff --git a/wrap/Qualified.cpp b/wrap/Qualified.cpp deleted file mode 100644 index 947e51d54..000000000 --- a/wrap/Qualified.cpp +++ /dev/null @@ -1,5 +0,0 @@ -#include - -namespace wrap { - std::vector Qualified::BasicTypedefs; -} diff --git a/wrap/Qualified.h b/wrap/Qualified.h deleted file mode 100644 index 416db239d..000000000 --- a/wrap/Qualified.h +++ /dev/null @@ -1,370 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Qualified.h - * @brief Qualified name - * @author Frank Dellaert - * @date Nov 11, 2014 - **/ - -#pragma once - -#include -#include -#include -#include - -namespace wrap { - -/** - * Class to encapuslate a qualified name, i.e., with (nested) namespaces - */ -class Qualified { - -//protected: -public: - - std::vector namespaces_; ///< Stack of namespaces - std::string name_; ///< type name - static std::vector BasicTypedefs; - - friend struct TypeGrammar; - friend class TemplateSubstitution; - -public: - - /// the different categories - typedef enum { - CLASS = 1, EIGEN = 2, BASIS = 3, VOID = 4 - } Category; - Category category; - - /// Default constructor - Qualified() : - category(VOID) { - } - - /// Construct from name and optional category - Qualified(const std::string& n, Category c = CLASS) : - name_(n), category(c) { - } - - /// Construct from scoped name and optional category - Qualified(const std::string& ns1, const std::string& n, Category c = CLASS) : - name_(n), category(c) { - namespaces_.push_back(ns1); - } - - /// Construct from doubly scoped name and optional category - Qualified(const std::string& ns1, const std::string& ns2, - const std::string& n, Category c = CLASS) : - name_(n), category(c) { - namespaces_.push_back(ns1); - namespaces_.push_back(ns2); - } - - /// Construct from arbitrarily scoped name - Qualified(std::vector ns, const std::string& name) : - namespaces_(ns), name_(name), category(CLASS) { - } - - // Destructor - virtual ~Qualified() {} - - std::string name() const { - return name_; - } - - std::vector namespaces() const { - return namespaces_; - } - - // Qualified is 'abused' as template argument name as well - // this function checks whether *this matches with templateArg - bool match(const std::string& templateArg) const { - return (name_ == templateArg && namespaces_.empty()); //TODO && category == CLASS); - } - - bool match(const std::vector& templateArgs) const { - for(const std::string& s: templateArgs) - if (match(s)) return true; - return false; - } - - void rename(const Qualified& q) { - namespaces_ = q.namespaces_; - name_ = q.name_; - category = q.category; - } - - void expand(const std::string& expansion) { - name_ += expansion; - } - - bool operator==(const Qualified& other) const { - return namespaces_ == other.namespaces_ && name_ == other.name_ - && category == other.category; - } - - bool empty() const { - return namespaces_.empty() && name_.empty(); - } - - virtual void clear() { - namespaces_.clear(); - name_.clear(); - category = VOID; - } - - bool isScalar() const { - return (name() == "bool" || name() == "char" - || name() == "unsigned char" || name() == "int" - || name() == "size_t" || name() == "double"); - } - - bool isVoid() const { - return name() == "void"; - } - - bool isString() const { - return name() == "string"; - } - - bool isEigen() const { - return name() == "Vector" || name() == "Matrix"; - } - - bool isBasicTypedef() const { - return std::find(Qualified::BasicTypedefs.begin(), - Qualified::BasicTypedefs.end(), - *this) != Qualified::BasicTypedefs.end(); - } - - bool isNonBasicType() const { - return name() != "This" && !isString() && !isScalar() && !isEigen() && - !isVoid() && !isBasicTypedef(); - } - -public: - - static Qualified MakeClass(std::vector namespaces, - const std::string& name) { - return Qualified(namespaces, name); - } - 
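As a hedged aside, the scoped-name constructors and category queries of Qualified shown above can be exercised with a small usage sketch; this is hypothetical and assumes the removed wrap/Qualified.h were still on an include path:

```
// Hypothetical usage sketch of the Qualified helper defined above,
// assuming the removed wrap/Qualified.h were still available.
#include <iostream>
#include "Qualified.h"

int main() {
  // a doubly scoped class name, rendered with a "::" delimiter
  wrap::Qualified q("gtsam", "noiseModel", "Gaussian");
  std::cout << q.qualifiedName("::") << "\n";          // gtsam::noiseModel::Gaussian

  // Eigen types carry their own category and answer isEigen()
  wrap::Qualified v("Vector", wrap::Qualified::EIGEN);
  std::cout << std::boolalpha << v.isEigen() << "\n";  // true
  return 0;
}
```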
- static Qualified MakeEigen(const std::string& name) { - return Qualified(name, EIGEN); - } - - static Qualified MakeBasis(const std::string& name) { - return Qualified(name, BASIS); - } - - static Qualified MakeVoid() { - return Qualified("void", VOID); - } - - /// Return a qualified namespace using given delimiter - std::string qualifiedNamespaces(const std::string& delimiter = "") const { - std::string result; - for (std::size_t i = 0; i < namespaces_.size(); ++i) - result += (namespaces_[i] + ((i VectorXd, Matrix --> MatrixXd - std::string pxdClassName() const { - if (isEigen()) - return name_ + "Xd"; - else if (isNonBasicType()) - return "C" + qualifiedName("_", 1); - else return name_; - } - - /// name of Python classes in pyx - /// They have the same name with the corresponding Cython classes in pxd - /// But note that they are different: These are Python classes in the pyx file - /// To refer to a Cython class in pyx, we need to add "pxd.", e.g. pxd.noiseModel_Gaussian - /// see the other function pxd_class_in_pyx for that purpose. - std::string pyxClassName() const { - if (isEigen()) - return name_; - else - return qualifiedName("_", 1); - } - - /// Python type of function arguments in pyx to interface with normal python scripts - /// Eigen types become np.ndarray (There's no Eigen types, e.g. VectorXd, in - /// Python. We have to pass in numpy array in the arguments, which will then be - /// converted to Eigen types in Cython) - std::string pyxArgumentType() const { - if (isEigen()) - return "np.ndarray"; - else - return qualifiedName("_", 1); - } - - /// return the Cython class in pxd corresponding to a Python class in pyx - std::string pxd_class_in_pyx() const { - if (isNonBasicType()) { - return pxdClassName(); - } else if (isEigen()) { - return name_ + "Xd"; - } else // basic types and not Eigen - return name_; - } - - /// the internal Cython shared obj in a Python class wrappper - std::string shared_pxd_obj_in_pyx() const { - return pxdClassName() + "_"; - } - - std::string make_shared_pxd_class_in_pyx() const { - return "make_shared[" + pxd_class_in_pyx() + "]"; - } - - std::string shared_pxd_class_in_pyx() const { - return "shared_ptr[" + pxd_class_in_pyx() + "]"; - } - - friend std::ostream& operator<<(std::ostream& os, const Qualified& q) { - os << q.qualifiedName("::"); - return os; - } -}; - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct TypeGrammar: classic::grammar { - - wrap::Qualified& result_; ///< successful parse will be placed in here - - /// Construct type grammar and specify where result is placed - TypeGrammar(wrap::Qualified& result) : - result_(result) { - } - - /// Definition of type grammar - template - struct definition: BasicRules { - - typedef classic::rule Rule; - - Rule void_p, basisType_p, eigenType_p, namespace_del_p, class_p, type_p; - - definition(TypeGrammar const& self) { - - using namespace wrap; - using namespace classic; - typedef BasicRules Basic; - - // HACK: use const values instead of using enums themselves - somehow this doesn't result in values getting assigned to gibberish - static const Qualified::Category EIGEN = Qualified::EIGEN; - static const Qualified::Category BASIS = Qualified::BASIS; - static const Qualified::Category CLASS = Qualified::CLASS; - static const Qualified::Category VOID = Qualified::VOID; - - void_p = str_p("void") // - [assign_a(self.result_.name_)] // - [assign_a(self.result_.category, 
VOID)]; - - basisType_p = Basic::basisType_p // - [assign_a(self.result_.name_)] // - [assign_a(self.result_.category, BASIS)]; - - eigenType_p = Basic::eigenType_p // - [assign_a(self.result_.name_)] // - [assign_a(self.result_.category, EIGEN)]; - - namespace_del_p = Basic::namespace_p // - [push_back_a(self.result_.namespaces_)] >> str_p("::"); - - class_p = *namespace_del_p >> Basic::className_p // - [assign_a(self.result_.name_)] // - [assign_a(self.result_.category, CLASS)]; - - type_p = void_p | basisType_p | class_p | eigenType_p; - } - - Rule const& start() const { - return type_p; - } - - }; -}; -// type_grammar - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -template -struct TypeListGrammar: public classic::grammar > { - - typedef std::vector TypeList; - TypeList& result_; ///< successful parse will be placed in here - - /// Construct type grammar and specify where result is placed - TypeListGrammar(TypeList& result) : - result_(result) { - } - - /// Definition of type grammar - template - struct definition { - - wrap::Qualified type; ///< temporary for use during parsing - TypeGrammar type_g; ///< Individual Type grammars - - classic::rule type_p, typeList_p; - - definition(TypeListGrammar const& self) : - type_g(type) { - using namespace classic; - - type_p = type_g[push_back_a(self.result_, type)][clear_a(type)]; - - typeList_p = OPEN >> !type_p >> *(',' >> type_p) >> CLOSE; - } - - classic::rule const& start() const { - return typeList_p; - } - - }; -}; -// TypeListGrammar - -/* ************************************************************************* */ -// Needed for other parsers in Argument.h and ReturnType.h -static const bool T = true; - -} // \namespace wrap - diff --git a/wrap/README.md b/wrap/README.md index 014577b5a..f72c3f652 100644 --- a/wrap/README.md +++ b/wrap/README.md @@ -1,27 +1,76 @@ -# WRAP README -The wrap library wraps the GTSAM library into a MATLAB toolbox. +# WRAP -It was designed to be more general than just wrapping GTSAM, but a small amount of GTSAM specific code exists in matlab.h, the include file that is included by the mex files. The GTSAM-specific functionality consists primarily of handling of Eigen Matrix and Vector classes. +The wrap library wraps the GTSAM library into a Python library or MATLAB toolbox. +It was designed to be more general than just wrapping GTSAM. For notes on creating a wrap interface, see `gtsam.h` for what features can be wrapped into a toolbox, as well as the current state of the toolbox for GTSAM. -For notes on creating a wrap interface, see gtsam.h for what features can be wrapped into a toolbox, as well as the current state of the toolbox for gtsam. For more technical details on the interface, please read comments in matlab.h +## Prerequisites: Pybind11 and pyparsing + +1. This library uses `pybind11`, which is included as a subdirectory in GTSAM. +2. The `interface_parser.py` in this library uses `pyparsing` to parse the interface file `gtsam.h`. Please install it first in your current Python environment before attempting the build. + ``` + python3 -m pip install pyparsing + ``` + +## GTSAM Python wrapper + +**WARNING: On macOS, you have to statically build GTSAM to use the wrapper.** + +1. Set `GTSAM_BUILD_PYTHON=ON` while configuring the build with `cmake`. +1. What you can do in the `build` folder: + 1. 
Just run `python`, then import GTSAM and play around: + ``` + import gtsam + gtsam.__dir__() + ``` + + 1. Run the unittests: + ``` + python -m unittest discover + ``` + 1. Edit the unittests in `python/gtsam/*.py` and simply rerun the test. + They were symlinked to `/gtsam/*.py` to facilitate fast development. + ``` + python -m unittest gtsam/tests/test_Pose3.py + ``` + - NOTE: You might need to re-run `cmake ..` if files are deleted or added. +1. Do `make install` and `cd /python`. Here, you can: + 1. Run the unittests: + ``` + python setup.py test + ``` + 2. Install `gtsam` to your current Python environment. + ``` + python setup.py install + ``` + - NOTE: It's a good idea to create a virtual environment; otherwise it will be installed in your system Python's site-packages. + + +## Old GTSAM Wrapper + +*Outdated note from the original wrap.* + +TODO: Update this. + +It was designed to be more general than just wrapping GTSAM, but a small amount of GTSAM-specific code exists in `matlab.h`, the include file that is included by the `mex` files. The GTSAM-specific functionality consists primarily of handling of Eigen Matrix and Vector classes. + +For notes on creating a wrap interface, see `gtsam.h` for what features can be wrapped into a toolbox, as well as the current state of the toolbox for GTSAM. For more technical details on the interface, please read comments in `matlab.h`. Some good things to know: OBJECT CREATION -- Classes are created by special constructors, e.g., `new_GaussianFactorGraph_.cpp`. - These constructors are called from the MATLAB class `@GaussianFactorGraph`. - `new_GaussianFactorGraph_` calls wrap_constructed in `matlab.h`, see documentation there - + METHOD (AND CONSTRUCTOR) ARGUMENTS - Simple argument types of methods, such as "double", will be converted in the - mex wrappers by calling unwrap, defined in matlab.h + `mex` wrappers by calling unwrap, defined in matlab.h - Vector and Matrix arguments are normally passed by reference in GTSAM, but - in gtsam.h you need to pretend they are passed by value, to trigger the - generation of the correct conversion routines unwrap<Vector> and unwrap<Matrix> + in `gtsam.h` you need to pretend they are passed by value, to trigger the + generation of the correct conversion routines `unwrap<Vector>` and `unwrap<Matrix>` - passing classes as arguments works, provided they are passed by reference.
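For illustration only, a minimal, hypothetical interface-file fragment in the style of `gtsam.h` (the class and method names are invented, not taken from GTSAM or from this PR) might declare these two cases like so:
```
// Hypothetical interface-file sketch, for illustration only.
class Example {
  Example();
  // Written as if passed by value, so the conversion routine for Vector is generated,
  // even though the underlying C++ method may take a const reference.
  void setWeights(Vector w);
  // A wrapped class is passed by (const) reference.
  void addPrior(const gtsam::Pose3& prior);
};
```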
This triggers a call to unwrap_shared_ptr - - \ No newline at end of file diff --git a/wrap/ReturnType.cpp b/wrap/ReturnType.cpp deleted file mode 100644 index fdf86d975..000000000 --- a/wrap/ReturnType.cpp +++ /dev/null @@ -1,101 +0,0 @@ -/** - * @file ReturnType.cpp - * @date Nov 13, 2014 - * @author Frank Dellaert - */ - -#include "ReturnType.h" -#include "Class.h" -#include "utilities.h" -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -void ReturnType::wrap_result(const string& out, const string& result, - FileWriter& wrapperFile, - const TypeAttributesTable& typeAttributes) const { - string cppType = qualifiedName("::"), matlabType = qualifiedName("."); - - if (category == CLASS) { - // Handle Classes - string objCopy, ptrType; - const bool isVirtual = typeAttributes.attributes(cppType).isVirtual; - if (isPtr) - objCopy = result; // a shared pointer can always be passed as is - else { - // but if we want an actual new object, things get more complex - if (isVirtual) - // A virtual class needs to be cloned, so the whole hierarchy is - // returned - objCopy = result + ".clone()"; - else { - // ...but a non-virtual class can just be copied - objCopy = "boost::make_shared<" + cppType + ">(" + result + ")"; - } - } - // e.g. out[1] = wrap_shared_ptr(pairResult.second,"gtsam.Point3", false); - wrapperFile.oss << out << " = wrap_shared_ptr(" << objCopy << ",\"" - << matlabType << "\", " << (isVirtual ? "true" : "false") - << ");\n"; - - } else if (isPtr) { - // Handle shared pointer case for BASIS/EIGEN/VOID - // This case does not actually occur in GTSAM wrappers, so untested! - wrapperFile.oss << " {\n boost::shared_ptr<" << qualifiedName("::") - << "> shared(" << result << ");" << endl; - wrapperFile.oss << out << " = wrap_shared_ptr(shared,\"" << matlabType - << "\");\n }\n"; - - } else if (matlabType != "void") - // Handle normal case case for BASIS/EIGEN - wrapperFile.oss << out << " = wrap< " << qualifiedName("::") << " >(" << result - << ");\n"; -} - -/* ************************************************************************* */ -void ReturnType::emit_cython_pxd( - FileWriter& file, const std::string& className, - const std::vector& templateArgs) const { - string cythonType; - if (name() == "This") - cythonType = className; - else if (match(templateArgs)) - cythonType = name(); - else - cythonType = pxdClassName(); - if (isPtr) cythonType = "shared_ptr[" + cythonType + "]"; - file.oss << cythonType; -} - -/* ************************************************************************* */ -std::string ReturnType::pyx_returnType(bool addShared) const { - string retType = pxd_class_in_pyx(); - if (isPtr || (isNonBasicType() && addShared)) - retType = "shared_ptr[" + retType + "]"; - return retType; -} - -/* ************************************************************************* */ -std::string ReturnType::pyx_casting(const std::string& var, - bool isSharedVar) const { - if (isEigen()) { - string s = "ndarray_copy(" + var + ")"; - if (pyxClassName() == "Vector") - return s + ".squeeze()"; - else return s; - } - else if (isNonBasicType()) { - if (isPtr || isSharedVar) - return pyxClassName() + ".cyCreateFromShared(" + var + ")"; - else { - // construct a shared_ptr if var is not a shared ptr - return pyxClassName() + ".cyCreateFromShared(" + make_shared_pxd_class_in_pyx() + - + "(" + var + "))"; - } - } else - return var; -} - -/* 
************************************************************************* */ diff --git a/wrap/ReturnType.h b/wrap/ReturnType.h deleted file mode 100644 index 8d78bb48f..000000000 --- a/wrap/ReturnType.h +++ /dev/null @@ -1,89 +0,0 @@ -/** - * @file ReturnValue.h - * @brief Encapsulates a return type of a method - * @date Nov 13, 2014 - * @author Frank Dellaert - */ - -#include "Qualified.h" -#include "FileWriter.h" -#include "TypeAttributesTable.h" -#include "utilities.h" -#include - -#pragma once - -namespace wrap { - -/** - * Encapsulates return value of a method or function - */ -struct ReturnType : public Qualified { - bool isPtr; - - friend struct ReturnValueGrammar; - - /// Makes a void type - ReturnType() : isPtr(false) {} - - /// Constructor, no namespaces - ReturnType(const std::string& name, Category c = CLASS, bool ptr = false) - : Qualified(name, c), isPtr(ptr) {} - - void clear() override { - Qualified::clear(); - isPtr = false; - } - - /// Check if this type is in a set of valid types - template - void verify(TYPES validtypes, const std::string& s) const { - std::string key = qualifiedName("::"); - if (find(validtypes.begin(), validtypes.end(), key) == validtypes.end()) - throw DependencyMissing(key, "checking return type of " + s); - } - - /// @param className the actual class name to use when "This" is specified - void emit_cython_pxd(FileWriter& file, const std::string& className, - const std::vector& templateArgs) const; - - std::string pyx_returnType(bool addShared = true) const; - std::string pyx_casting(const std::string& var, - bool isSharedVar = true) const; - -private: - friend struct ReturnValue; - - /// Example: out[1] = wrap_shared_ptr(pairResult.second,"Test", false); - void wrap_result(const std::string& out, const std::string& result, - FileWriter& wrapperFile, - const TypeAttributesTable& typeAttributes) const; -}; - -//****************************************************************************** -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ReturnTypeGrammar : public classic::grammar { - wrap::ReturnType& result_; ///< successful parse will be placed in here - - TypeGrammar type_g; - - /// Construct ReturnType grammar and specify where result is placed - ReturnTypeGrammar(wrap::ReturnType& result) - : result_(result), type_g(result_) {} - - /// Definition of type grammar - template - struct definition { - classic::rule type_p; - - definition(ReturnTypeGrammar const& self) { - using namespace classic; - type_p = self.type_g >> !ch_p('*')[assign_a(self.result_.isPtr, T)]; - } - - classic::rule const& start() const { return type_p; } - }; -}; -// ReturnTypeGrammar - -} // \namespace wrap diff --git a/wrap/ReturnValue.cpp b/wrap/ReturnValue.cpp deleted file mode 100644 index e58e85602..000000000 --- a/wrap/ReturnValue.cpp +++ /dev/null @@ -1,102 +0,0 @@ -/** - * @file ReturnValue.cpp - * @date Dec 1, 2011 - * @author Alex Cunningham - * @author Andrew Melim - * @author Richard Roberts - */ - -#include "ReturnValue.h" -#include "utilities.h" -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -ReturnValue ReturnValue::expandTemplate(const TemplateSubstitution& ts) const { - ReturnValue instRetVal = *this; - instRetVal.type1 = ts.tryToSubstitite(type1); - if (isPair) instRetVal.type2 = ts.tryToSubstitite(type2); - return instRetVal; -} - -/* ************************************************************************* */ -string 
ReturnValue::returnType() const { - if (isPair) - return "pair< " + type1.qualifiedName("::") + ", " + - type2.qualifiedName("::") + " >"; - else - return type1.qualifiedName("::"); -} - -/* ************************************************************************* */ -string ReturnValue::matlab_returnType() const { - return isPair ? "[first,second]" : "result"; -} - -/* ************************************************************************* */ -void ReturnValue::wrap_result(const string& result, FileWriter& wrapperFile, - const TypeAttributesTable& typeAttributes) const { - if (isPair) { - // For a pair, store the returned pair so we do not evaluate the function - // twice - wrapperFile.oss << " auto pairResult = " << result - << ";\n"; - type1.wrap_result(" out[0]", "pairResult.first", wrapperFile, - typeAttributes); - type2.wrap_result(" out[1]", "pairResult.second", wrapperFile, - typeAttributes); - } else { // Not a pair - type1.wrap_result(" out[0]", result, wrapperFile, typeAttributes); - } -} - -/* ************************************************************************* */ -void ReturnValue::emit_matlab(FileWriter& proxyFile) const { - string output; - if (isPair) - proxyFile.oss << "[ varargout{1} varargout{2} ] = "; - else if (type1.category != ReturnType::VOID) - proxyFile.oss << "varargout{1} = "; -} - -/* ************************************************************************* */ -void ReturnValue::emit_cython_pxd( - FileWriter& file, const std::string& className, - const std::vector& templateArgs) const { - if (isPair) { - file.oss << "pair["; - type1.emit_cython_pxd(file, className, templateArgs); - file.oss << ","; - type2.emit_cython_pxd(file, className, templateArgs); - file.oss << "] "; - } else { - type1.emit_cython_pxd(file, className, templateArgs); - file.oss << " "; - } -} - -/* ************************************************************************* */ -std::string ReturnValue::pyx_returnType() const { - if (isVoid()) return ""; - if (isPair) { - return "pair [" + type1.pyx_returnType(false) + "," + - type2.pyx_returnType(false) + "]"; - } else { - return type1.pyx_returnType(true); - } -} - -/* ************************************************************************* */ -std::string ReturnValue::pyx_casting(const std::string& var) const { - if (isVoid()) return ""; - if (isPair) { - return "(" + type1.pyx_casting(var + ".first", false) + "," + - type2.pyx_casting(var + ".second", false) + ")"; - } else { - return type1.pyx_casting(var); - } -} - -/* ************************************************************************* */ diff --git a/wrap/ReturnValue.h b/wrap/ReturnValue.h deleted file mode 100644 index 721132797..000000000 --- a/wrap/ReturnValue.h +++ /dev/null @@ -1,126 +0,0 @@ -/** - * @file ReturnValue.h - * - * @brief Encapsulates a return value from a method - * @date Dec 1, 2011 - * @author Alex Cunningham - * @author Richard Roberts - */ - -#include "ReturnType.h" -#include "TemplateSubstitution.h" -#include "FileWriter.h" -#include "TypeAttributesTable.h" -#include "utilities.h" - -#pragma once - -namespace wrap { - -/** - * Encapsulates return type of a method or function, possibly a pair - */ -struct ReturnValue { - - bool isPair; - ReturnType type1, type2; - - friend struct ReturnValueGrammar; - - /// Default constructor - ReturnValue() : - isPair(false) { - } - - /// Construct from type - ReturnValue(const ReturnType& type) : - isPair(false), type1(type) { - } - - /// Construct from pair type arguments - ReturnValue(const ReturnType& t1, const 
ReturnType& t2) : - isPair(true), type1(t1), type2(t2) { - } - - /// Destructor - virtual ~ReturnValue() {} - - virtual void clear() { - type1.clear(); - type2.clear(); - isPair = false; - } - - bool isVoid() const { - return !isPair && !type1.isPtr && (type1.name() == "void"); - } - - bool operator==(const ReturnValue& other) const { - return isPair == other.isPair && type1 == other.type1 - && type2 == other.type2; - } - - /// Substitute template argument - ReturnValue expandTemplate(const TemplateSubstitution& ts) const; - - std::string returnType() const; - - std::string matlab_returnType() const; - - void wrap_result(const std::string& result, FileWriter& wrapperFile, - const TypeAttributesTable& typeAttributes) const; - - void emit_matlab(FileWriter& proxyFile) const; - - /// @param className the actual class name to use when "This" is specified - void emit_cython_pxd(FileWriter& file, const std::string& className, - const std::vector& templateArgs) const; - std::string pyx_returnType() const; - std::string pyx_casting(const std::string& var) const; - - friend std::ostream& operator<<(std::ostream& os, const ReturnValue& r) { - if (!r.isPair && r.type1.category == ReturnType::VOID) - os << "void"; - else - os << r.returnType(); - return os; - } - -}; - -//****************************************************************************** -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ReturnValueGrammar: public classic::grammar { - - wrap::ReturnValue& result_; ///< successful parse will be placed in here - ReturnTypeGrammar returnType1_g, returnType2_g; ///< Type parsers - - /// Construct type grammar and specify where result is placed - ReturnValueGrammar(wrap::ReturnValue& result) : - result_(result), returnType1_g(result.type1), returnType2_g(result.type2) { - } - - /// Definition of type grammar - template - struct definition { - - classic::rule pair_p, returnValue_p; - - definition(ReturnValueGrammar const& self) { - using namespace classic; - - pair_p = (str_p("pair") >> '<' >> self.returnType1_g >> ',' - >> self.returnType2_g >> '>')[assign_a(self.result_.isPair, T)]; - - returnValue_p = pair_p | self.returnType1_g; - } - - classic::rule const& start() const { - return returnValue_p; - } - - }; -}; -// ReturnValueGrammar - -}// \namespace wrap diff --git a/wrap/StaticMethod.cpp b/wrap/StaticMethod.cpp deleted file mode 100644 index 0f812ea61..000000000 --- a/wrap/StaticMethod.cpp +++ /dev/null @@ -1,151 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file StaticMethod.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "StaticMethod.h" -#include "utilities.h" -#include "Class.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -void StaticMethod::proxy_header(FileWriter& proxyFile) const { - string upperName = matlabName(); - upperName[0] = toupper(upperName[0], locale()); - proxyFile.oss << " function varargout = " << upperName << "(varargin)\n"; -} - -/* ************************************************************************* */ -string StaticMethod::wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, const ArgumentList& args) const { - // check arguments - // NOTE: for static functions, there is no object passed - wrapperFile.oss << " checkArguments(\"" << matlabUniqueName << "." << name_ - << "\",nargout,nargin," << args.size() << ");\n"; - - // unwrap arguments, see Argument.cpp - args.matlab_unwrap(wrapperFile, 0); // We start at 0 because there is no self object - - // call method and wrap result - // example: out[0]=wrap(staticMethod(t)); - string expanded = cppClassName + "::" + name_; - if (templateArgValue_) - expanded += ("<" + templateArgValue_->qualifiedName("::") + ">"); - - return expanded; -} - -/* ************************************************************************* */ -void StaticMethod::emit_cython_pxd(FileWriter& file, const Class& cls) const { - for(size_t i = 0; i < nrOverloads(); ++i) { - file.oss << " @staticmethod\n"; - file.oss << " "; - returnVals_[i].emit_cython_pxd(file, cls.pxdClassName(), cls.templateArgs); - file.oss << name_ + ((i>0)?"_" + to_string(i):"") << " \"" << name_ << "\"" << "("; - argumentList(i).emit_cython_pxd(file, cls.pxdClassName(), cls.templateArgs); - file.oss << ") except +\n"; - } -} - -/* ************************************************************************* */ -void StaticMethod::emit_cython_wrapper_pxd(FileWriter& file, - const Class& cls) const { - if (nrOverloads() > 1) { - for (size_t i = 0; i < nrOverloads(); ++i) { - string funcName = name_ + "_" + to_string(i); - file.oss << " @staticmethod\n"; - file.oss << " cdef tuple " + funcName + "(tuple args, dict kwargs)\n"; - } - } -} - -/* ************************************************************************* */ -void StaticMethod::emit_cython_pyx_no_overload(FileWriter& file, - const Class& cls) const { - assert(nrOverloads() == 1); - file.oss << " @staticmethod\n"; - file.oss << " def " << name_ << "("; - argumentList(0).emit_cython_pyx(file); - file.oss << "):\n"; - - /// Call cython corresponding function and return - file.oss << argumentList(0).pyx_convertEigenTypeAndStorageOrder(" "); - string call = pyx_functionCall(cls.pxd_class_in_pyx(), name_, 0); - file.oss << " "; - if (!returnVals_[0].isVoid()) { - file.oss << "return " << returnVals_[0].pyx_casting(call) << "\n"; - } else - file.oss << call << "\n"; - file.oss << "\n"; -} - -/* ************************************************************************* */ -void StaticMethod::emit_cython_pyx(FileWriter& file, const Class& cls) const { - size_t N = nrOverloads(); - if (N == 1) { - emit_cython_pyx_no_overload(file, cls); - return; - } - - // Dealing with overloads.. 
- file.oss << " @staticmethod # overloaded\n"; - file.oss << " def " << name_ << "(*args, **kwargs):\n"; - for (size_t i = 0; i < N; ++i) { - string funcName = name_ + "_" + to_string(i); - file.oss << " success, results = " << cls.pyxClassName() << "." - << funcName << "(args, kwargs)\n"; - file.oss << " if success:\n return results\n"; - } - file.oss << " raise TypeError('Could not find the correct overload')\n\n"; - - // Create cdef methods for all overloaded methods - for(size_t i = 0; i < N; ++i) { - string funcName = name_ + "_" + to_string(i); - file.oss << " @staticmethod\n"; - file.oss << " cdef tuple " + funcName + "(tuple args, dict kwargs):\n"; - file.oss << " cdef list __params\n"; - if (!returnVals_[i].isVoid()) { - file.oss << " cdef " << returnVals_[i].pyx_returnType() << " return_value\n"; - } - file.oss << " try:\n"; - ArgumentList args = argumentList(i); - file.oss << pyx_resolveOverloadParams(args, false, 3); - - /// Call cython corresponding function and return - file.oss << args.pyx_convertEigenTypeAndStorageOrder(" "); - string pxdFuncName = name_ + ((i>0)?"_" + to_string(i):""); - string call = pyx_functionCall(cls.pxd_class_in_pyx(), pxdFuncName, i); - if (!returnVals_[i].isVoid()) { - file.oss << " return_value = " << call << "\n"; - file.oss << " return True, " << returnVals_[i].pyx_casting("return_value") << "\n"; - } else { - file.oss << " " << call << "\n"; - file.oss << " return True, None\n"; - } - file.oss << " except:\n"; - file.oss << " return False, None\n\n"; - } -} - -/* ************************************************************************* */ diff --git a/wrap/StaticMethod.h b/wrap/StaticMethod.h deleted file mode 100644 index dbb918596..000000000 --- a/wrap/StaticMethod.h +++ /dev/null @@ -1,51 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file StaticMethod.h - * @brief describes and generates code for static methods - * @author Frank Dellaert - * @author Alex Cunningham - * @author Richard Roberts - **/ - -#pragma once - -#include "MethodBase.h" - -namespace wrap { - -/// StaticMethod class -struct StaticMethod: public MethodBase { - - typedef const std::string& Str; - - friend std::ostream& operator<<(std::ostream& os, const StaticMethod& m) { - for (size_t i = 0; i < m.nrOverloads(); i++) - os << "static " << m.returnVals_[i] << " " << m.name_ << m.argLists_[i]; - return os; - } - - void emit_cython_pxd(FileWriter& file, const Class& cls) const; - void emit_cython_wrapper_pxd(FileWriter& file, const Class& cls) const; - void emit_cython_pyx(FileWriter& file, const Class& cls) const; - void emit_cython_pyx_no_overload(FileWriter& file, const Class& cls) const; - -protected: - - void proxy_header(FileWriter& proxyFile) const override; - - std::string wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, const ArgumentList& args) const override; -}; - -} // \namespace wrap - diff --git a/wrap/Template.h b/wrap/Template.h deleted file mode 100644 index 32f8e9761..000000000 --- a/wrap/Template.h +++ /dev/null @@ -1,146 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Template.h - * @brief Template name - * @author Frank Dellaert - * @date Nov 11, 2014 - **/ - -#pragma once - -#include - -namespace wrap { - -/// The template specification that goes before a method or a class -class Template { - std::string argName_; - std::vector argValues_; - std::vector intList_; - friend struct TemplateGrammar; -public: - /// The only way to get values into a Template is via our friendly Grammar - Template() { - } - void clear() { - argName_.clear(); - argValues_.clear(); - intList_.clear(); - } - const std::string& argName() const { - return argName_; - } - const std::vector& intList() const { - return intList_; - } - const std::vector& argValues() const { - return argValues_; - } - bool empty() const { - return argValues_.empty() && intList_.empty(); - } - size_t nrValues() const { - return argValues_.size(); - } - const Qualified& operator[](size_t i) const { - return argValues_[i]; - } - bool valid() const { - return !argName_.empty() && argValues_.size() > 0; - } - -}; - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct IntListGrammar: public classic::grammar { - - typedef std::vector IntList; - IntList& result_; ///< successful parse will be placed in here - - /// Construct type grammar and specify where result is placed - IntListGrammar(IntList& result) : - result_(result) { - } - - /// Definition of type grammar - template - struct definition { - - classic::rule integer_p, intList_p; - - definition(IntListGrammar const& self) { - using namespace classic; - - integer_p = int_p[push_back_a(self.result_)]; - - intList_p = '{' >> !integer_p >> 
*(',' >> integer_p) >> '}'; - } - - classic::rule const& start() const { - return intList_p; - } - - }; -}; -// IntListGrammar - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct TemplateGrammar: public classic::grammar { - - Template& result_; ///< successful parse will be placed in here - TypeListGrammar<'{', '}'> argValues_g; ///< TypeList parser - IntListGrammar intList_g; ///< TypeList parser - - /// Construct type grammar and specify where result is placed - TemplateGrammar(Template& result) : - result_(result), argValues_g(result.argValues_), // - intList_g(result.intList_) { - } - - /// Definition of type grammar - template - struct definition: BasicRules { - - classic::rule templateArgValues_p; - - definition(TemplateGrammar const& self) { - using classic::str_p; - using classic::assign_a; - templateArgValues_p = (str_p("template") >> '<' - >> (BasicRules::name_p)[assign_a(self.result_.argName_)] - >> '=' >> (self.argValues_g | self.intList_g) >> '>'); - } - - classic::rule const& start() const { - return templateArgValues_p; - } - - }; -}; -// TemplateGrammar - -/// Cool initializer for tests -static inline boost::optional