diff --git a/.github/scripts/unix.sh b/.github/scripts/unix.sh index 87b0a3100..1676ad537 100644 --- a/.github/scripts/unix.sh +++ b/.github/scripts/unix.sh @@ -71,7 +71,7 @@ function configure() -DGTSAM_USE_SYSTEM_EIGEN=${GTSAM_USE_SYSTEM_EIGEN:-OFF} \ -DGTSAM_USE_SYSTEM_METIS=${GTSAM_USE_SYSTEM_METIS:-OFF} \ -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ - -DGTSAM_SINGLE_TEST_EXE=ON \ + -DGTSAM_SINGLE_TEST_EXE=OFF \ -DBOOST_ROOT=$BOOST_ROOT \ -DBoost_NO_SYSTEM_PATHS=ON \ -DBoost_ARCHITECTURE=-x64 diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index fa2425e4d..c699db0d3 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -20,19 +20,15 @@ jobs: # Github Actions requires a single row to be added to the build matrix. # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. name: [ - ubuntu-20.04-gcc-7, ubuntu-20.04-gcc-9, ubuntu-20.04-clang-9, + ubuntu-22.04-gcc-11, + ubuntu-22.04-clang-14, ] build_type: [Debug, Release] build_unstable: [ON] include: - - name: ubuntu-20.04-gcc-7 - os: ubuntu-20.04 - compiler: gcc - version: "7" - - name: ubuntu-20.04-gcc-9 os: ubuntu-20.04 compiler: gcc @@ -43,14 +39,24 @@ jobs: compiler: clang version: "9" + - name: ubuntu-22.04-gcc-11 + os: ubuntu-22.04 + compiler: gcc + version: "11" + + - name: ubuntu-22.04-clang-14 + os: ubuntu-22.04 + compiler: clang + version: "14" + steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Install Dependencies run: | - # LLVM (clang) 9 is not in Bionic's repositories so we add the official LLVM repository. - if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + # LLVM (clang) 9/14 is not in Bionic's repositories so we add the official LLVM repository. + if [ "${{ matrix.compiler }}" = "clang" ]; then # (ipv4|ha).pool.sks-keyservers.net is the SKS GPG global keyserver pool # ipv4 avoids potential timeouts because of crappy IPv6 infrastructure # 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository diff --git a/.github/workflows/build-macos.yml b/.github/workflows/build-macos.yml index 7b7646328..3fa3c15dd 100644 --- a/.github/workflows/build-macos.yml +++ b/.github/workflows/build-macos.yml @@ -14,7 +14,7 @@ jobs: GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }} strategy: - fail-fast: false + fail-fast: true matrix: # Github Actions requires a single row to be added to the build matrix. # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. 
@@ -32,20 +32,14 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Install Dependencies run: | brew install cmake ninja brew install boost - if [ "${{ matrix.compiler }}" = "gcc" ]; then - brew install gcc@${{ matrix.version }} - echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV - echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV - else - sudo xcode-select -switch /Applications/Xcode.app - echo "CC=clang" >> $GITHUB_ENV - echo "CXX=clang++" >> $GITHUB_ENV - fi + sudo xcode-select -switch /Applications/Xcode.app + echo "CC=clang" >> $GITHUB_ENV + echo "CXX=clang++" >> $GITHUB_ENV - name: Build and Test run: bash .github/scripts/unix.sh -t diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml index 4eb861ecc..442e26e47 100644 --- a/.github/workflows/build-python.yml +++ b/.github/workflows/build-python.yml @@ -14,58 +14,45 @@ jobs: PYTHON_VERSION: ${{ matrix.python_version }} strategy: - fail-fast: false + fail-fast: true matrix: # Github Actions requires a single row to be added to the build matrix. # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. - name: [ - ubuntu-20.04-gcc-7, - ubuntu-20.04-gcc-9, - ubuntu-20.04-clang-9, - macOS-11-xcode-13.4.1, - ubuntu-20.04-gcc-7-tbb, - ] + name: + [ + ubuntu-20.04-gcc-9, + ubuntu-20.04-gcc-9-tbb, + ubuntu-20.04-clang-9, + macOS-11-xcode-13.4.1, + ] - build_type: [Debug, Release] + build_type: [Release] python_version: [3] include: - - name: ubuntu-20.04-gcc-7 - os: ubuntu-20.04 - compiler: gcc - version: "7" - - name: ubuntu-20.04-gcc-9 os: ubuntu-20.04 compiler: gcc version: "9" - - name: ubuntu-20.04-clang-9 + - name: ubuntu-20.04-gcc-9-tbb os: ubuntu-20.04 - compiler: clang + compiler: gcc version: "9" + flag: tbb - # NOTE temporarily added this as it is a required check. - name: ubuntu-20.04-clang-9 os: ubuntu-20.04 compiler: clang version: "9" - build_type: Debug - python_version: "3" - name: macOS-11-xcode-13.4.1 os: macOS-11 compiler: xcode version: "13.4.1" - - name: ubuntu-20.04-gcc-7-tbb - os: ubuntu-20.04 - compiler: gcc - version: "7" - flag: tbb - steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Install (Linux) if: runner.os == 'Linux' run: | @@ -82,7 +69,7 @@ jobs: sudo apt-get -y update sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libboost-all-dev - + if [ "${{ matrix.compiler }}" = "gcc" ]; then sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV @@ -98,15 +85,9 @@ jobs: brew tap ProfFan/robotics brew install cmake ninja brew install boost - if [ "${{ matrix.compiler }}" = "gcc" ]; then - brew install gcc@${{ matrix.version }} - echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV - echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV - else - sudo xcode-select -switch /Applications/Xcode.app - echo "CC=clang" >> $GITHUB_ENV - echo "CXX=clang++" >> $GITHUB_ENV - fi + sudo xcode-select -switch /Applications/Xcode.app + echo "CC=clang" >> $GITHUB_ENV + echo "CXX=clang++" >> $GITHUB_ENV - name: Set GTSAM_WITH_TBB Flag if: matrix.flag == 'tbb' run: | diff --git a/.github/workflows/build-special.yml b/.github/workflows/build-special.yml index 458394211..7582bf41c 100644 --- a/.github/workflows/build-special.yml +++ b/.github/workflows/build-special.yml @@ -22,71 +22,80 @@ jobs: # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. 
name: [ - ubuntu-gcc-deprecated, - ubuntu-gcc-quaternions, - ubuntu-gcc-tbb, - ubuntu-cayleymap, + ubuntu-clang-deprecated, + ubuntu-clang-quaternions, + ubuntu-clang-tbb, + ubuntu-clang-cayleymap, + ubuntu-clang-system-libs, + ubuntu-no-boost, ] build_type: [Debug, Release] include: - - name: ubuntu-gcc-deprecated - os: ubuntu-20.04 - compiler: gcc - version: "9" + - name: ubuntu-clang-deprecated + os: ubuntu-22.04 + compiler: clang + version: "14" flag: deprecated - - name: ubuntu-gcc-quaternions - os: ubuntu-20.04 - compiler: gcc - version: "9" + - name: ubuntu-clang-quaternions + os: ubuntu-22.04 + compiler: clang + version: "14" flag: quaternions - - name: ubuntu-gcc-tbb - os: ubuntu-20.04 - compiler: gcc - version: "9" + - name: ubuntu-clang-tbb + os: ubuntu-22.04 + compiler: clang + version: "14" flag: tbb - - name: ubuntu-cayleymap - os: ubuntu-20.04 - compiler: gcc - version: "9" + - name: ubuntu-clang-cayleymap + os: ubuntu-22.04 + compiler: clang + version: "14" flag: cayley - - name: ubuntu-system-libs - os: ubuntu-20.04 - compiler: gcc - version: "9" - flag: system-libs + - name: ubuntu-clang-system-libs + os: ubuntu-22.04 + compiler: clang + version: "14" + flag: system + + - name: ubuntu-no-boost + os: ubuntu-22.04 + compiler: clang + version: "14" + flag: no_boost steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Install (Linux) if: runner.os == 'Linux' run: | - # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository. - if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + sudo apt-get -y update + sudo apt-get -y install software-properties-common + + # LLVM (clang) 9/14 is not in 22.04 (jammy)'s repositories so we add the official LLVM repository. + if [ "${{ matrix.compiler }}" = "clang" ]; then + # (ipv4|ha).pool.sks-keyservers.net is the SKS GPG global keyserver pool + # ipv4 avoids potential timeouts because of crappy IPv6 infrastructure + # 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository + # This key is not in the keystore by default for Ubuntu so we need to add it. 
+ LLVM_KEY=15CF4D18AF4F7421 gpg --keyserver keyserver.ubuntu.com --recv-key $LLVM_KEY || gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-key $LLVM_KEY - gpg -a --export 15CF4D18AF4F7421 | sudo apt-key add - - sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" + gpg -a --export $LLVM_KEY | sudo apt-key add - + sudo add-apt-repository "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy main" fi - sudo apt-get -y update sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libicu-dev - if [ "${{ matrix.compiler }}" = "gcc" ]; then - sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib - echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV - echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV - else - sudo apt-get install -y clang-${{ matrix.version }} g++-multilib - echo "CC=clang-${{ matrix.version }}" >> $GITHUB_ENV - echo "CXX=clang++-${{ matrix.version }}" >> $GITHUB_ENV - fi + sudo apt-get install -y clang-${{ matrix.version }} g++-multilib + echo "CC=clang-${{ matrix.version }}" >> $GITHUB_ENV + echo "CXX=clang++-${{ matrix.version }}" >> $GITHUB_ENV - name: Install Boost if: runner.os == 'Linux' @@ -97,15 +106,9 @@ jobs: if: runner.os == 'macOS' run: | brew install cmake ninja boost - if [ "${{ matrix.compiler }}" = "gcc" ]; then - brew install gcc@${{ matrix.version }} - echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV - echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV - else - sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app - echo "CC=clang" >> $GITHUB_ENV - echo "CXX=clang++" >> $GITHUB_ENV - fi + sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app + echo "CC=clang" >> $GITHUB_ENV + echo "CXX=clang++" >> $GITHUB_ENV - name: Set Allow Deprecated Flag if: matrix.flag == 'deprecated' @@ -135,8 +138,18 @@ jobs: - name: Use system versions of 3rd party libraries if: matrix.flag == 'system' run: | + sudo apt-get install libeigen3-dev echo "GTSAM_USE_SYSTEM_EIGEN=ON" >> $GITHUB_ENV - echo "GTSAM_USE_SYSTEM_METIS=ON" >> $GITHUB_ENV + # TODO(dellaert): This does not work yet? + # sudo apt-get install metis + # echo "GTSAM_USE_SYSTEM_METIS=ON" >> $GITHUB_ENV + + - name: Turn off boost + if: matrix.flag == 'no_boost' + run: | + echo "GTSAM_ENABLE_BOOST_SERIALIZATION=OFF" >> $GITHUB_ENV + echo "GTSAM_USE_BOOST_FEATURES=OFF" >> $GITHUB_ENV + echo "GTSAM will not use BOOST" - name: Build & Test run: | diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml index ef2500b46..3d4bf3faf 100644 --- a/.github/workflows/build-windows.yml +++ b/.github/workflows/build-windows.yml @@ -16,7 +16,7 @@ jobs: BOOST_EXE: boost_1_72_0-msvc-14.2 strategy: - fail-fast: false + fail-fast: true matrix: # Github Actions requires a single row to be added to the build matrix. # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. 
@@ -94,7 +94,7 @@ jobs: echo "BOOST_ROOT=$BOOST_PATH" >> $env:GITHUB_ENV - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Configuration run: | diff --git a/CMakeLists.txt b/CMakeLists.txt index 52b34101f..ebe27443a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -50,8 +50,24 @@ endif() include(cmake/HandleGeneralOptions.cmake) # CMake build options -# Libraries: -include(cmake/HandleBoost.cmake) # Boost +############### Decide on BOOST ###################################### +# Enable or disable serialization with GTSAM_ENABLE_BOOST_SERIALIZATION +option(GTSAM_ENABLE_BOOST_SERIALIZATION "Enable Boost serialization" ON) +if(GTSAM_ENABLE_BOOST_SERIALIZATION) + add_definitions(-DGTSAM_ENABLE_BOOST_SERIALIZATION) +endif() + +option(GTSAM_USE_BOOST_FEATURES "Enable Features that use Boost" ON) +if(GTSAM_USE_BOOST_FEATURES) + add_definitions(-DGTSAM_USE_BOOST_FEATURES) +endif() + +if(GTSAM_ENABLE_BOOST_SERIALIZATION OR GTSAM_USE_BOOST_FEATURES) +include(cmake/HandleBoost.cmake) +endif() +###################################################################### + +# Other Libraries: include(cmake/HandleCCache.cmake) # ccache include(cmake/HandleCPack.cmake) # CPack include(cmake/HandleEigen.cmake) # Eigen3 diff --git a/CppUnitLite/CMakeLists.txt b/CppUnitLite/CMakeLists.txt index ab884ec1d..cbffa79d1 100644 --- a/CppUnitLite/CMakeLists.txt +++ b/CppUnitLite/CMakeLists.txt @@ -6,7 +6,6 @@ file(GLOB cppunitlite_src "*.cpp") add_library(CppUnitLite STATIC ${cppunitlite_src} ${cppunitlite_headers}) list(APPEND GTSAM_EXPORTED_TARGETS CppUnitLite) set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE) -target_link_libraries(CppUnitLite PUBLIC Boost::boost) # boost/lexical_cast.h gtsam_assign_source_folders("${cppunitlite_headers};${cppunitlite_src}") # MSVC project structure diff --git a/CppUnitLite/Test.cpp b/CppUnitLite/Test.cpp index 78995a219..f5bea8819 100644 --- a/CppUnitLite/Test.cpp +++ b/CppUnitLite/Test.cpp @@ -15,8 +15,6 @@ #include "TestResult.h" #include "Failure.h" -#include - Test::Test (const std::string& testName) : name_ (testName), next_(0), lineNumber_(-1), safeCheck_(true) { @@ -47,10 +45,10 @@ bool Test::check(long expected, long actual, TestResult& result, const std::stri result.addFailure ( Failure ( name_, - boost::lexical_cast (__FILE__), + std::string(__FILE__), __LINE__, - boost::lexical_cast (expected), - boost::lexical_cast (actual))); + std::to_string(expected), + std::to_string(actual))); return false; @@ -64,7 +62,7 @@ bool Test::check(const std::string& expected, const std::string& actual, TestRes result.addFailure ( Failure ( name_, - boost::lexical_cast (__FILE__), + std::string(__FILE__), __LINE__, expected, actual)); diff --git a/CppUnitLite/Test.h b/CppUnitLite/Test.h index a898c83ef..422427251 100644 --- a/CppUnitLite/Test.h +++ b/CppUnitLite/Test.h @@ -23,7 +23,7 @@ #include -#include +#include class TestResult; @@ -112,17 +112,17 @@ protected: #define THROWS_EXCEPTION(condition)\ { try { condition; \ - result_.addFailure (Failure (name_, __FILE__,__LINE__, std::string("Didn't throw: ") + boost::lexical_cast(#condition))); \ + result_.addFailure (Failure (name_, __FILE__,__LINE__, std::string("Didn't throw: ") + std::string(#condition))); \ return; } \ catch (...) 
{} } #define CHECK_EXCEPTION(condition, exception_name)\ { try { condition; \ - result_.addFailure (Failure (name_, __FILE__,__LINE__, std::string("Didn't throw: ") + boost::lexical_cast(#condition))); \ + result_.addFailure (Failure (name_, __FILE__,__LINE__, std::string("Didn't throw: ") + std::string(#condition))); \ return; } \ catch (exception_name&) {} \ catch (...) { \ - result_.addFailure (Failure (name_, __FILE__,__LINE__, std::string("Wrong exception: ") + boost::lexical_cast(#condition) + boost::lexical_cast(", expected: ") + boost::lexical_cast(#exception_name))); \ + result_.addFailure (Failure (name_, __FILE__,__LINE__, std::string("Wrong exception: ") + std::string(#condition) + std::string(", expected: ") + std::string(#exception_name))); \ return; } } #define EQUALITY(expected,actual)\ @@ -130,21 +130,21 @@ protected: result_.addFailure(Failure(name_, __FILE__, __LINE__, #expected, #actual)); } #define CHECK_EQUAL(expected,actual)\ -{ if ((expected) == (actual)) return; result_.addFailure(Failure(name_, __FILE__, __LINE__, boost::lexical_cast(expected), boost::lexical_cast(actual))); } +{ if ((expected) == (actual)) return; result_.addFailure(Failure(name_, __FILE__, __LINE__, std::to_string(expected), std::to_string(actual))); } #define LONGS_EQUAL(expected,actual)\ { long actualTemp = actual; \ long expectedTemp = expected; \ if ((expectedTemp) != (actualTemp)) \ -{ result_.addFailure (Failure (name_, __FILE__, __LINE__, boost::lexical_cast(expectedTemp), \ -boost::lexical_cast(actualTemp))); return; } } +{ result_.addFailure (Failure (name_, __FILE__, __LINE__, std::to_string(expectedTemp), \ +std::to_string(actualTemp))); return; } } #define DOUBLES_EQUAL(expected,actual,threshold)\ { double actualTemp = actual; \ double expectedTemp = expected; \ if (!std::isfinite(actualTemp) || !std::isfinite(expectedTemp) || fabs ((expectedTemp)-(actualTemp)) > threshold) \ { result_.addFailure (Failure (name_, __FILE__, __LINE__, \ -boost::lexical_cast((double)expectedTemp), boost::lexical_cast((double)actualTemp))); return; } } +std::to_string((double)expectedTemp), std::to_string((double)actualTemp))); return; } } /* EXPECTs: tests will continue running after a failure */ @@ -156,15 +156,15 @@ boost::lexical_cast((double)expectedTemp), boost::lexical_cast(expectedTemp), \ -boost::lexical_cast(actualTemp))); } } +{ result_.addFailure (Failure (name_, __FILE__, __LINE__, std::to_string(expectedTemp), \ +std::to_string(actualTemp))); } } #define EXPECT_DOUBLES_EQUAL(expected,actual,threshold)\ { double actualTemp = actual; \ double expectedTemp = expected; \ if (!std::isfinite(actualTemp) || !std::isfinite(expectedTemp) || fabs ((expectedTemp)-(actualTemp)) > threshold) \ { result_.addFailure (Failure (name_, __FILE__, __LINE__, \ -boost::lexical_cast((double)expectedTemp), boost::lexical_cast((double)actualTemp))); } } +std::to_string((double)expectedTemp), std::to_string((double)actualTemp))); } } #define FAIL(text) \ diff --git a/GTSAM-Concepts.md b/GTSAM-Concepts.md index 953357ede..9911b3764 100644 --- a/GTSAM-Concepts.md +++ b/GTSAM-Concepts.md @@ -166,7 +166,7 @@ Concept Checks Boost provides a nice way to check whether a given type satisfies a concept. For example, the following - BOOST_CONCEPT_ASSERT(IsVectorSpace) + GTSAM_CONCEPT_ASSERT(IsVectorSpace) asserts that Point2 indeed is a model for the VectorSpace concept. 
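For reference, a minimal sketch (not part of the patch) of how the renamed macro is used; it assumes the standard GTSAM header paths and the Point2/IsVectorSpace names already mentioned in the text above:

```cpp
// Sketch only: compile-time concept check with the Boost-agnostic macro.
// GTSAM_CONCEPT_ASSERT is defined in gtsam/base/concepts.h (changed later
// in this patch); the header paths below are the usual GTSAM ones.
#include <gtsam/base/concepts.h>
#include <gtsam/base/VectorSpace.h>
#include <gtsam/geometry/Point2.h>

// Asserts at compile time that Point2 models the VectorSpace concept.
// With GTSAM_USE_BOOST_FEATURES this expands to BOOST_CONCEPT_ASSERT((...));
// without Boost it merely declares an unused checker object.
GTSAM_CONCEPT_ASSERT(gtsam::IsVectorSpace<gtsam::Point2>);
```

The numbered GTSAM_CONCEPT_ASSERT1..4 variants introduced in concepts.h exist so that several checks can coexist in one class body without the generated checker members colliding.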
diff --git a/cmake/GtsamBuildTypes.cmake b/cmake/GtsamBuildTypes.cmake index e8f01a1f0..cc3cbee6d 100644 --- a/cmake/GtsamBuildTypes.cmake +++ b/cmake/GtsamBuildTypes.cmake @@ -87,7 +87,10 @@ if(MSVC) list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE WINDOWS_LEAN_AND_MEAN NOMINMAX - ) + ) + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC + _ENABLE_EXTENDED_ALIGNED_STORAGE + ) # Avoid literally hundreds to thousands of warnings: list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC /wd4267 # warning C4267: 'initializing': conversion from 'size_t' to 'int', possible loss of data @@ -126,6 +129,7 @@ else() -fPIC # ensure proper code generation for shared libraries $<$:-Wreturn-local-addr -Werror=return-local-addr> # Error: return local address $<$:-Wreturn-stack-address -Werror=return-stack-address> # Error: return local address + $<$:-Wno-misleading-indentation> # Eigen triggers a ton! -Wreturn-type -Werror=return-type # Error on missing return() -Wformat -Werror=format-security # Error on wrong printf() arguments $<$:${flag_override_}> # Enforce the use of the override keyword diff --git a/cmake/example_cmake_find_gtsam/CMakeLists.txt b/cmake/example_cmake_find_gtsam/CMakeLists.txt index 9a4be4d70..d020f7032 100644 --- a/cmake/example_cmake_find_gtsam/CMakeLists.txt +++ b/cmake/example_cmake_find_gtsam/CMakeLists.txt @@ -12,6 +12,6 @@ add_executable(example ) # By using CMake exported targets, a simple "link" dependency introduces the -# include directories (-I) flags, links against Boost, and add any other +# include directories (-I) flags, and add any other # required build flags (e.g. C++11, etc.) target_link_libraries(example PRIVATE gtsam) diff --git a/cmake/obsolete/GtsamTestingObsolete.cmake b/cmake/obsolete/GtsamTestingObsolete.cmake index c90abfa6c..be9de93bd 100644 --- a/cmake/obsolete/GtsamTestingObsolete.cmake +++ b/cmake/obsolete/GtsamTestingObsolete.cmake @@ -2,7 +2,7 @@ # Macro for adding categorized tests in a "tests" folder, with # optional exclusion of tests and convenience library linking options # -# By default, all tests are linked with CppUnitLite and boost +# By default, all tests are linked with CppUnitLite # Arguments: # - subdir The name of the category for this test # - local_libs A list of convenience libraries to use (if GTSAM_BUILD_CONVENIENCE_LIBRARIES is true) @@ -32,7 +32,6 @@ endfunction() # Macro for adding categorized timing scripts in a "tests" folder, with # optional exclusion of tests and convenience library linking options # -# By default, all tests are linked with boost # Arguments: # - subdir The name of the category for this timing script # - local_libs A list of convenience libraries to use (if GTSAM_BUILD_CONVENIENCE_LIBRARIES is true) @@ -51,8 +50,7 @@ macro(gtsam_add_subdir_timing subdir local_libs full_libs excluded_srcs) endmacro() # Macro for adding executables matching a pattern - builds one executable for -# each file matching the pattern. These exectuables are automatically linked -# with boost. +# each file matching the pattern. 
# Arguments: # - pattern The glob pattern to match source files # - local_libs A list of convenience libraries to use (if GTSAM_BUILD_CONVENIENCE_LIBRARIES is true) @@ -138,9 +136,9 @@ macro(gtsam_add_grouped_scripts group pattern target_prefix pretty_prefix_name l # Linking and dependendencies if (GTSAM_BUILD_CONVENIENCE_LIBRARIES) - target_link_libraries(${script_bin} ${local_libs} ${GTSAM_BOOST_LIBRARIES}) + target_link_libraries(${script_bin} ${local_libs}) else() - target_link_libraries(${script_bin} ${full_libs} ${GTSAM_BOOST_LIBRARIES}) + target_link_libraries(${script_bin} ${full_libs}) endif() # Add .run target diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 1584a3a35..4b4a81c59 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -1,13 +1,23 @@ set (excluded_examples - elaboratePoint2KalmanFilter.cpp + "elaboratePoint2KalmanFilter.cpp" ) # if GTSAM_ENABLE_BOOST_SERIALIZATION is not set then SolverComparer.cpp will not compile if (NOT GTSAM_ENABLE_BOOST_SERIALIZATION) - set (excluded_examples - ${excluded_examples} - SolverComparer.cpp + list (APPEND excluded_examples + "SolverComparer.cpp" ) endif() +# Add examples to exclude if GTSAM_USE_BOOST_FEATURES is not set +if (NOT GTSAM_USE_BOOST_FEATURES) + # add to excluded examples + list (APPEND excluded_examples + "CombinedImuFactorsExample.cpp" + "ImuFactorsExample.cpp" + "ShonanAveragingCLI.cpp" + "SolverComparer.cpp" + ) +endif() + gtsamAddExamplesGlob("*.cpp" "${excluded_examples}" "gtsam;gtsam_unstable;${Boost_PROGRAM_OPTIONS_LIBRARY}") diff --git a/examples/Data/randomGrid3D.xml b/examples/Data/randomGrid3D.xml index 42eb473be..233ad4ad7 100644 --- a/examples/Data/randomGrid3D.xml +++ b/examples/Data/randomGrid3D.xml @@ -1,13 +1,13 @@ - - + + 32 1 - + @@ -100,9 +100,7 @@ 9 0 - - - + 1 @@ -199,9 +197,7 @@ 9 0 - - - + 1 @@ -298,9 +294,7 @@ 9 0 - - - + 1 @@ -397,9 +391,7 @@ 9 0 - - - + 1 @@ -496,9 +488,7 @@ 9 0 - - - + 1 @@ -595,9 +585,7 @@ 9 0 - - - + 1 @@ -694,9 +682,7 @@ 9 0 - - - + 1 @@ -793,9 +779,7 @@ 9 0 - - - + 1 @@ -892,9 +876,7 @@ 9 0 - - - + 1 @@ -991,9 +973,7 @@ 9 0 - - - + 1 @@ -1090,9 +1070,7 @@ 9 0 - - - + 1 @@ -1189,9 +1167,7 @@ 9 0 - - - + 1 @@ -1288,9 +1264,7 @@ 9 0 - - - + 1 @@ -1387,9 +1361,7 @@ 9 0 - - - + 1 @@ -1486,9 +1458,7 @@ 9 0 - - - + 1 @@ -1585,9 +1555,7 @@ 9 0 - - - + 1 @@ -1684,9 +1652,7 @@ 9 0 - - - + 1 @@ -1783,9 +1749,7 @@ 9 0 - - - + 1 @@ -1882,9 +1846,7 @@ 9 0 - - - + 1 @@ -1981,9 +1943,7 @@ 9 0 - - - + 1 @@ -2080,9 +2040,7 @@ 9 0 - - - + 1 @@ -2179,9 +2137,7 @@ 9 0 - - - + 1 @@ -2278,9 +2234,7 @@ 9 0 - - - + 1 @@ -2377,9 +2331,7 @@ 9 0 - - - + 1 @@ -2476,9 +2428,7 @@ 9 0 - - - + 1 @@ -2575,9 +2525,7 @@ 9 0 - - - + 1 @@ -2674,9 +2622,7 @@ 9 0 - - - + 1 @@ -2773,9 +2719,7 @@ 9 0 - - - + 1 @@ -2872,9 +2816,7 @@ 9 0 - - - + 1 @@ -2971,9 +2913,7 @@ 9 0 - - - + 1 @@ -3070,9 +3010,7 @@ 9 0 - - - + 1 @@ -3402,13 +3340,11 @@ 3 0 - - - + 1 - + diff --git a/examples/Data/toy3D.xml b/examples/Data/toy3D.xml index 26bd231ca..acc2bbe3c 100644 --- a/examples/Data/toy3D.xml +++ b/examples/Data/toy3D.xml @@ -1,13 +1,13 @@ - - + + 2 1 - + @@ -100,9 +100,7 @@ 9 0 - - - + 1 @@ -157,13 +155,11 @@ 3 0 - - - + 1 - + diff --git a/examples/DiscreteBayesNetExample.cpp b/examples/DiscreteBayesNetExample.cpp index dfd7beb63..09ee8baa6 100644 --- a/examples/DiscreteBayesNetExample.cpp +++ b/examples/DiscreteBayesNetExample.cpp @@ -51,8 +51,7 @@ int main(int argc, char **argv) { DiscreteFactorGraph fg(asia); // Create solver and eliminate - Ordering ordering; - ordering += Key(0), 
Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7); + const Ordering ordering{0, 1, 2, 3, 4, 5, 6, 7}; // solve auto mpe = fg.optimize(); diff --git a/examples/ISAM2Example_SmartFactor.cpp b/examples/ISAM2Example_SmartFactor.cpp index a874efc9a..51503e34e 100644 --- a/examples/ISAM2Example_SmartFactor.cpp +++ b/examples/ISAM2Example_SmartFactor.cpp @@ -87,9 +87,8 @@ int main(int argc, char* argv[]) { result.print(); cout << "Detailed results:" << endl; - for (auto keyedStatus : result.detail->variableStatus) { - const auto& status = keyedStatus.second; - PrintKey(keyedStatus.first); + for (auto& [key, status] : result.detail->variableStatus) { + PrintKey(key); cout << " {" << endl; cout << "reeliminated: " << status.isReeliminated << endl; cout << "relinearized above thresh: " << status.isAboveRelinThreshold diff --git a/examples/Pose2SLAMExample_g2o.cpp b/examples/Pose2SLAMExample_g2o.cpp index fb3869a50..4a77f7be1 100644 --- a/examples/Pose2SLAMExample_g2o.cpp +++ b/examples/Pose2SLAMExample_g2o.cpp @@ -48,16 +48,16 @@ int main(const int argc, const char *argv[]) { Values::shared_ptr initial; bool is3D = false; if (kernelType.compare("none") == 0) { - boost::tie(graph, initial) = readG2o(g2oFile, is3D); + std::tie(graph, initial) = readG2o(g2oFile, is3D); } if (kernelType.compare("huber") == 0) { std::cout << "Using robust kernel: huber " << std::endl; - boost::tie(graph, initial) = + std::tie(graph, initial) = readG2o(g2oFile, is3D, KernelFunctionTypeHUBER); } if (kernelType.compare("tukey") == 0) { std::cout << "Using robust kernel: tukey " << std::endl; - boost::tie(graph, initial) = + std::tie(graph, initial) = readG2o(g2oFile, is3D, KernelFunctionTypeTUKEY); } @@ -90,7 +90,7 @@ int main(const int argc, const char *argv[]) { std::cout << "Writing results to file: " << outputFile << std::endl; NonlinearFactorGraph::shared_ptr graphNoKernel; Values::shared_ptr initial2; - boost::tie(graphNoKernel, initial2) = readG2o(g2oFile); + std::tie(graphNoKernel, initial2) = readG2o(g2oFile); writeG2o(*graphNoKernel, result, outputFile); std::cout << "done! 
" << std::endl; } diff --git a/examples/Pose2SLAMExample_graph.cpp b/examples/Pose2SLAMExample_graph.cpp index c4301d28d..cf2513864 100644 --- a/examples/Pose2SLAMExample_graph.cpp +++ b/examples/Pose2SLAMExample_graph.cpp @@ -36,7 +36,7 @@ int main (int argc, char** argv) { Values::shared_ptr initial; SharedDiagonal model = noiseModel::Diagonal::Sigmas((Vector(3) << 0.05, 0.05, 5.0 * M_PI / 180.0).finished()); string graph_file = findExampleDataFile("w100.graph"); - boost::tie(graph, initial) = load2D(graph_file, model); + std::tie(graph, initial) = load2D(graph_file, model); initial->print("Initial estimate:\n"); // Add a Gaussian prior on first poses diff --git a/examples/Pose2SLAMExample_lago.cpp b/examples/Pose2SLAMExample_lago.cpp index 1d02c64fa..c470e3904 100644 --- a/examples/Pose2SLAMExample_lago.cpp +++ b/examples/Pose2SLAMExample_lago.cpp @@ -37,7 +37,7 @@ int main(const int argc, const char *argv[]) { NonlinearFactorGraph::shared_ptr graph; Values::shared_ptr initial; - boost::tie(graph, initial) = readG2o(g2oFile); + std::tie(graph, initial) = readG2o(g2oFile); // Add prior on the pose having index (key) = 0 auto priorModel = noiseModel::Diagonal::Variances(Vector3(1e-6, 1e-6, 1e-8)); @@ -55,7 +55,7 @@ int main(const int argc, const char *argv[]) { std::cout << "Writing results to file: " << outputFile << std::endl; NonlinearFactorGraph::shared_ptr graphNoKernel; Values::shared_ptr initial2; - boost::tie(graphNoKernel, initial2) = readG2o(g2oFile); + std::tie(graphNoKernel, initial2) = readG2o(g2oFile); writeG2o(*graphNoKernel, estimateLago, outputFile); std::cout << "done! " << std::endl; } diff --git a/examples/Pose3Localization.cpp b/examples/Pose3Localization.cpp index 44076ab38..e23e6902a 100644 --- a/examples/Pose3Localization.cpp +++ b/examples/Pose3Localization.cpp @@ -37,7 +37,7 @@ int main(const int argc, const char* argv[]) { NonlinearFactorGraph::shared_ptr graph; Values::shared_ptr initial; bool is3D = true; - boost::tie(graph, initial) = readG2o(g2oFile, is3D); + std::tie(graph, initial) = readG2o(g2oFile, is3D); // Add prior on the first key auto priorModel = noiseModel::Diagonal::Variances( @@ -67,15 +67,15 @@ int main(const int argc, const char* argv[]) { std::cout << "Writing results to file: " << outputFile << std::endl; NonlinearFactorGraph::shared_ptr graphNoKernel; Values::shared_ptr initial2; - boost::tie(graphNoKernel, initial2) = readG2o(g2oFile); + std::tie(graphNoKernel, initial2) = readG2o(g2oFile); writeG2o(*graphNoKernel, result, outputFile); std::cout << "done! 
" << std::endl; } // Calculate and print marginal covariances for all variables Marginals marginals(*graph, result); - for (const auto& key_pose : result.extract()) { - std::cout << marginals.marginalCovariance(key_pose.first) << endl; + for (const auto& [key, pose] : result.extract()) { + std::cout << marginals.marginalCovariance(key) << endl; } return 0; } diff --git a/examples/Pose3SLAMExample_changeKeys.cpp b/examples/Pose3SLAMExample_changeKeys.cpp index 392b1c417..ca9448ea3 100644 --- a/examples/Pose3SLAMExample_changeKeys.cpp +++ b/examples/Pose3SLAMExample_changeKeys.cpp @@ -36,7 +36,7 @@ int main(const int argc, const char *argv[]) { NonlinearFactorGraph::shared_ptr graph; Values::shared_ptr initial; bool is3D = true; - boost::tie(graph, initial) = readG2o(g2oFile, is3D); + std::tie(graph, initial) = readG2o(g2oFile, is3D); bool add = false; Key firstKey = 8646911284551352320; diff --git a/examples/Pose3SLAMExample_g2o.cpp b/examples/Pose3SLAMExample_g2o.cpp index 7ae2978ce..0c99a75a0 100644 --- a/examples/Pose3SLAMExample_g2o.cpp +++ b/examples/Pose3SLAMExample_g2o.cpp @@ -36,7 +36,7 @@ int main(const int argc, const char* argv[]) { NonlinearFactorGraph::shared_ptr graph; Values::shared_ptr initial; bool is3D = true; - boost::tie(graph, initial) = readG2o(g2oFile, is3D); + std::tie(graph, initial) = readG2o(g2oFile, is3D); // Add prior on the first key auto priorModel = noiseModel::Diagonal::Variances( @@ -66,7 +66,7 @@ int main(const int argc, const char* argv[]) { std::cout << "Writing results to file: " << outputFile << std::endl; NonlinearFactorGraph::shared_ptr graphNoKernel; Values::shared_ptr initial2; - boost::tie(graphNoKernel, initial2) = readG2o(g2oFile); + std::tie(graphNoKernel, initial2) = readG2o(g2oFile); writeG2o(*graphNoKernel, result, outputFile); std::cout << "done! " << std::endl; } diff --git a/examples/Pose3SLAMExample_initializePose3Chordal.cpp b/examples/Pose3SLAMExample_initializePose3Chordal.cpp index 03db9fc77..c1cc97c85 100644 --- a/examples/Pose3SLAMExample_initializePose3Chordal.cpp +++ b/examples/Pose3SLAMExample_initializePose3Chordal.cpp @@ -36,7 +36,7 @@ int main(const int argc, const char* argv[]) { NonlinearFactorGraph::shared_ptr graph; Values::shared_ptr initial; bool is3D = true; - boost::tie(graph, initial) = readG2o(g2oFile, is3D); + std::tie(graph, initial) = readG2o(g2oFile, is3D); // Add prior on the first key auto priorModel = noiseModel::Diagonal::Variances( @@ -60,7 +60,7 @@ int main(const int argc, const char* argv[]) { std::cout << "Writing results to file: " << outputFile << std::endl; NonlinearFactorGraph::shared_ptr graphNoKernel; Values::shared_ptr initial2; - boost::tie(graphNoKernel, initial2) = readG2o(g2oFile); + std::tie(graphNoKernel, initial2) = readG2o(g2oFile); writeG2o(*graphNoKernel, initialization, outputFile); std::cout << "done! 
" << std::endl; } diff --git a/examples/Pose3SLAMExample_initializePose3Gradient.cpp b/examples/Pose3SLAMExample_initializePose3Gradient.cpp index 31693c5ff..740391017 100644 --- a/examples/Pose3SLAMExample_initializePose3Gradient.cpp +++ b/examples/Pose3SLAMExample_initializePose3Gradient.cpp @@ -36,7 +36,7 @@ int main(const int argc, const char* argv[]) { NonlinearFactorGraph::shared_ptr graph; Values::shared_ptr initial; bool is3D = true; - boost::tie(graph, initial) = readG2o(g2oFile, is3D); + std::tie(graph, initial) = readG2o(g2oFile, is3D); // Add prior on the first key auto priorModel = noiseModel::Diagonal::Variances( @@ -66,7 +66,7 @@ int main(const int argc, const char* argv[]) { std::cout << "Writing results to file: " << outputFile << std::endl; NonlinearFactorGraph::shared_ptr graphNoKernel; Values::shared_ptr initial2; - boost::tie(graphNoKernel, initial2) = readG2o(g2oFile); + std::tie(graphNoKernel, initial2) = readG2o(g2oFile); writeG2o(*graphNoKernel, initialization, outputFile); std::cout << "done! " << std::endl; } diff --git a/examples/RangeISAMExample_plaza2.cpp b/examples/RangeISAMExample_plaza2.cpp index aa61ffc6c..39cc6c4ef 100644 --- a/examples/RangeISAMExample_plaza2.cpp +++ b/examples/RangeISAMExample_plaza2.cpp @@ -92,7 +92,7 @@ std::list readOdometry() { // load the ranges from TD // Time (sec) Sender / Antenna ID Receiver Node ID Range (m) -using RangeTriple = boost::tuple; +using RangeTriple = std::tuple; std::vector readTriples() { std::vector triples; std::string data_file = gtsam::findExampleDataFile("Plaza2_TD.txt"); @@ -166,7 +166,7 @@ int main(int argc, char** argv) { //--------------------------------- odometry loop -------------------------- double t; Pose2 odometry; - boost::tie(t, odometry) = timedOdometry; + std::tie(t, odometry) = timedOdometry; // add odometry factor newFactors.emplace_shared>(i - 1, i, odometry, @@ -178,10 +178,10 @@ int main(int argc, char** argv) { initial.insert(i, predictedPose); // Check if there are range factors to be added - while (k < K && t >= boost::get<0>(triples[k])) { - size_t j = boost::get<1>(triples[k]); + while (k < K && t >= std::get<0>(triples[k])) { + size_t j = std::get<1>(triples[k]); Symbol landmark_key('L', j); - double range = boost::get<2>(triples[k]); + double range = std::get<2>(triples[k]); newFactors.emplace_shared>( i, landmark_key, range, rangeNoise); if (initializedLandmarks.count(landmark_key) == 0) { diff --git a/examples/SFMExampleExpressions_bal.cpp b/examples/SFMExampleExpressions_bal.cpp index 8a5a12e56..f2e3529ba 100644 --- a/examples/SFMExampleExpressions_bal.cpp +++ b/examples/SFMExampleExpressions_bal.cpp @@ -31,7 +31,6 @@ #include #include -#include #include using namespace std; @@ -50,8 +49,7 @@ int main(int argc, char* argv[]) { // Load the SfM data from file SfmData mydata = SfmData::FromBalFile(filename); - cout << boost::format("read %1% tracks on %2% cameras\n") % - mydata.numberTracks() % mydata.numberCameras(); + cout << "read " << mydata.numberTracks() << " tracks on " << mydata.numberCameras() << " cameras" << endl; // Create a factor graph ExpressionFactorGraph graph; @@ -79,9 +77,7 @@ int main(int argc, char* argv[]) { for (const SfmTrack& track : mydata.tracks) { // Leaf expression for j^th point Point3_ point_('p', j); - for (const SfmMeasurement& m : track.measurements) { - size_t i = m.first; - Point2 uv = m.second; + for (const auto& [i, uv] : track.measurements) { // Leaf expression for i^th camera Expression camera_(C(i)); // Below an expression for the prediction 
of the measurement: diff --git a/examples/SFMExample_bal.cpp b/examples/SFMExample_bal.cpp index 10563760d..9db5bd175 100644 --- a/examples/SFMExample_bal.cpp +++ b/examples/SFMExample_bal.cpp @@ -23,7 +23,6 @@ #include #include -#include #include using namespace std; @@ -45,7 +44,7 @@ int main (int argc, char* argv[]) { // Load the SfM data from file SfmData mydata = SfmData::FromBalFile(filename); - cout << boost::format("read %1% tracks on %2% cameras\n") % mydata.numberTracks() % mydata.numberCameras(); + cout << "read " << mydata.numberTracks() << " tracks on " << mydata.numberCameras() << " cameras" << endl; // Create a factor graph NonlinearFactorGraph graph; @@ -57,9 +56,7 @@ int main (int argc, char* argv[]) { // Add measurements to the factor graph size_t j = 0; for(const SfmTrack& track: mydata.tracks) { - for(const SfmMeasurement& m: track.measurements) { - size_t i = m.first; - Point2 uv = m.second; + for (const auto& [i, uv] : track.measurements) { graph.emplace_shared(uv, noise, C(i), P(j)); // note use of shorthand symbols C and P } j += 1; diff --git a/examples/SFMExample_bal_COLAMD_METIS.cpp b/examples/SFMExample_bal_COLAMD_METIS.cpp index 92d779a56..592b83418 100644 --- a/examples/SFMExample_bal_COLAMD_METIS.cpp +++ b/examples/SFMExample_bal_COLAMD_METIS.cpp @@ -26,7 +26,6 @@ #include #include -#include #include using namespace std; @@ -47,8 +46,7 @@ int main(int argc, char* argv[]) { // Load the SfM data from file SfmData mydata = SfmData::FromBalFile(filename); - cout << boost::format("read %1% tracks on %2% cameras\n") % - mydata.numberTracks() % mydata.numberCameras(); + cout << "read " << mydata.numberTracks() << " tracks on " << mydata.numberCameras() << " cameras" << endl; // Create a factor graph NonlinearFactorGraph graph; @@ -59,9 +57,7 @@ int main(int argc, char* argv[]) { // Add measurements to the factor graph size_t j = 0; for (const SfmTrack& track : mydata.tracks) { - for (const SfmMeasurement& m : track.measurements) { - size_t i = m.first; - Point2 uv = m.second; + for (const auto& [i, uv] : track.measurements) { graph.emplace_shared( uv, noise, C(i), P(j)); // note use of shorthand symbols C and P } @@ -130,9 +126,9 @@ int main(int argc, char* argv[]) { cout << endl << endl; cout << "Time comparison by solving " << filename << " results:" << endl; - cout << boost::format("%1% point tracks and %2% cameras\n") % - mydata.numberTracks() % mydata.numberCameras() - << endl; + + cout << mydata.numberTracks() << " point tracks and " << mydata.numberCameras() + << " cameras" << endl; tictoc_print_(); } diff --git a/examples/ShonanAveragingCLI.cpp b/examples/ShonanAveragingCLI.cpp index c72a32017..831b608e3 100644 --- a/examples/ShonanAveragingCLI.cpp +++ b/examples/ShonanAveragingCLI.cpp @@ -103,7 +103,7 @@ int main(int argc, char* argv[]) { auto result = shonan.run(initial, pMin); // Parse file again to set up translation problem, adding a prior - boost::tie(inputGraph, posesInFile) = load2D(inputFile); + std::tie(inputGraph, posesInFile) = load2D(inputFile); auto priorModel = noiseModel::Unit::Create(3); inputGraph->addPrior(0, posesInFile->at(0), priorModel); @@ -119,7 +119,7 @@ int main(int argc, char* argv[]) { auto result = shonan.run(initial, pMin); // Parse file again to set up translation problem, adding a prior - boost::tie(inputGraph, posesInFile) = load3D(inputFile); + std::tie(inputGraph, posesInFile) = load3D(inputFile); auto priorModel = noiseModel::Unit::Create(6); inputGraph->addPrior(0, posesInFile->at(0), priorModel); diff --git 
a/examples/SolverComparer.cpp b/examples/SolverComparer.cpp index 4cbaaf617..7e451ca99 100644 --- a/examples/SolverComparer.cpp +++ b/examples/SolverComparer.cpp @@ -49,8 +49,6 @@ #include #include #include -#include -#include #include #include @@ -559,12 +557,12 @@ void runPerturb() // Perturb values VectorValues noise; - for(const auto& key_dim: initial.dims()) + for(const auto& [key, dim]: initial.dims()) { - Vector noisev(key_dim.second); + Vector noisev(dim); for(Vector::Index i = 0; i < noisev.size(); ++i) noisev(i) = normal(rng); - noise.insert(key_dim.first, noisev); + noise.insert(key, noisev); } Values perturbed = initial.retract(noise); diff --git a/gtsam/CMakeLists.txt b/gtsam/CMakeLists.txt index 03ffb54bc..a351d0d48 100644 --- a/gtsam/CMakeLists.txt +++ b/gtsam/CMakeLists.txt @@ -48,6 +48,23 @@ else() set(excluded_sources ${excluded_sources} "${CMAKE_CURRENT_SOURCE_DIR}/geometry/Rot3Q.cpp") endif() +# if GTSAM_ENABLE_BOOST_SERIALIZATION is not set, then we need to exclude the following: +if(NOT GTSAM_ENABLE_BOOST_SERIALIZATION) + list (APPEND excluded_headers + "${CMAKE_CURRENT_SOURCE_DIR}/gtsam/base/serializationTestHelpers.h" + "${CMAKE_CURRENT_SOURCE_DIR}/gtsam/base/serialization.h" + ) +endif() + +# if GTSAM_USE_BOOST_FEATURES is not set, then we need to exclude the following: +if(NOT GTSAM_USE_BOOST_FEATURES) + list (APPEND excluded_sources + "${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/GncOptimizer.h" + "${CMAKE_CURRENT_SOURCE_DIR}/inference/graph.h" + "${CMAKE_CURRENT_SOURCE_DIR}/inference/graph-inl.h" + ) +endif() + # Common headers file(GLOB gtsam_core_headers "*.h") install(FILES ${gtsam_core_headers} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam) diff --git a/gtsam/base/ConcurrentMap.h b/gtsam/base/ConcurrentMap.h index 66d06d324..372f02d06 100644 --- a/gtsam/base/ConcurrentMap.h +++ b/gtsam/base/ConcurrentMap.h @@ -52,7 +52,6 @@ using ConcurrentMapBase = gtsam::FastMap; #include #include #endif -#include #include diff --git a/gtsam/base/DSFMap.h b/gtsam/base/DSFMap.h index a666a8334..c7ae09f20 100644 --- a/gtsam/base/DSFMap.h +++ b/gtsam/base/DSFMap.h @@ -50,7 +50,7 @@ class DSFMap { iterator it = entries_.find(key); // if key does not exist, create and return itself if (it == entries_.end()) { - it = entries_.insert(std::make_pair(key, empty)).first; + it = entries_.insert({key, empty}).first; it->second.parent_ = it; it->second.rank_ = 0; } diff --git a/gtsam/base/FastList.h b/gtsam/base/FastList.h index d7c2dd6f4..414c1e83f 100644 --- a/gtsam/base/FastList.h +++ b/gtsam/base/FastList.h @@ -20,10 +20,12 @@ #include #include -#include #ifdef GTSAM_ENABLE_BOOST_SERIALIZATION #include #include +#if BOOST_VERSION >= 107400 +#include +#endif #include #endif diff --git a/gtsam/base/FastMap.h b/gtsam/base/FastMap.h index 359c865a5..e8ef3fc4f 100644 --- a/gtsam/base/FastMap.h +++ b/gtsam/base/FastMap.h @@ -63,7 +63,7 @@ public: } /** Handy 'insert' function for Matlab wrapper */ - bool insert2(const KEY& key, const VALUE& val) { return Base::insert(std::make_pair(key, val)).second; } + bool insert2(const KEY& key, const VALUE& val) { return Base::insert({key, val}).second; } /** Handy 'exists' function */ bool exists(const KEY& e) const { return this->find(e) != this->end(); } diff --git a/gtsam/base/FastSet.h b/gtsam/base/FastSet.h index b7c00801b..a1be08d80 100644 --- a/gtsam/base/FastSet.h +++ b/gtsam/base/FastSet.h @@ -18,8 +18,8 @@ #pragma once -#include #ifdef GTSAM_ENABLE_BOOST_SERIALIZATION +#include #if BOOST_VERSION >= 107400 #include #endif @@ -51,7 +51,7 @@ 
template class FastSet: public std::set, typename internal::FastDefaultAllocator::type> { - BOOST_CONCEPT_ASSERT ((IsTestable )); + GTSAM_CONCEPT_ASSERT(IsTestable); public: diff --git a/gtsam/base/GenericValue.h b/gtsam/base/GenericValue.h index bbf0f57ad..bb92b6b2e 100644 --- a/gtsam/base/GenericValue.h +++ b/gtsam/base/GenericValue.h @@ -23,8 +23,6 @@ #include #include -#include - #include #include #include // operator typeid diff --git a/gtsam/base/Group.h b/gtsam/base/Group.h index 89fa8248c..3430a06ae 100644 --- a/gtsam/base/Group.h +++ b/gtsam/base/Group.h @@ -22,10 +22,13 @@ #include +#ifdef GTSAM_USE_BOOST_FEATURES #include #include #include #include +#endif + #include namespace gtsam { @@ -50,8 +53,8 @@ public: //typedef typename traits::identity::value_type identity_value_type; BOOST_CONCEPT_USAGE(IsGroup) { - BOOST_STATIC_ASSERT_MSG( - (boost::is_base_of::value), + static_assert( + (std::is_base_of::value), "This type's structure_category trait does not assert it as a group (or derived)"); e = traits::Identity(); e = traits::Compose(g, h); @@ -79,7 +82,7 @@ private: /// Check invariants template -BOOST_CONCEPT_REQUIRES(((IsGroup)),(bool)) // +GTSAM_CONCEPT_REQUIRES(IsGroup,bool) // check_group_invariants(const G& a, const G& b, double tol = 1e-9) { G e = traits::Identity(); return traits::Equals(traits::Compose(a, traits::Inverse(a)), e, tol) @@ -125,7 +128,7 @@ struct AdditiveGroup : AdditiveGroupTraits, Testable {}; /// compose multiple times template -BOOST_CONCEPT_REQUIRES(((IsGroup)),(G)) // +GTSAM_CONCEPT_REQUIRES(IsGroup,G) // compose_pow(const G& g, size_t n) { if (n == 0) return traits::Identity(); else if (n == 1) return g; @@ -136,8 +139,8 @@ compose_pow(const G& g, size_t n) { /// Assumes nothing except group structure and Testable from G and H template class DirectProduct: public std::pair { - BOOST_CONCEPT_ASSERT((IsGroup)); - BOOST_CONCEPT_ASSERT((IsGroup)); + GTSAM_CONCEPT_ASSERT1(IsGroup); + GTSAM_CONCEPT_ASSERT2(IsGroup); public: /// Default constructor yields identity @@ -167,8 +170,8 @@ struct traits > : /// Assumes existence of three additive operators for both groups template class DirectSum: public std::pair { - BOOST_CONCEPT_ASSERT((IsGroup)); // TODO(frank): check additive - BOOST_CONCEPT_ASSERT((IsGroup)); // TODO(frank): check additive + GTSAM_CONCEPT_ASSERT1(IsGroup); // TODO(frank): check additive + GTSAM_CONCEPT_ASSERT2(IsGroup); // TODO(frank): check additive const G& g() const { return this->first; } const H& h() const { return this->second;} diff --git a/gtsam/base/Lie.h b/gtsam/base/Lie.h index 1b574816a..ce021e10e 100644 --- a/gtsam/base/Lie.h +++ b/gtsam/base/Lie.h @@ -265,8 +265,8 @@ public: typedef typename traits::ChartJacobian ChartJacobian; BOOST_CONCEPT_USAGE(IsLieGroup) { - BOOST_STATIC_ASSERT_MSG( - (boost::is_base_of::value), + static_assert( + (std::is_base_of::value), "This type's trait does not assert it is a Lie group (or derived)"); // group opertations with Jacobians diff --git a/gtsam/base/Manifold.h b/gtsam/base/Manifold.h index 962dc8269..815c8b288 100644 --- a/gtsam/base/Manifold.h +++ b/gtsam/base/Manifold.h @@ -22,10 +22,7 @@ #include #include #include - -#include -#include -#include +#include namespace gtsam { @@ -95,7 +92,7 @@ template struct ManifoldTraits: GetDimensionImpl { // Check that Class has the necessary machinery - BOOST_CONCEPT_ASSERT((HasManifoldPrereqs)); + GTSAM_CONCEPT_ASSERT(HasManifoldPrereqs); // Dimension of the manifold enum { dimension = Class::dimension }; @@ -123,7 +120,7 @@ template struct 
Manifold: ManifoldTraits, Testable {} /// Check invariants for Manifold type template -BOOST_CONCEPT_REQUIRES(((IsTestable)),(bool)) // +GTSAM_CONCEPT_REQUIRES(IsTestable, bool) // check_manifold_invariants(const T& a, const T& b, double tol=1e-9) { typename traits::TangentVector v0 = traits::Local(a,a); typename traits::TangentVector v = traits::Local(a,b); @@ -143,10 +140,10 @@ public: typedef typename traits::TangentVector TangentVector; BOOST_CONCEPT_USAGE(IsManifold) { - BOOST_STATIC_ASSERT_MSG( - (boost::is_base_of::value), + static_assert( + (std::is_base_of::value), "This type's structure_category trait does not assert it as a manifold (or derived)"); - BOOST_STATIC_ASSERT(TangentVector::SizeAtCompileTime == dim); + static_assert(TangentVector::SizeAtCompileTime == dim); // make sure Chart methods are defined v = traits::Local(p, q); @@ -164,7 +161,7 @@ template struct FixedDimension { typedef const int value_type; static const int value = traits::dimension; - BOOST_STATIC_ASSERT_MSG(value != Eigen::Dynamic, + static_assert(value != Eigen::Dynamic, "FixedDimension instantiated for dymanically-sized type."); }; } // \ namespace gtsam diff --git a/gtsam/base/Matrix.cpp b/gtsam/base/Matrix.cpp index 5b8a021d4..3c6e64dbc 100644 --- a/gtsam/base/Matrix.cpp +++ b/gtsam/base/Matrix.cpp @@ -23,10 +23,6 @@ #include #include -#include -#include -#include - #include #include #include @@ -129,8 +125,10 @@ bool linear_dependent(const Matrix& A, const Matrix& B, double tol) { /* ************************************************************************* */ Vector operator^(const Matrix& A, const Vector & v) { - if (A.rows()!=v.size()) throw std::invalid_argument( - boost::str(boost::format("Matrix operator^ : A.m(%d)!=v.size(%d)") % A.rows() % v.size())); + if (A.rows()!=v.size()) { + throw std::invalid_argument("Matrix operator^ : A.m(" + std::to_string(A.rows()) + ")!=v.size(" + + std::to_string(v.size()) + ")"); + } // Vector vt = v.transpose(); // Vector vtA = vt * A; // return vtA.transpose(); @@ -250,8 +248,7 @@ pair qr(const Matrix& A) { xjm(k) = R(j+k, j); // calculate the Householder vector v - double beta; Vector vjm; - boost::tie(beta,vjm) = house(xjm); + const auto [beta, vjm] = house(xjm); // pad with zeros to get m-dimensional vector v for(size_t k = 0 ; k < m; k++) @@ -269,13 +266,13 @@ pair qr(const Matrix& A) { } /* ************************************************************************* */ -list > +list > weighted_eliminate(Matrix& A, Vector& b, const Vector& sigmas) { size_t m = A.rows(), n = A.cols(); // get size(A) size_t maxRank = min(m,n); // create list - list > results; + list > results; Vector pseudo(m); // allocate storage for pseudo-inverse Vector weights = sigmas.array().square().inverse(); // calculate weights once @@ -304,7 +301,7 @@ weighted_eliminate(Matrix& A, Vector& b, const Vector& sigmas) { // construct solution (r, d, sigma) // TODO: avoid sqrt, store precision or at least variance - results.push_back(boost::make_tuple(r, d, 1./sqrt(precision))); + results.push_back(std::make_tuple(r, d, 1./sqrt(precision))); // exit after rank exhausted if (results.size()>=maxRank) break; @@ -565,7 +562,7 @@ void svd(const Matrix& A, Matrix& U, Vector& S, Matrix& V) { } /* ************************************************************************* */ -boost::tuple DLT(const Matrix& A, double rank_tol) { +std::tuple DLT(const Matrix& A, double rank_tol) { // Check size of A size_t n = A.rows(), p = A.cols(), m = min(n,p); @@ -582,7 +579,7 @@ boost::tuple DLT(const Matrix& A, 
double rank_tol) { // Return rank, error, and corresponding column of V double error = m<rank ? 0 : S(m-1); - return boost::tuple<int, double, Vector>
((int)rank, error, Vector(column(V, p-1))); + return std::tuple((int)rank, error, Vector(column(V, p-1))); } /* ************************************************************************* */ @@ -613,11 +610,12 @@ std::string formatMatrixIndented(const std::string& label, const Matrix& matrix, else matrixPrinted << matrix; const std::string matrixStr = matrixPrinted.str(); - boost::tokenizer > tok(matrixStr, boost::char_separator("\n")); + // Split the matrix string into lines and indent them + std::string line; + std::istringstream iss(matrixStr); DenseIndex row = 0; - for(const std::string& line: tok) - { + while (std::getline(iss, line)) { assert(row < effectiveRows); if(row > 0) ss << padding; @@ -626,6 +624,7 @@ std::string formatMatrixIndented(const std::string& label, const Matrix& matrix, ss << "\n"; ++ row; } + } else { ss << "Empty (" << matrix.rows() << "x" << matrix.cols() << ")"; } diff --git a/gtsam/base/Matrix.h b/gtsam/base/Matrix.h index 88714d6f6..6dba9cb05 100644 --- a/gtsam/base/Matrix.h +++ b/gtsam/base/Matrix.h @@ -26,7 +26,6 @@ #include #include -#include #include @@ -280,7 +279,7 @@ struct Reshape { template inline typename Reshape::ReshapedType reshape(const Eigen::Matrix & m){ - BOOST_STATIC_ASSERT(InM * InN == OutM * OutN); + static_assert(InM * InN == OutM * OutN); return Reshape::reshape(m); } @@ -307,7 +306,7 @@ GTSAM_EXPORT void inplace_QR(Matrix& A); * @param sigmas is a vector of the measurement standard deviation * @return list of r vectors, d and sigma */ -GTSAM_EXPORT std::list > +GTSAM_EXPORT std::list > weighted_eliminate(Matrix& A, Vector& b, const Vector& sigmas); /** @@ -434,7 +433,7 @@ GTSAM_EXPORT void svd(const Matrix& A, Matrix& U, Vector& S, Matrix& V); * Returns rank of A, minimum error (singular value), * and corresponding eigenvector (column of V, with A=U*S*V') */ -GTSAM_EXPORT boost::tuple +GTSAM_EXPORT std::tuple DLT(const Matrix& A, double rank_tol = 1e-9); /** diff --git a/gtsam/base/ProductLieGroup.h b/gtsam/base/ProductLieGroup.h index 831968bc8..a8bb2ef98 100644 --- a/gtsam/base/ProductLieGroup.h +++ b/gtsam/base/ProductLieGroup.h @@ -27,8 +27,8 @@ namespace gtsam { /// Assumes Lie group structure for G and H template class ProductLieGroup: public std::pair { - BOOST_CONCEPT_ASSERT((IsLieGroup)); - BOOST_CONCEPT_ASSERT((IsLieGroup)); + GTSAM_CONCEPT_ASSERT1(IsLieGroup); + GTSAM_CONCEPT_ASSERT2(IsLieGroup); typedef std::pair Base; protected: diff --git a/gtsam/base/Testable.h b/gtsam/base/Testable.h index b68d6a97c..8eb6326c0 100644 --- a/gtsam/base/Testable.h +++ b/gtsam/base/Testable.h @@ -33,7 +33,8 @@ #pragma once -#include +#include + #include #include #include @@ -151,7 +152,7 @@ namespace gtsam { struct Testable { // Check that T has the necessary methods - BOOST_CONCEPT_ASSERT((HasTestablePrereqs)); + GTSAM_CONCEPT_ASSERT(HasTestablePrereqs); static void Print(const T& m, const std::string& str = "") { m.print(str); @@ -170,7 +171,7 @@ namespace gtsam { * * NOTE: intentionally not in the gtsam namespace to allow for classes not in * the gtsam namespace to be more easily enforced as testable - * @deprecated please use BOOST_CONCEPT_ASSERT and + * @deprecated please use GTSAM_CONCEPT_ASSERT and */ #define GTSAM_CONCEPT_TESTABLE_INST(T) template class gtsam::IsTestable; #define GTSAM_CONCEPT_TESTABLE_TYPE(T) using _gtsam_Testable_##T = gtsam::IsTestable; diff --git a/gtsam/base/ThreadsafeException.h b/gtsam/base/ThreadsafeException.h index 57e37237f..79dbe6bb1 100644 --- a/gtsam/base/ThreadsafeException.h +++ 
b/gtsam/base/ThreadsafeException.h @@ -46,9 +46,11 @@ private: protected: typedef std::basic_string, tbb::tbb_allocator > String; + typedef tbb::tbb_allocator Allocator; #else protected: typedef std::string String; + typedef std::allocator Allocator; #endif protected: diff --git a/gtsam/base/VectorSpace.h b/gtsam/base/VectorSpace.h index 9fbd581bb..f4e3d3020 100644 --- a/gtsam/base/VectorSpace.h +++ b/gtsam/base/VectorSpace.h @@ -185,7 +185,7 @@ template struct VectorSpaceTraits: VectorSpaceImpl { // Check that Class has the necessary machinery - BOOST_CONCEPT_ASSERT((HasVectorSpacePrereqs)); + GTSAM_CONCEPT_ASSERT(HasVectorSpacePrereqs); typedef vector_space_tag structure_category; @@ -473,8 +473,8 @@ public: typedef typename traits::structure_category structure_category_tag; BOOST_CONCEPT_USAGE(IsVectorSpace) { - BOOST_STATIC_ASSERT_MSG( - (boost::is_base_of::value), + static_assert( + (std::is_base_of::value), "This type's trait does not assert it as a vector space (or derived)"); r = p + q; r = -p; diff --git a/gtsam/base/WeightedSampler.h b/gtsam/base/WeightedSampler.h index 7c343b098..790dae3a9 100644 --- a/gtsam/base/WeightedSampler.h +++ b/gtsam/base/WeightedSampler.h @@ -69,7 +69,7 @@ class WeightedSampler { static const double kexp1 = std::exp(1.0); for (auto it = weights.begin(); it != weights.begin() + numSamples; ++it) { const double k_i = kexp1 / *it; - reservoir.push(std::make_pair(k_i, it - weights.begin() + 1)); + reservoir.push({k_i, it - weights.begin() + 1}); } // Step 4: Repeat Steps 5–10 until the population is exhausted @@ -110,7 +110,7 @@ class WeightedSampler { // Step 8: The item in reservoir with the minimum key is replaced by // item v_i reservoir.pop(); - reservoir.push(std::make_pair(k_i, it - weights.begin() + 1)); + reservoir.push({k_i, it - weights.begin() + 1}); } } diff --git a/gtsam/base/chartTesting.h b/gtsam/base/chartTesting.h index 8f5213f91..05b06a489 100644 --- a/gtsam/base/chartTesting.h +++ b/gtsam/base/chartTesting.h @@ -39,7 +39,7 @@ void testDefaultChart(TestResult& result_, // First, check the basic chart concept. This checks that the interface is satisfied. // The rest of the function is even more detailed, checking the correctness of the chart. - BOOST_CONCEPT_ASSERT((ChartConcept)); + GTSAM_CONCEPT_ASSERT(ChartConcept); T other = value; diff --git a/gtsam/base/cholesky.cpp b/gtsam/base/cholesky.cpp index 17cb291f0..71cef2524 100644 --- a/gtsam/base/cholesky.cpp +++ b/gtsam/base/cholesky.cpp @@ -20,7 +20,6 @@ #include #include -#include #include using namespace std; diff --git a/gtsam/base/concepts.h b/gtsam/base/concepts.h index da872d006..242e681da 100644 --- a/gtsam/base/concepts.h +++ b/gtsam/base/concepts.h @@ -8,25 +8,27 @@ #pragma once -// This is a helper to ease the transition to the new traits defined in this file. -// Uncomment this if you want all methods that are tagged as not implemented -// to cause compilation errors. 
-#ifdef COMPILE_ERROR_NOT_IMPLEMENTED - -#include -#define CONCEPT_NOT_IMPLEMENTED BOOST_STATIC_ASSERT_MSG(boost::false_type, \ -"This method is required by the new concepts framework but has not been implemented yet."); - -#else - -#include -#define CONCEPT_NOT_IMPLEMENTED \ - throw std::runtime_error("This method is required by the new concepts framework but has not been implemented yet."); - +#ifdef GTSAM_USE_BOOST_FEATURES +#include +#include +#include +#include +#define GTSAM_CONCEPT_ASSERT(concept) BOOST_CONCEPT_ASSERT((concept)) +#define GTSAM_CONCEPT_ASSERT1(concept) BOOST_CONCEPT_ASSERT((concept)) +#define GTSAM_CONCEPT_ASSERT2(concept) BOOST_CONCEPT_ASSERT((concept)) +#define GTSAM_CONCEPT_ASSERT3(concept) BOOST_CONCEPT_ASSERT((concept)) +#define GTSAM_CONCEPT_ASSERT4(concept) BOOST_CONCEPT_ASSERT((concept)) +#define GTSAM_CONCEPT_REQUIRES(concept, return_type) BOOST_CONCEPT_REQUIRES(((concept)), (return_type)) +#else +// These do something sensible: +#define BOOST_CONCEPT_USAGE(concept) void check##concept() +// TODO(dellaert): would be nice if it was a single macro... +#define GTSAM_CONCEPT_ASSERT(concept) concept checkConcept [[maybe_unused]]; +#define GTSAM_CONCEPT_ASSERT1(concept) concept checkConcept1 [[maybe_unused]]; +#define GTSAM_CONCEPT_ASSERT2(concept) concept checkConcept2 [[maybe_unused]]; +#define GTSAM_CONCEPT_ASSERT3(concept) concept checkConcept3 [[maybe_unused]]; +#define GTSAM_CONCEPT_ASSERT4(concept) concept checkConcept4 [[maybe_unused]]; +// This one just ignores concept for now: +#define GTSAM_CONCEPT_REQUIRES(concept, return_type) return_type #endif -namespace gtsam { - -template struct traits; - -} diff --git a/gtsam/base/kruskal-inl.h b/gtsam/base/kruskal-inl.h new file mode 100644 index 000000000..d610541a0 --- /dev/null +++ b/gtsam/base/kruskal-inl.h @@ -0,0 +1,106 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file kruskal-inl.h + * @date Dec 31, 2009 + * @author Frank Dellaert + * @author Yong-Dian Jian + * @author Ankur Roy Chowdhury + */ + +#pragma once + +#include +#include +#include +#include + +#include +#include +#include + +namespace gtsam::utils { + +/*****************************************************************************/ +/* Sort the 'weights' in increasing order and return the sorted order of its + * indices. */ +inline std::vector sortedIndices(const std::vector &weights) { + // Get the number of elements in the 'weights' vector. + const size_t n = weights.size(); + + // Create a vector of 'indices'. + std::vector indices(n); + std::iota(indices.begin(), indices.end(), 0); + + // Sort the 'indices' based on the values in 'weights'. 
+ std::stable_sort(indices.begin(), indices.end(), + [&weights](const size_t i0, const size_t i1) { + return weights[i0] < weights[i1]; + }); + + return indices; +} + +/****************************************************************/ +template +std::vector kruskal(const FactorGraph &fg, + const std::vector &weights) { + if (fg.size() != weights.size()) { + throw std::runtime_error( + "kruskal() failure: fg.size() != weights.size(), all factors need to " + "assigned a weight"); + } + + // Create an index from variables to factor indices. + const VariableIndex variableIndex(fg); + + // Get indices in sort-order of the weights. + const std::vector sortedIndices = + gtsam::utils::sortedIndices(weights); + + // Create a vector to hold MST indices. + const size_t n = variableIndex.size(); + std::vector treeIndices; + treeIndices.reserve(n - 1); + + // Initialize disjoint-set forest to keep track of merged Keys. + DSFMap dsf; + + // Loop over all edges in order of increasing weight. + size_t count = 0; + for (const size_t index : sortedIndices) { + const auto factor = fg[index]; + + // Ignore non-binary edges. + if (factor->size() != 2) continue; + + // Find the representatives of the sets for both the Keys in the binary + // factor. + const auto u = dsf.find(factor->front()), v = dsf.find(factor->back()); + + // Check if there is a potential loop. + const bool loop = (u == v); + if (!loop) { + // Merge the sets. + dsf.merge(u, v); + + // Add the current index to the tree. + treeIndices.push_back(index); + + // Break if all the Keys have been added to the tree. + if (++count == n - 1) break; + } + } + return treeIndices; +} + +} // namespace gtsam::utils diff --git a/gtsam/base/kruskal.h b/gtsam/base/kruskal.h new file mode 100644 index 000000000..058c22435 --- /dev/null +++ b/gtsam/base/kruskal.h @@ -0,0 +1,41 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file kruskal.h + * @date Dec 31, 2009 + * @author Frank Dellaert + * @author Yong-Dian Jian + * @author Ankur Roy Chowdhury + */ + +#pragma once + +#include +#include + +#include + +namespace gtsam::utils { +/** + * Compute the minimum spanning tree (MST) using Kruskal's algorithm + * @param fg Factor graph + * @param weights Weights of the edges/factors in the factor graph + * @return Edge/factor indices spanning the MST + * @note Only binary factors are considered while constructing the spanning tree + * @note The indices of 'weights' should match the indices of the edges in the factor graph + */ +template +std::vector kruskal(const FactorGraph &fg, + const std::vector &weights); +} // namespace gtsam::utils + +#include diff --git a/gtsam/base/numericalDerivative.h b/gtsam/base/numericalDerivative.h index 8c9934d77..8183841b2 100644 --- a/gtsam/base/numericalDerivative.h +++ b/gtsam/base/numericalDerivative.h @@ -71,10 +71,10 @@ typename Eigen::Matrix numericalGradient( std::function h, const X& x, double delta = 1e-5) { double factor = 1.0 / (2.0 * delta); - BOOST_STATIC_ASSERT_MSG( - (boost::is_base_of::structure_category>::value), + static_assert( + (std::is_base_of::structure_category>::value), "Template argument X must be a manifold type."); - BOOST_STATIC_ASSERT_MSG(N>0, "Template argument X must be fixed-size type or N must be specified."); + static_assert(N>0, "Template argument X must be fixed-size type or N must be specified."); // Prepare a tangent vector to perturb x with, only works for fixed size typename traits::TangentVector d; @@ -111,13 +111,13 @@ typename internal::FixedSizeMatrix::type numericalDerivative11( std::function h, const X& x, double delta = 1e-5) { typedef typename internal::FixedSizeMatrix::type Matrix; - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); typedef traits TraitsY; - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X must be a manifold type."); - BOOST_STATIC_ASSERT_MSG(N>0, "Template argument X must be fixed-size type or N must be specified."); + static_assert(N>0, "Template argument X must be fixed-size type or N must be specified."); typedef traits TraitsX; // get value at x, and corresponding chart @@ -165,9 +165,9 @@ typename internal::FixedSizeMatrix::type numericalDerivative11(Y (*h)(const template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative21(const std::function& h, const X1& x1, const X2& x2, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); return numericalDerivative11( std::bind(h, std::placeholders::_1, std::cref(x2)), x1, delta); @@ -194,9 +194,9 @@ typename internal::FixedSizeMatrix::type numericalDerivative21(Y (*h)(cons template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative22(std::function h, const X1& x1, 
const X2& x2, double delta = 1e-5) { -// BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), +// static_assert( (std::is_base_of::structure_category>::value), // "Template argument X1 must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X2 must be a manifold type."); return numericalDerivative11( std::bind(h, std::cref(x1), std::placeholders::_1), x2, delta); @@ -226,9 +226,9 @@ template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative31( std::function h, const X1& x1, const X2& x2, const X3& x3, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); return numericalDerivative11( std::bind(h, std::placeholders::_1, std::cref(x2), std::cref(x3)), @@ -259,9 +259,9 @@ template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative32( std::function h, const X1& x1, const X2& x2, const X3& x3, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X2 must be a manifold type."); return numericalDerivative11( std::bind(h, std::cref(x1), std::placeholders::_1, std::cref(x3)), @@ -292,9 +292,9 @@ template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative33( std::function h, const X1& x1, const X2& x2, const X3& x3, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X3 must be a manifold type."); return numericalDerivative11( std::bind(h, std::cref(x1), std::cref(x2), std::placeholders::_1), @@ -325,9 +325,9 @@ template::di typename internal::FixedSizeMatrix::type numericalDerivative41( std::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); return numericalDerivative11( std::bind(h, std::placeholders::_1, std::cref(x2), std::cref(x3), @@ -359,9 +359,9 @@ template::di typename internal::FixedSizeMatrix::type numericalDerivative42( std::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold 
type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X2 must be a manifold type."); return numericalDerivative11( std::bind(h, std::cref(x1), std::placeholders::_1, std::cref(x3), @@ -393,9 +393,9 @@ template::di typename internal::FixedSizeMatrix::type numericalDerivative43( std::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X3 must be a manifold type."); return numericalDerivative11( std::bind(h, std::cref(x1), std::cref(x2), std::placeholders::_1, @@ -427,9 +427,9 @@ template::di typename internal::FixedSizeMatrix::type numericalDerivative44( std::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X4 must be a manifold type."); return numericalDerivative11( std::bind(h, std::cref(x1), std::cref(x2), std::cref(x3), @@ -462,9 +462,9 @@ template::type numericalDerivative51( std::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); return numericalDerivative11( std::bind(h, std::placeholders::_1, std::cref(x2), std::cref(x3), @@ -498,9 +498,9 @@ template::type numericalDerivative52( std::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); return numericalDerivative11( std::bind(h, std::cref(x1), std::placeholders::_1, std::cref(x3), @@ -534,9 +534,9 @@ template::type numericalDerivative53( std::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); return numericalDerivative11( std::bind(h, std::cref(x1), std::cref(x2), std::placeholders::_1, @@ 
-570,9 +570,9 @@ template::type numericalDerivative54( std::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); return numericalDerivative11( std::bind(h, std::cref(x1), std::cref(x2), std::cref(x3), @@ -606,9 +606,9 @@ template::type numericalDerivative55( std::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); return numericalDerivative11( std::bind(h, std::cref(x1), std::cref(x2), std::cref(x3), @@ -643,9 +643,9 @@ template::type numericalDerivative61( std::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); return numericalDerivative11( std::bind(h, std::placeholders::_1, std::cref(x2), std::cref(x3), @@ -680,9 +680,9 @@ template::type numericalDerivative62( std::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); return numericalDerivative11( std::bind(h, std::cref(x1), std::placeholders::_1, std::cref(x3), @@ -717,9 +717,9 @@ template::type numericalDerivative63( std::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); return numericalDerivative11( std::bind(h, std::cref(x1), std::cref(x2), std::placeholders::_1, @@ -754,9 +754,9 @@ template::type numericalDerivative64( std::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( 
(std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); return numericalDerivative11( std::bind(h, std::cref(x1), std::cref(x2), std::cref(x3), @@ -791,9 +791,9 @@ template::type numericalDerivative65( std::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); return numericalDerivative11( std::bind(h, std::cref(x1), std::cref(x2), std::cref(x3), @@ -829,9 +829,9 @@ typename internal::FixedSizeMatrix::type numericalDerivative66( std::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); return numericalDerivative11( std::bind(h, std::cref(x1), std::cref(x2), std::cref(x3), @@ -860,7 +860,7 @@ inline typename internal::FixedSizeMatrix::type numericalDerivative66(Y (* template inline typename internal::FixedSizeMatrix::type numericalHessian(std::function f, const X& x, double delta = 1e-5) { - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), + static_assert( (std::is_base_of::structure_category>::value), "Template argument X must be a manifold type."); typedef Eigen::Matrix::dimension, 1> VectorD; typedef std::function F; diff --git a/gtsam/base/std_optional_serialization.h b/gtsam/base/std_optional_serialization.h index 07a02c228..ec6eec56e 100644 --- a/gtsam/base/std_optional_serialization.h +++ b/gtsam/base/std_optional_serialization.h @@ -67,7 +67,7 @@ void save(Archive& ar, const std::optional& t, const unsigned int /*version*/ // It is an inherent limitation to the serialization of optional.hpp // that the underlying type must be either a pointer or must have a // default constructor. 
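Every numericalDerivative.h hunk above is the same mechanical substitution: BOOST_STATIC_ASSERT_MSG over boost::is_base_of becomes static_assert over std::is_base_of. One representative hunk, with the template arguments (lost in the extract above) reconstructed from the surrounding traits machinery and therefore to be read as an approximation:

    - BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<manifold_tag, typename traits<Y>::structure_category>::value),
    + static_assert( (std::is_base_of<manifold_tag, typename traits<Y>::structure_category>::value),
          "Template argument Y must be a manifold type.");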
- BOOST_STATIC_ASSERT(boost::serialization::detail::is_default_constructible::value || boost::is_pointer::value); + static_assert(boost::serialization::detail::is_default_constructible::value || boost::is_pointer::value); const bool tflag = t.has_value(); ar << boost::serialization::make_nvp("initialized", tflag); if (tflag) { diff --git a/gtsam/base/tests/testGroup.cpp b/gtsam/base/tests/testGroup.cpp index 7e8cb7821..3c8b04f39 100644 --- a/gtsam/base/tests/testGroup.cpp +++ b/gtsam/base/tests/testGroup.cpp @@ -80,7 +80,7 @@ using namespace gtsam; typedef Symmetric<2> S2; TEST(Group, S2) { S2 e, s1 = S2::Transposition(0, 1); - BOOST_CONCEPT_ASSERT((IsGroup)); + GTSAM_CONCEPT_ASSERT(IsGroup); EXPECT(check_group_invariants(e, s1)); } @@ -88,7 +88,7 @@ TEST(Group, S2) { typedef Symmetric<3> S3; TEST(Group, S3) { S3 e, s1 = S3::Transposition(0, 1), s2 = S3::Transposition(1, 2); - BOOST_CONCEPT_ASSERT((IsGroup)); + GTSAM_CONCEPT_ASSERT(IsGroup); EXPECT(check_group_invariants(e, s1)); EXPECT(assert_equal(s1, s1 * e)); EXPECT(assert_equal(s1, e * s1)); @@ -127,7 +127,7 @@ struct traits : internal::MultiplicativeGroupTraits { TEST(Group, Dih6) { Dih6 e, g(S2::Transposition(0, 1), S3::Transposition(0, 1) * S3::Transposition(1, 2)); - BOOST_CONCEPT_ASSERT((IsGroup)); + GTSAM_CONCEPT_ASSERT(IsGroup); EXPECT(check_group_invariants(e, g)); EXPECT(assert_equal(e, compose_pow(g, 0))); EXPECT(assert_equal(g, compose_pow(g, 1))); diff --git a/gtsam/base/tests/testKruskal.cpp b/gtsam/base/tests/testKruskal.cpp new file mode 100644 index 000000000..bb8cfcaca --- /dev/null +++ b/gtsam/base/tests/testKruskal.cpp @@ -0,0 +1,105 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testKruskal + * @brief Unit tests for Kruskal's minimum spanning tree algorithm + * @author Ankur Roy Chowdhury + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +gtsam::GaussianFactorGraph makeTestGaussianFactorGraph() { + using namespace gtsam; + using namespace symbol_shorthand; + + GaussianFactorGraph gfg; + Matrix I = I_2x2; + Vector2 b(0, 0); + const SharedDiagonal model = noiseModel::Diagonal::Sigmas(Vector2(0.5, 0.5)); + gfg.emplace_shared(X(1), I, X(2), I, b, model); + gfg.emplace_shared(X(1), I, X(3), I, b, model); + gfg.emplace_shared(X(1), I, X(4), I, b, model); + gfg.emplace_shared(X(2), I, X(3), I, b, model); + gfg.emplace_shared(X(2), I, X(4), I, b, model); + gfg.emplace_shared(X(3), I, X(4), I, b, model); + + return gfg; +} + +gtsam::NonlinearFactorGraph makeTestNonlinearFactorGraph() { + using namespace gtsam; + using namespace symbol_shorthand; + + NonlinearFactorGraph nfg; + const SharedDiagonal model = noiseModel::Diagonal::Sigmas(Vector2(0.5, 0.5)); + nfg.emplace_shared>(X(1), X(2), Rot3(), model); + nfg.emplace_shared>(X(1), X(3), Rot3(), model); + nfg.emplace_shared>(X(1), X(4), Rot3(), model); + nfg.emplace_shared>(X(2), X(3), Rot3(), model); + nfg.emplace_shared>(X(2), X(4), Rot3(), model); + nfg.emplace_shared>(X(3), X(4), Rot3(), model); + + return nfg; +} + +/* ************************************************************************* */ +TEST(kruskal, GaussianFactorGraph) { + using namespace gtsam; + + // Create factor graph. + const auto g = makeTestGaussianFactorGraph(); + + // Assign weights to all the edges in the graph. + const auto weights = std::vector(g.size(), 1.0); + + const auto mstEdgeIndices = utils::kruskal(g, weights); + + EXPECT(mstEdgeIndices[0] == 0); + EXPECT(mstEdgeIndices[1] == 1); + EXPECT(mstEdgeIndices[2] == 2); +} + +/* ************************************************************************* */ +TEST(kruskal, NonlinearFactorGraph) { + using namespace gtsam; + + // Create factor graph. + const auto g = makeTestNonlinearFactorGraph(); + + // Assign weights to all the edges in the graph. 
+ const auto weights = std::vector(g.size(), 1.0); + + const auto mstEdgeIndices = utils::kruskal(g, weights); + + EXPECT(mstEdgeIndices[0] == 0); + EXPECT(mstEdgeIndices[1] == 1); + EXPECT(mstEdgeIndices[2] == 2); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/base/tests/testMatrix.cpp b/gtsam/base/tests/testMatrix.cpp index fb3bd948c..a000e4864 100644 --- a/gtsam/base/tests/testMatrix.cpp +++ b/gtsam/base/tests/testMatrix.cpp @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -858,8 +857,7 @@ TEST(Matrix, qr ) Matrix expectedR = (Matrix(6, 4) << 15, 0, -8.3333, 0, 00, 11.1803, 0, -2.2361, 00, 0, 7.4536, 0, 00, 0, 0, 10.9545, 00, 0, 0, 0, 00, 0, 0, 0).finished(); - Matrix Q, R; - boost::tie(Q, R) = qr(A); + const auto [Q, R] = qr(A); EXPECT(assert_equal(expectedQ, Q, 1e-4)); EXPECT(assert_equal(expectedR, R, 1e-4)); EXPECT(assert_equal(A, Q*R, 1e-14)); @@ -911,15 +909,12 @@ TEST(Matrix, weighted_elimination ) // perform elimination Matrix A1 = A; Vector b1 = b; - std::list > solution = + std::list > solution = weighted_eliminate(A1, b1, sigmas); // unpack and verify size_t i = 0; - for (const auto& tuple : solution) { - Vector r; - double di, sigma; - boost::tie(r, di, sigma) = tuple; + for (const auto& [r, di, sigma] : solution) { EXPECT(assert_equal(r, expectedR.row(i))); // verify r DOUBLES_EQUAL(d(i), di, 1e-8); // verify d DOUBLES_EQUAL(newSigmas(i), sigma, 1e-5); // verify sigma @@ -1143,10 +1138,7 @@ TEST(Matrix, DLT ) 1.89, 2.24, 3.99, 3.24, 3.84, 6.84, 18.09, 21.44, 38.19, 2.24, 2.48, 6.24, 3.08, 3.41, 8.58, 24.64, 27.28, 68.64 ).finished(); - int rank; - double error; - Vector actual; - boost::tie(rank,error,actual) = DLT(A); + const auto [rank,error,actual] = DLT(A); Vector expected = (Vector(9) << -0.0, 0.2357, 0.4714, -0.2357, 0.0, - 0.4714,-0.4714, 0.4714, 0.0).finished(); EXPECT_LONGS_EQUAL(8,rank); EXPECT_DOUBLES_EQUAL(0,error,1e-8); @@ -1154,17 +1146,30 @@ TEST(Matrix, DLT ) } //****************************************************************************** -TEST(Matrix , IsVectorSpace) { - BOOST_CONCEPT_ASSERT((IsVectorSpace)); - typedef Eigen::Matrix RowMajor; - BOOST_CONCEPT_ASSERT((IsVectorSpace)); - BOOST_CONCEPT_ASSERT((IsVectorSpace)); - BOOST_CONCEPT_ASSERT((IsVectorSpace)); - typedef Eigen::Matrix RowVector; - BOOST_CONCEPT_ASSERT((IsVectorSpace)); - BOOST_CONCEPT_ASSERT((IsVectorSpace)); +TEST(Matrix, Matrix24IsVectorSpace) { + GTSAM_CONCEPT_ASSERT(IsVectorSpace); } +TEST(Matrix, RowMajorIsVectorSpace) { + typedef Eigen::Matrix RowMajor; + GTSAM_CONCEPT_ASSERT(IsVectorSpace); +} + +TEST(Matrix, MatrixIsVectorSpace) { + GTSAM_CONCEPT_ASSERT(IsVectorSpace); +} + +TEST(Matrix, VectorIsVectorSpace) { + GTSAM_CONCEPT_ASSERT(IsVectorSpace); +} + +TEST(Matrix, RowVectorIsVectorSpace) { + typedef Eigen::Matrix RowVector; + GTSAM_CONCEPT_ASSERT1(IsVectorSpace); + GTSAM_CONCEPT_ASSERT2(IsVectorSpace); +} + +//****************************************************************************** TEST(Matrix, AbsoluteError) { double a = 2000, b = 1997, tol = 1e-1; bool isEqual; diff --git a/gtsam/base/tests/testVector.cpp b/gtsam/base/tests/testVector.cpp index c87732b09..c749a5191 100644 --- a/gtsam/base/tests/testVector.cpp +++ b/gtsam/base/tests/testVector.cpp @@ -19,7 +19,6 @@ #include #include #include -#include #include using 
namespace std; @@ -155,8 +154,7 @@ TEST(Vector, weightedPseudoinverse ) Vector weights = sigmas.array().square().inverse(); // perform solve - Vector actual; double precision; - boost::tie(actual, precision) = weightedPseudoinverse(x, weights); + const auto [actual, precision] = weightedPseudoinverse(x, weights); // construct expected Vector expected(2); @@ -180,8 +178,7 @@ TEST(Vector, weightedPseudoinverse_constraint ) sigmas(0) = 0.0; sigmas(1) = 0.2; Vector weights = sigmas.array().square().inverse(); // perform solve - Vector actual; double precision; - boost::tie(actual, precision) = weightedPseudoinverse(x, weights); + const auto [actual, precision] = weightedPseudoinverse(x, weights); // construct expected Vector expected(2); @@ -198,8 +195,7 @@ TEST(Vector, weightedPseudoinverse_nan ) Vector a = (Vector(4) << 1., 0., 0., 0.).finished(); Vector sigmas = (Vector(4) << 0.1, 0.1, 0., 0.).finished(); Vector weights = sigmas.array().square().inverse(); - Vector pseudo; double precision; - boost::tie(pseudo, precision) = weightedPseudoinverse(a, weights); + const auto [pseudo, precision] = weightedPseudoinverse(a, weights); Vector expected = (Vector(4) << 1., 0., 0.,0.).finished(); EXPECT(assert_equal(expected, pseudo)); @@ -270,11 +266,14 @@ TEST(Vector, linear_dependent3 ) } //****************************************************************************** -TEST(Vector, IsVectorSpace) { - BOOST_CONCEPT_ASSERT((IsVectorSpace)); - BOOST_CONCEPT_ASSERT((IsVectorSpace)); +TEST(Vector, VectorIsVectorSpace) { + GTSAM_CONCEPT_ASSERT1(IsVectorSpace); + GTSAM_CONCEPT_ASSERT2(IsVectorSpace); +} + +TEST(Vector, RowVectorIsVectorSpace) { typedef Eigen::Matrix RowVector; - BOOST_CONCEPT_ASSERT((IsVectorSpace)); + GTSAM_CONCEPT_ASSERT(IsVectorSpace); } /* ************************************************************************* */ diff --git a/gtsam/base/timing.cpp b/gtsam/base/timing.cpp index 3291415b8..5567ce35d 100644 --- a/gtsam/base/timing.cpp +++ b/gtsam/base/timing.cpp @@ -19,9 +19,6 @@ #include #include -#include -#include - #include #include #include @@ -33,6 +30,9 @@ namespace gtsam { namespace internal { + +// a static shared_ptr to TimingOutline with nullptr as the pointer +const static std::shared_ptr nullTimingOutline; GTSAM_EXPORT std::shared_ptr gTimingRoot( new TimingOutline("Total", getTicTocID("Total"))); @@ -56,13 +56,16 @@ void TimingOutline::add(size_t usecs, size_t usecsWall) { TimingOutline::TimingOutline(const std::string& label, size_t id) : id_(id), t_(0), tWall_(0), t2_(0.0), tIt_(0), tMax_(0), tMin_(0), n_(0), myOrder_( 0), lastChildOrder_(0), label_(label) { +#ifdef GTSAM_USE_BOOST_FEATURES #ifdef GTSAM_USING_NEW_BOOST_TIMERS timer_.stop(); #endif +#endif } /* ************************************************************************* */ size_t TimingOutline::time() const { +#ifdef GTSAM_USE_BOOST_FEATURES size_t time = 0; bool hasChildren = false; for(const ChildMap::value_type& child: children_) { @@ -73,12 +76,16 @@ size_t TimingOutline::time() const { return time; else return t_; +#else + return 0; +#endif } /* ************************************************************************* */ void TimingOutline::print(const std::string& outline) const { +#ifdef GTSAM_USE_BOOST_FEATURES std::string formattedLabel = label_; - boost::replace_all(formattedLabel, "_", " "); + std::replace(formattedLabel.begin(), formattedLabel.end(), '_', ' '); std::cout << outline << "-" << formattedLabel << ": " << self() << " CPU (" << n_ << " times, " << wall() << " wall, " << secs() << " 
children, min: " << min() << " max: " << max() << ")\n"; @@ -95,11 +102,12 @@ void TimingOutline::print(const std::string& outline) const { order_child.second->print(childOutline); } std::cout.flush(); +#endif } void TimingOutline::print2(const std::string& outline, const double parentTotal) const { - +#ifdef GTSAM_USE_BOOST_FEATURES const int w1 = 24, w2 = 2, w3 = 6, w4 = 8, precision = 2; const double selfTotal = self(), selfMean = selfTotal / double(n_); const double childTotal = secs(); @@ -138,11 +146,13 @@ void TimingOutline::print2(const std::string& outline, child.second->print2(childOutline, selfTotal); } } +#endif } /* ************************************************************************* */ const std::shared_ptr& TimingOutline::child(size_t child, const std::string& label, const std::weak_ptr& thisPtr) { +#ifdef GTSAM_USE_BOOST_FEATURES assert(thisPtr.lock().get() == this); std::shared_ptr& result = children_[child]; if (!result) { @@ -153,10 +163,15 @@ const std::shared_ptr& TimingOutline::child(size_t child, result->parent_ = thisPtr; } return result; +#else + return nullTimingOutline; +#endif } /* ************************************************************************* */ void TimingOutline::tic() { +// Disable this entire function if we are not using boost +#ifdef GTSAM_USE_BOOST_FEATURES #ifdef GTSAM_USING_NEW_BOOST_TIMERS assert(timer_.is_stopped()); timer_.start(); @@ -169,10 +184,14 @@ void TimingOutline::tic() { #ifdef GTSAM_USE_TBB tbbTimer_ = tbb::tick_count::now(); #endif +#endif } /* ************************************************************************* */ void TimingOutline::toc() { +// Disable this entire function if we are not using boost +#ifdef GTSAM_USE_BOOST_FEATURES + #ifdef GTSAM_USING_NEW_BOOST_TIMERS assert(!timer_.is_stopped()); @@ -200,10 +219,12 @@ void TimingOutline::toc() { #endif add(cpuTime, wallTime); +#endif } /* ************************************************************************* */ void TimingOutline::finishedIteration() { +#ifdef GTSAM_USE_BOOST_FEATURES if (tIt_ > tMax_) tMax_ = tIt_; if (tMin_ == 0 || tIt_ < tMin_) @@ -212,55 +233,64 @@ void TimingOutline::finishedIteration() { for(ChildMap::value_type& child: children_) { child.second->finishedIteration(); } +#endif } /* ************************************************************************* */ size_t getTicTocID(const char *descriptionC) { +// disable anything which refers to TimingOutline as well, for good measure +#ifdef GTSAM_USE_BOOST_FEATURES const std::string description(descriptionC); // Global (static) map from strings to ID numbers and current next ID number static size_t nextId = 0; static gtsam::FastMap idMap; // Retrieve or add this string - gtsam::FastMap::const_iterator it = idMap.find( - description); + auto it = idMap.find(description); if (it == idMap.end()) { - it = idMap.insert(std::make_pair(description, nextId)).first; + it = idMap.insert({description, nextId}).first; ++nextId; } // Return ID return it->second; +#else + return 0; +#endif } /* ************************************************************************* */ void tic(size_t id, const char *labelC) { +// disable anything which refers to TimingOutline as well, for good measure +#ifdef GTSAM_USE_BOOST_FEATURES const std::string label(labelC); std::shared_ptr node = // gCurrentTimer.lock()->child(id, label, gCurrentTimer); gCurrentTimer = node; node->tic(); +#endif } /* ************************************************************************* */ void toc(size_t id, const char *label) { +// disable 
anything which refers to TimingOutline as well, for good measure +#ifdef GTSAM_USE_BOOST_FEATURES std::shared_ptr current(gCurrentTimer.lock()); if (id != current->id_) { gTimingRoot->print(); throw std::invalid_argument( - (boost::format( - "gtsam timing: Mismatched tic/toc: gttoc(\"%s\") called when last tic was \"%s\".") - % label % current->label_).str()); + "gtsam timing: Mismatched tic/toc: gttoc(\"" + std::string(label) + + "\") called when last tic was \"" + current->label_ + "\"."); } if (!current->parent_.lock()) { gTimingRoot->print(); throw std::invalid_argument( - (boost::format( - "gtsam timing: Mismatched tic/toc: extra gttoc(\"%s\"), already at the root") - % label).str()); + "gtsam timing: Mismatched tic/toc: extra gttoc(\"" + std::string(label) + + "\"), already at the root"); } current->toc(); gCurrentTimer = current->parent_; +#endif } } // namespace internal diff --git a/gtsam/base/timing.h b/gtsam/base/timing.h index 52e6adff7..99c55a3d7 100644 --- a/gtsam/base/timing.h +++ b/gtsam/base/timing.h @@ -21,7 +21,9 @@ #include #include // for GTSAM_USE_TBB +#ifdef GTSAM_USE_BOOST_FEATURES #include +#endif #include #include @@ -105,6 +107,7 @@ // have matching gttic/gttoc statments. You may want to consider reorganizing your timing // outline to match the scope of your code. +#ifdef GTSAM_USE_BOOST_FEATURES // Automatically use the new Boost timers if version is recent enough. #if BOOST_VERSION >= 104800 # ifndef GTSAM_DISABLE_NEW_TIMERS @@ -118,6 +121,7 @@ # include # include #endif +#endif #ifdef GTSAM_USE_TBB # include @@ -160,6 +164,8 @@ namespace gtsam { typedef FastMap > ChildMap; ChildMap children_; ///< subtrees +// disable all timers if not using boost +#ifdef GTSAM_USE_BOOST_FEATURES #ifdef GTSAM_USING_NEW_BOOST_TIMERS boost::timer::cpu_timer timer_; #else @@ -168,6 +174,7 @@ namespace gtsam { #endif #ifdef GTSAM_USE_TBB tbb::tick_count tbbTimer_; +#endif #endif void add(size_t usecs, size_t usecsWall); @@ -176,11 +183,20 @@ namespace gtsam { GTSAM_EXPORT TimingOutline(const std::string& label, size_t myId); GTSAM_EXPORT size_t time() const; ///< time taken, including children double secs() const { return double(time()) / 1000000.0;} ///< time taken, in seconds, including children +#ifdef GTSAM_USE_BOOST_FEATURES double self() const { return double(t_) / 1000000.0;} ///< self time only, in seconds double wall() const { return double(tWall_) / 1000000.0;} ///< wall time, in seconds double min() const { return double(tMin_) / 1000000.0;} ///< min time, in seconds double max() const { return double(tMax_) / 1000000.0;} ///< max time, in seconds double mean() const { return self() / double(n_); } ///< mean self time, in seconds +#else + // make them no-ops if not using boost + double self() const { return -1.; } ///< self time only, in seconds + double wall() const { return -1.; } ///< wall time, in seconds + double min() const { return -1.; } ///< min time, in seconds + double max() const { return -1.; } ///< max time, in seconds + double mean() const { return -1.; } ///< mean self time, in seconds +#endif GTSAM_EXPORT void print(const std::string& outline = "") const; GTSAM_EXPORT void print2(const std::string& outline = "", const double parentTotal = -1.0) const; GTSAM_EXPORT const std::shared_ptr& diff --git a/gtsam/base/types.h b/gtsam/base/types.h index 5b6f053cb..3f1c1cd3d 100644 --- a/gtsam/base/types.h +++ b/gtsam/base/types.h @@ -20,8 +20,10 @@ #pragma once #include +#ifdef GTSAM_USE_BOOST_FEATURES #include #include +#endif #include // for GTSAM_USE_TBB 
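The practical effect of the GTSAM_USE_BOOST_FEATURES guards in timing.h/timing.cpp above is that the timing instrumentation keeps compiling but records nothing in a non-Boost build: time() returns 0 and self()/wall()/min()/max()/mean() return -1. A hedged usage sketch, where graph and values are illustrative placeholders rather than names from the patch:

    // gttic_/gttoc_ keep compiling either way; without Boost timers they do nothing.
    gttic_(linearize);
    auto linearized = graph.linearize(values);  // 'graph'/'values' assumed to exist
    gttoc_(linearize);
    tictoc_print_();  // prints the collected timing tree; a no-op without GTSAM_USE_BOOST_FEATURES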
#include @@ -153,33 +155,7 @@ namespace gtsam { operator T() const { return value; } }; - /* ************************************************************************* */ - /** A helper class that behaves as a container with one element, and works with - * boost::range */ - template - class ListOfOneContainer { - T element_; - public: - typedef T value_type; - typedef const T* const_iterator; - typedef T* iterator; - ListOfOneContainer(const T& element) : element_(element) {} - const T* begin() const { return &element_; } - const T* end() const { return &element_ + 1; } - T* begin() { return &element_; } - T* end() { return &element_ + 1; } - size_t size() const { return 1; } - }; - - BOOST_CONCEPT_ASSERT((boost::RandomAccessRangeConcept >)); - - /** Factory function for ListOfOneContainer to enable ListOfOne(e) syntax. */ - template - ListOfOneContainer ListOfOne(const T& element) { - return ListOfOneContainer(element); - } - - /* ************************************************************************* */ + /* ************************************************************************* */ #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wunused-private-field" // Clang complains that previousOpenMPThreads is unused in the #else case below @@ -234,27 +210,27 @@ namespace gtsam { #if (_MSC_VER < 1800) -#include +#include namespace std { template inline int isfinite(T a) { - return (int)boost::math::isfinite(a); } + return (int)std::isfinite(a); } template inline int isnan(T a) { - return (int)boost::math::isnan(a); } + return (int)std::isnan(a); } template inline int isinf(T a) { - return (int)boost::math::isinf(a); } + return (int)std::isinf(a); } } #endif -#include +#include #ifndef M_PI -#define M_PI (boost::math::constants::pi()) +#define M_PI (3.14159265358979323846) #endif #ifndef M_PI_2 -#define M_PI_2 (boost::math::constants::pi() / 2.0) +#define M_PI_2 (M_PI / 2.0) #endif #ifndef M_PI_4 -#define M_PI_4 (boost::math::constants::pi() / 4.0) +#define M_PI_4 (M_PI / 4.0) #endif #endif diff --git a/gtsam/base/utilities.h b/gtsam/base/utilities.h index 0a05a704c..03e9636da 100644 --- a/gtsam/base/utilities.h +++ b/gtsam/base/utilities.h @@ -28,16 +28,9 @@ private: } -// boost::index_sequence was introduced in 1.66, so we'll manually define an -// implementation if user has 1.65. boost::index_sequence is used to get array -// indices that align with a parameter pack. 
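The utilities.h hunk here keeps the hand-rolled index_sequence but moves it from the boost::mp11 shim into namespace gtsam. For orientation, a small sketch of the pattern such a sequence supports, expanding a tuple with indices aligned to a parameter pack (the function names are illustrative only):

    #include <cstddef>
    #include <initializer_list>
    #include <iostream>
    #include <tuple>

    // Stream every element of a tuple; the index_sequence supplies 0..N-1 for std::get.
    template <class Tuple, std::size_t... Is>
    void printAll(const Tuple& t, gtsam::index_sequence<Is...>) {
      (void)std::initializer_list<int>{(std::cout << std::get<Is>(t) << '\n', 0)...};
    }

    template <class... Args>
    void printAll(const std::tuple<Args...>& t) {
      printAll(t, gtsam::index_sequence_for<Args...>{});
    }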
-#include -#if BOOST_VERSION >= 106600 -#include -#else -namespace boost { -namespace mp11 { +namespace gtsam { // Adapted from https://stackoverflow.com/a/32223343/9151520 +// An adaptation of boost::mp11::index_sequence template struct index_sequence { using type = index_sequence; @@ -49,20 +42,16 @@ template struct _merge_and_renumber; template -struct _merge_and_renumber, index_sequence > +struct _merge_and_renumber, index_sequence> : index_sequence {}; } // namespace detail template -struct make_index_sequence - : detail::_merge_and_renumber< - typename make_index_sequence::type, - typename make_index_sequence::type> {}; +struct make_index_sequence : detail::_merge_and_renumber::type, + typename make_index_sequence::type> {}; template <> struct make_index_sequence<0> : index_sequence<> {}; template <> struct make_index_sequence<1> : index_sequence<0> {}; template using index_sequence_for = make_index_sequence; -} // namespace mp11 -} // namespace boost -#endif +} // namespace gtsam diff --git a/gtsam/basis/tests/testChebyshev2.cpp b/gtsam/basis/tests/testChebyshev2.cpp index 177ea5b49..56a79a38a 100644 --- a/gtsam/basis/tests/testChebyshev2.cpp +++ b/gtsam/basis/tests/testChebyshev2.cpp @@ -25,10 +25,10 @@ #include #include +#include using namespace std; using namespace gtsam; -using namespace boost::placeholders; namespace { noiseModel::Diagonal::shared_ptr model = noiseModel::Unit::Create(1); @@ -119,8 +119,8 @@ TEST(Chebyshev2, InterpolateVector) { EXPECT(assert_equal(expected, fx(X, actualH), 1e-9)); // Check derivative - std::function)> f = boost::bind( - &Chebyshev2::VectorEvaluationFunctor<2>::operator(), fx, _1, nullptr); + std::function)> f = std::bind( + &Chebyshev2::VectorEvaluationFunctor<2>::operator(), fx, std::placeholders::_1, nullptr); Matrix numericalH = numericalDerivative11, 2 * N>(f, X); EXPECT(assert_equal(numericalH, actualH, 1e-9)); @@ -378,7 +378,7 @@ TEST(Chebyshev2, VectorDerivativeFunctor) { // Test Jacobian Matrix expectedH = numericalDerivative11, M * N>( - boost::bind(&VecD::operator(), fx, _1, nullptr), X); + std::bind(&VecD::operator(), fx, std::placeholders::_1, nullptr), X); EXPECT(assert_equal(expectedH, actualH, 1e-7)); } @@ -410,7 +410,7 @@ TEST(Chebyshev2, VectorDerivativeFunctor2) { VecD vecd(N, points(0), 0, T); vecd(X, actualH); Matrix expectedH = numericalDerivative11, M * N>( - boost::bind(&VecD::operator(), vecd, _1, nullptr), X); + std::bind(&VecD::operator(), vecd, std::placeholders::_1, nullptr), X); EXPECT(assert_equal(expectedH, actualH, 1e-6)); } @@ -427,7 +427,7 @@ TEST(Chebyshev2, ComponentDerivativeFunctor) { EXPECT_DOUBLES_EQUAL(0, fx(X, actualH), 1e-8); Matrix expectedH = numericalDerivative11, M * N>( - boost::bind(&CompFunc::operator(), fx, _1, nullptr), X); + std::bind(&CompFunc::operator(), fx, std::placeholders::_1, nullptr), X); EXPECT(assert_equal(expectedH, actualH, 1e-7)); } diff --git a/gtsam/discrete/AlgebraicDecisionTree.h b/gtsam/discrete/AlgebraicDecisionTree.h index b3f0d69b0..425ad15f9 100644 --- a/gtsam/discrete/AlgebraicDecisionTree.h +++ b/gtsam/discrete/AlgebraicDecisionTree.h @@ -24,6 +24,7 @@ #include #include #include +#include #include namespace gtsam { @@ -162,7 +163,9 @@ namespace gtsam { const typename Base::LabelFormatter& labelFormatter = &DefaultFormatter) const { auto valueFormatter = [](const double& v) { - return (boost::format("%4.8g") % v).str(); + std::stringstream ss; + ss << std::setw(4) << std::setprecision(8) << v; + return ss.str(); }; Base::print(s, labelFormatter, valueFormatter); } diff 
--git a/gtsam/discrete/DecisionTree-inl.h b/gtsam/discrete/DecisionTree-inl.h index f17786056..cfaa806b8 100644 --- a/gtsam/discrete/DecisionTree-inl.h +++ b/gtsam/discrete/DecisionTree-inl.h @@ -22,7 +22,6 @@ #include #include -#include #include #include @@ -33,6 +32,7 @@ #include #include #include +#include namespace gtsam { @@ -192,8 +192,8 @@ namespace gtsam { ~Choice() override { #ifdef DT_DEBUG_MEMORY - std::std::cout << Node::nrNodes << " destructing (Choice) " << this->id() - << std::std::endl; + std::cout << Node::nrNodes << " destructing (Choice) " << this->id() + << std::endl; #endif } @@ -282,9 +282,9 @@ namespace gtsam { const ValueFormatter& valueFormatter) const override { std::cout << s << " Choice("; std::cout << labelFormatter(label_) << ") " << std::endl; - for (size_t i = 0; i < branches_.size(); i++) - branches_[i]->print((boost::format("%s %d") % s % i).str(), - labelFormatter, valueFormatter); + for (size_t i = 0; i < branches_.size(); i++) { + branches_[i]->print(s + " " + std::to_string(i), labelFormatter, valueFormatter); + } } /** output to graphviz (as a a graph) */ @@ -643,11 +643,8 @@ namespace gtsam { // Create a simple choice node with values as leaves. if (size != nrChoices) { std::cout << "Trying to create DD on " << begin->first << std::endl; - std::cout << boost::format( - "DecisionTree::create: expected %d values but got %d " - "instead") % - nrChoices % size - << std::endl; + std::cout << "DecisionTree::create: expected " << nrChoices + << " values but got " << size << " instead" << std::endl; throw std::invalid_argument("DecisionTree::create invalid argument"); } auto choice = std::make_shared(begin->first, endY - beginY); diff --git a/gtsam/discrete/DecisionTree.h b/gtsam/discrete/DecisionTree.h index f3949b512..ed1908485 100644 --- a/gtsam/discrete/DecisionTree.h +++ b/gtsam/discrete/DecisionTree.h @@ -424,10 +424,10 @@ namespace gtsam { template std::pair, DecisionTree > unzip( const DecisionTree >& input) { - return std::make_pair( + return { DecisionTree(input, [](std::pair i) { return i.first; }), - DecisionTree(input, - [](std::pair i) { return i.second; })); + DecisionTree(input, [](std::pair i) { return i.second; }) + }; } } // namespace gtsam diff --git a/gtsam/discrete/DecisionTreeFactor.cpp b/gtsam/discrete/DecisionTreeFactor.cpp index 399216b83..cb6c7761e 100644 --- a/gtsam/discrete/DecisionTreeFactor.cpp +++ b/gtsam/discrete/DecisionTreeFactor.cpp @@ -22,7 +22,6 @@ #include #include -#include #include using namespace std; @@ -79,8 +78,9 @@ namespace gtsam { const KeyFormatter& formatter) const { cout << s; cout << " f["; - for (auto&& key : keys()) - cout << boost::format(" (%1%,%2%),") % formatter(key) % cardinality(key); + for (auto&& key : keys()) { + cout << " (" << formatter(key) << "," << cardinality(key) << "),"; + } cout << " ]" << endl; ADT::print("", formatter); } @@ -104,13 +104,12 @@ namespace gtsam { /* ************************************************************************ */ DecisionTreeFactor::shared_ptr DecisionTreeFactor::combine( size_t nrFrontals, ADT::Binary op) const { - if (nrFrontals > size()) + if (nrFrontals > size()) { throw invalid_argument( - (boost::format( - "DecisionTreeFactor::combine: invalid number of frontal " - "keys %d, nr.keys=%d") % - nrFrontals % size()) - .str()); + "DecisionTreeFactor::combine: invalid number of frontal " + "keys " + + std::to_string(nrFrontals) + ", nr.keys=" + std::to_string(size())); + } // sum over nrFrontals keys size_t i; @@ -132,13 +131,13 @@ namespace gtsam { /* 
************************************************************************ */ DecisionTreeFactor::shared_ptr DecisionTreeFactor::combine( const Ordering& frontalKeys, ADT::Binary op) const { - if (frontalKeys.size() > size()) + if (frontalKeys.size() > size()) { throw invalid_argument( - (boost::format( - "DecisionTreeFactor::combine: invalid number of frontal " - "keys %d, nr.keys=%d") % - frontalKeys.size() % size()) - .str()); + "DecisionTreeFactor::combine: invalid number of frontal " + "keys " + + std::to_string(frontalKeys.size()) + ", nr.keys=" + + std::to_string(size())); + } // sum over nrFrontals keys size_t i; @@ -193,7 +192,9 @@ namespace gtsam { /* ************************************************************************ */ static std::string valueFormatter(const double& v) { - return (boost::format("%4.2g") % v).str(); + std::stringstream ss; + ss << std::setw(4) << std::setprecision(2) << std::fixed << v; + return ss.str(); } /** output to graphviz format, stream version */ diff --git a/gtsam/discrete/DecisionTreeFactor.h b/gtsam/discrete/DecisionTreeFactor.h index 74899b729..95054bcdb 100644 --- a/gtsam/discrete/DecisionTreeFactor.h +++ b/gtsam/discrete/DecisionTreeFactor.h @@ -104,7 +104,7 @@ namespace gtsam { return ADT::operator()(values); } - /// Evaluate probability density, sugar. + /// Evaluate probability distribution, sugar. double operator()(const DiscreteValues& values) const override { return ADT::operator()(values); } @@ -268,5 +268,4 @@ namespace gtsam { // traits template <> struct traits : public Testable {}; - } // namespace gtsam diff --git a/gtsam/discrete/DiscreteBayesNet.cpp b/gtsam/discrete/DiscreteBayesNet.cpp index 451f5f07b..f754250ed 100644 --- a/gtsam/discrete/DiscreteBayesNet.cpp +++ b/gtsam/discrete/DiscreteBayesNet.cpp @@ -20,8 +20,6 @@ #include #include -#include - namespace gtsam { // Instantiate base class @@ -58,8 +56,9 @@ DiscreteValues DiscreteBayesNet::sample() const { DiscreteValues DiscreteBayesNet::sample(DiscreteValues result) const { // sample each node in turn in topological sort order (parents first) - for (auto conditional : boost::adaptors::reverse(*this)) - conditional->sampleInPlace(&result); + for (auto it = std::make_reverse_iterator(end()); it != std::make_reverse_iterator(begin()); ++it) { + (*it)->sampleInPlace(&result); + } return result; } diff --git a/gtsam/discrete/DiscreteBayesTree.h b/gtsam/discrete/DiscreteBayesTree.h index f65508a1a..b0c3dfb7d 100644 --- a/gtsam/discrete/DiscreteBayesTree.h +++ b/gtsam/discrete/DiscreteBayesTree.h @@ -63,7 +63,7 @@ class GTSAM_EXPORT DiscreteBayesTreeClique /* ************************************************************************* */ /** - * @brief A Bayes tree representing a Discrete density. + * @brief A Bayes tree representing a Discrete distribution. 
* @ingroup discrete */ class GTSAM_EXPORT DiscreteBayesTree diff --git a/gtsam/discrete/DiscreteConditional.cpp b/gtsam/discrete/DiscreteConditional.cpp index c40573ee8..5abc094fb 100644 --- a/gtsam/discrete/DiscreteConditional.cpp +++ b/gtsam/discrete/DiscreteConditional.cpp @@ -266,7 +266,7 @@ void DiscreteConditional::sampleInPlace(DiscreteValues* values) const { size_t DiscreteConditional::sample(const DiscreteValues& parentsValues) const { static mt19937 rng(2); // random number generator - // Get the correct conditional density + // Get the correct conditional distribution ADT pFS = choose(parentsValues, true); // P(F|S=parentsValues) // TODO(Duy): only works for one key now, seems horribly slow this way diff --git a/gtsam/discrete/DiscreteFactorGraph.cpp b/gtsam/discrete/DiscreteFactorGraph.cpp index 0501cd12e..6c7b6456e 100644 --- a/gtsam/discrete/DiscreteFactorGraph.cpp +++ b/gtsam/discrete/DiscreteFactorGraph.cpp @@ -140,8 +140,7 @@ namespace gtsam { orderedKeys, product); gttoc(lookup); - return std::make_pair( - std::dynamic_pointer_cast(lookup), max); + return {std::dynamic_pointer_cast(lookup), max}; } /* ************************************************************************ */ @@ -223,7 +222,7 @@ namespace gtsam { std::make_shared(product, *sum, orderedKeys); gttoc(divide); - return std::make_pair(conditional, sum); + return {conditional, sum}; } /* ************************************************************************ */ diff --git a/gtsam/discrete/DiscreteKey.cpp b/gtsam/discrete/DiscreteKey.cpp index 06ed2ca3b..79d11d8a7 100644 --- a/gtsam/discrete/DiscreteKey.cpp +++ b/gtsam/discrete/DiscreteKey.cpp @@ -17,7 +17,6 @@ */ #include -#include // for key names #include "DiscreteKey.h" namespace gtsam { @@ -26,7 +25,7 @@ namespace gtsam { DiscreteKeys::DiscreteKeys(const vector& cs) { for (size_t i = 0; i < cs.size(); i++) { - string name = boost::str(boost::format("v%1%") % i); + string name = "v" + std::to_string(i); push_back(DiscreteKey(i, cs[i])); } } diff --git a/gtsam/discrete/DiscreteLookupDAG.cpp b/gtsam/discrete/DiscreteLookupDAG.cpp index bdc77031c..ab62055ed 100644 --- a/gtsam/discrete/DiscreteLookupDAG.cpp +++ b/gtsam/discrete/DiscreteLookupDAG.cpp @@ -20,6 +20,7 @@ #include #include +#include #include #include @@ -118,8 +119,10 @@ DiscreteLookupDAG DiscreteLookupDAG::FromBayesNet( DiscreteValues DiscreteLookupDAG::argmax(DiscreteValues result) const { // Argmax each node in turn in topological sort order (parents first). - for (auto lookupTable : boost::adaptors::reverse(*this)) - lookupTable->argmaxInPlace(&result); + for (auto it = std::make_reverse_iterator(end()); it != std::make_reverse_iterator(begin()); ++it) { + // dereference to get the sharedFactor to the lookup table + (*it)->argmaxInPlace(&result); + } return result; } /* ************************************************************************** */ diff --git a/gtsam/discrete/DiscreteMarginals.h b/gtsam/discrete/DiscreteMarginals.h index 7bb2cc6f8..b97e60805 100644 --- a/gtsam/discrete/DiscreteMarginals.h +++ b/gtsam/discrete/DiscreteMarginals.h @@ -41,7 +41,7 @@ class DiscreteMarginals { DiscreteMarginals() {} /** Construct a marginals class. - * @param graph The factor graph defining the full joint density on all variables. + * @param graph The factor graph defining the full joint distribution on all variables. 
*/ DiscreteMarginals(const DiscreteFactorGraph& graph) { bayesTree_ = graph.eliminateMultifrontal(); diff --git a/gtsam/discrete/DiscreteValues.cpp b/gtsam/discrete/DiscreteValues.cpp index b0427e91b..e21fb71db 100644 --- a/gtsam/discrete/DiscreteValues.cpp +++ b/gtsam/discrete/DiscreteValues.cpp @@ -17,7 +17,6 @@ #include -#include #include using std::cout; @@ -39,8 +38,10 @@ void DiscreteValues::print(const string& s, /* ************************************************************************ */ bool DiscreteValues::equals(const DiscreteValues& x, double tol) const { if (this->size() != x.size()) return false; - for (const auto values : boost::combine(*this, x)) { - if (values.get<0>() != values.get<1>()) return false; + auto it1 = x.begin(); + auto it2 = this->begin(); + for (; it1 != x.end(); ++it1, ++it2) { + if (it1->first != it2->first || it1->second != it2->second) return false; } return true; } diff --git a/gtsam/discrete/Signature.cpp b/gtsam/discrete/Signature.cpp index bc045e8c2..b666a2f4b 100644 --- a/gtsam/discrete/Signature.cpp +++ b/gtsam/discrete/Signature.cpp @@ -16,178 +16,129 @@ * @date Feb 27, 2011 */ -#include - #include "Signature.h" -#include // for parsing -#include // for qi::_val +#include +#include + +#include "gtsam/discrete/SignatureParser.h" namespace gtsam { - using namespace std; +using namespace std; - namespace qi = boost::spirit::qi; - namespace ph = boost::phoenix; +ostream& operator<<(ostream& os, const Signature::Row& row) { + os << row[0]; + for (size_t i = 1; i < row.size(); i++) os << " " << row[i]; + return os; +} - // parser for strings of form "99/1 80/20" etc... - namespace parser { - typedef string::const_iterator It; - using boost::phoenix::val; - using boost::phoenix::ref; - using boost::phoenix::push_back; +ostream& operator<<(ostream& os, const Signature::Table& table) { + for (size_t i = 0; i < table.size(); i++) os << table[i] << endl; + return os; +} - // Special rows, true and false - Signature::Row F{1, 0}, T{0, 1}; +Signature::Signature(const DiscreteKey& key, const DiscreteKeys& parents, + const Table& table) + : key_(key), parents_(parents) { + operator=(table); +} - // Special tables (inefficient, but do we care for user input?) - Signature::Table logic(bool ff, bool ft, bool tf, bool tt) { - Signature::Table t(4); - t[0] = ff ? T : F; - t[1] = ft ? T : F; - t[2] = tf ? T : F; - t[3] = tt ? 
T : F; - return t; - } +Signature::Signature(const DiscreteKey& key, const DiscreteKeys& parents, + const std::string& spec) + : key_(key), parents_(parents) { + operator=(spec); +} - struct Grammar { - qi::rule table, or_, and_, rows; - qi::rule true_, false_, row; - Grammar() { - table = or_ | and_ | rows; - or_ = qi::lit("OR")[qi::_val = logic(false, true, true, true)]; - and_ = qi::lit("AND")[qi::_val = logic(false, false, false, true)]; - rows = +(row | true_ | false_); - row = qi::double_ >> +("/" >> qi::double_); - true_ = qi::lit("T")[qi::_val = T]; - false_ = qi::lit("F")[qi::_val = F]; - } - } grammar; +Signature::Signature(const DiscreteKey& key) : key_(key) {} - } // \namespace parser +DiscreteKeys Signature::discreteKeys() const { + DiscreteKeys keys; + keys.push_back(key_); + for (const DiscreteKey& key : parents_) keys.push_back(key); + return keys; +} - ostream& operator <<(ostream &os, const Signature::Row &row) { - os << row[0]; - for (size_t i = 1; i < row.size(); i++) - os << " " << row[i]; - return os; - } +KeyVector Signature::indices() const { + KeyVector js; + js.push_back(key_.first); + for (const DiscreteKey& key : parents_) js.push_back(key.first); + return js; +} - ostream& operator <<(ostream &os, const Signature::Table &table) { - for (size_t i = 0; i < table.size(); i++) - os << table[i] << endl; - return os; - } - - Signature::Signature(const DiscreteKey& key, const DiscreteKeys& parents, - const Table& table) - : key_(key), parents_(parents) { - operator=(table); - } - - Signature::Signature(const DiscreteKey& key, const DiscreteKeys& parents, - const std::string& spec) - : key_(key), parents_(parents) { - operator=(spec); - } - - Signature::Signature(const DiscreteKey& key) : - key_(key) { - } - - DiscreteKeys Signature::discreteKeys() const { - DiscreteKeys keys; - keys.push_back(key_); - for (const DiscreteKey& key : parents_) keys.push_back(key); - return keys; - } - - KeyVector Signature::indices() const { - KeyVector js; - js.push_back(key_.first); - for (const DiscreteKey& key : parents_) js.push_back(key.first); - return js; - } - - vector Signature::cpt() const { - vector cpt; - if (table_) { - const size_t nrStates = table_->at(0).size(); - for (size_t j = 0; j < nrStates; j++) { - for (const Row& row : *table_) { - assert(row.size() == nrStates); - cpt.push_back(row[j]); - } +vector Signature::cpt() const { + vector cpt; + if (table_) { + const size_t nrStates = table_->at(0).size(); + for (size_t j = 0; j < nrStates; j++) { + for (const Row& row : *table_) { + assert(row.size() == nrStates); + cpt.push_back(row[j]); } } - return cpt; } + return cpt; +} - Signature& Signature::operator,(const DiscreteKey& parent) { - parents_.push_back(parent); - return *this; +Signature &Signature::operator,(const DiscreteKey& parent) { + parents_.push_back(parent); + return *this; +} + +static void normalize(Signature::Row& row) { + double sum = 0; + for (size_t i = 0; i < row.size(); i++) sum += row[i]; + for (size_t i = 0; i < row.size(); i++) row[i] /= sum; +} + +Signature& Signature::operator=(const string& spec) { + spec_ = spec; + auto table = SignatureParser::Parse(spec); + if (table) { + for (Row& row : *table) normalize(row); + table_ = *table; } + return *this; +} - static void normalize(Signature::Row& row) { - double sum = 0; - for (size_t i = 0; i < row.size(); i++) - sum += row[i]; - for (size_t i = 0; i < row.size(); i++) - row[i] /= sum; +Signature& Signature::operator=(const Table& t) { + Table table = t; + for (Row& row : table) 
normalize(row); + table_ = table; + return *this; +} + +ostream& operator<<(ostream& os, const Signature& s) { + os << s.key_.first; + if (s.parents_.empty()) { + os << " % "; + } else { + os << " | " << s.parents_[0].first; + for (size_t i = 1; i < s.parents_.size(); i++) + os << " && " << s.parents_[i].first; + os << " = "; } + os << (s.spec_ ? *s.spec_ : "no spec") << endl; + if (s.table_) + os << (*s.table_); + else + os << "spec could not be parsed" << endl; + return os; +} - Signature& Signature::operator=(const string& spec) { - spec_ = spec; - Table table; - parser::It f = spec.begin(), l = spec.end(); - bool success = - qi::phrase_parse(f, l, parser::grammar.table, qi::space, table); - if (success) { - for (Row& row : table) normalize(row); - table_ = table; - } - return *this; - } +Signature operator|(const DiscreteKey& key, const DiscreteKey& parent) { + Signature s(key); + return s, parent; +} - Signature& Signature::operator=(const Table& t) { - Table table = t; - for(Row& row: table) - normalize(row); - table_ = table; - return *this; - } +Signature operator%(const DiscreteKey& key, const string& parent) { + Signature s(key); + return s = parent; +} - ostream& operator <<(ostream &os, const Signature &s) { - os << s.key_.first; - if (s.parents_.empty()) { - os << " % "; - } else { - os << " | " << s.parents_[0].first; - for (size_t i = 1; i < s.parents_.size(); i++) - os << " && " << s.parents_[i].first; - os << " = "; - } - os << (s.spec_ ? *s.spec_ : "no spec") << endl; - if (s.table_) - os << (*s.table_); - else - os << "spec could not be parsed" << endl; - return os; - } +Signature operator%(const DiscreteKey& key, const Signature::Table& parent) { + Signature s(key); + return s = parent; +} - Signature operator|(const DiscreteKey& key, const DiscreteKey& parent) { - Signature s(key); - return s, parent; - } - - Signature operator%(const DiscreteKey& key, const string& parent) { - Signature s(key); - return s = parent; - } - - Signature operator%(const DiscreteKey& key, const Signature::Table& parent) { - Signature s(key); - return s = parent; - } - -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam/discrete/Signature.h b/gtsam/discrete/Signature.h index 963843ab2..df175bc10 100644 --- a/gtsam/discrete/Signature.h +++ b/gtsam/discrete/Signature.h @@ -25,7 +25,7 @@ namespace gtsam { /** - * Signature for a discrete conditional density, used to construct conditionals. + * Signature for a discrete conditional distribution, used to construct conditionals. * * The format is (Key % string) for nodes with no parents, * and (Key | Key, Key = string) for nodes with parents. 
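For reference, a minimal usage sketch (not part of the diff) of the spec strings the new parser handles, using only the Signature constructors and operators shown above and exercised in testSignature.cpp further below; the function name, keys, and numbers are illustrative:

#include <gtsam/discrete/Signature.h>

using namespace gtsam;

void signatureSpecSketch() {
  // X is binary, Y has three states.
  DiscreteKey X(0, 2), Y(1, 3);

  // No parents: P(X) from odds "1/3", normalized by Signature to {0.25, 0.75}.
  Signature prior(X % "1/3");

  // One parent: P(X | Y), one "a/b" row per assignment of Y.
  // "1/1 2/3 1/4" is parsed into rows {1,1}, {2,3}, {1,4}, each row normalized.
  Signature conditional(X, {Y}, "1/1 2/3 1/4");

  // "OR" and "AND" are recognized as special two-parent truth tables.
  DiscreteKey A(2, 2), B(3, 2);
  Signature orSig(X, {A, B}, "OR");
}
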
diff --git a/gtsam/discrete/SignatureParser.cpp b/gtsam/discrete/SignatureParser.cpp new file mode 100644 index 000000000..8e0221f01 --- /dev/null +++ b/gtsam/discrete/SignatureParser.cpp @@ -0,0 +1,119 @@ +#include + +#include +#include +#include +#include +namespace gtsam { + +using Row = std::vector; +using Table = std::vector; + +inline static Row ParseTrueRow() { return {0, 1}; } + +inline static Row ParseFalseRow() { return {1, 0}; } + +inline static Table ParseOr() { + return {ParseFalseRow(), ParseTrueRow(), ParseTrueRow(), ParseTrueRow()}; +} + +inline static Table ParseAnd() { + return {ParseFalseRow(), ParseFalseRow(), ParseFalseRow(), ParseTrueRow()}; +} + +std::optional static ParseConditional(const std::string& token) { + // Expect something like a/b/c + std::istringstream iss2(token); + Row row; + try { + // if the string has no / then return std::nullopt + if (std::count(token.begin(), token.end(), '/') == 0) return std::nullopt; + // split the word on the '/' character + for (std::string s; std::getline(iss2, s, '/');) { + // can throw exception + row.push_back(std::stod(s)); + } + // if we ended with a '/' then return false + if (token.back() == '/') return std::nullopt; + } catch (...) { + return std::nullopt; + } + return row; +} + +std::optional static ParseConditionalTable( + const std::vector& tokens) { + Table table; + // loop over the words + // for each word, split it into doubles using a stringstream + for (const auto& word : tokens) { + // If the string word is F or T then the row is {0,1} or {1,0} respectively + if (word == "F") { + table.push_back(ParseFalseRow()); + } else if (word == "T") { + table.push_back(ParseTrueRow()); + } else { + // Expect something like a/b/c + if (auto row = ParseConditional(word)) { + table.push_back(*row); + } else { + // stop parsing if we encounter an error + return std::nullopt; + } + } + } + return table; +} + +std::vector static Tokenize(const std::string& str) { + std::istringstream iss(str); + std::vector tokens; + for (std::string s; iss >> s;) { + tokens.push_back(s); + } + return tokens; +} + +std::optional
SignatureParser::Parse(const std::string& str) { + // check if string is just whitespace + if (std::all_of(str.begin(), str.end(), isspace)) { + return std::nullopt; + } + + // return std::nullopt if the string is empty + if (str.empty()) { + return std::nullopt; + } + + // tokenize the str on whitespace + std::vector tokens = Tokenize(str); + + // if the first token is "OR", return the OR table + if (tokens[0] == "OR") { + // if there are more tokens, return std::nullopt + if (tokens.size() > 1) { + return std::nullopt; + } + return ParseOr(); + } + + // if the first token is "AND", return the AND table + if (tokens[0] == "AND") { + // if there are more tokens, return std::nullopt + if (tokens.size() > 1) { + return std::nullopt; + } + return ParseAnd(); + } + + // otherwise then parse the conditional table + auto table = ParseConditionalTable(tokens); + // return std::nullopt if the table is empty + if (!table || table->empty()) { + return std::nullopt; + } + // the boost::phoenix parser did not return an error if we could not fully + // parse a string it just returned whatever it could parse + return table; +} +} // namespace gtsam diff --git a/gtsam/discrete/SignatureParser.h b/gtsam/discrete/SignatureParser.h new file mode 100644 index 000000000..e6b402e44 --- /dev/null +++ b/gtsam/discrete/SignatureParser.h @@ -0,0 +1,56 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file SignatureParser.h + * @brief Parser for conditional distribution signatures. + * @author Kartik Arcot + * @date January 2023 + */ + +#pragma once + +#include +#include +#include + +namespace gtsam { +/** + * @brief A simple parser that replaces the boost spirit parser. + * + * It is meant to parse strings like "1/1 2/3 1/4". Every word of the form + * "a/b/c/..." is parsed as a row, and the rows are stored in a table. + * + * A `Row` is a vector of doubles, and a `Table` is a vector of rows. + * + * Examples: the parser is able to parse the following strings: + * "1/1 2/3 1/4": + * {{1,1},{2,3},{1,4}} + * "1/1 2/3 1/4 1/1 2/3 1/4" : + * {{1,1},{2,3},{1,4},{1,1},{2,3},{1,4}} + * "1/2/3 2/3/4 1/2/3 2/3/4 1/2/3 2/3/4 1/2/3 2/3/4 1/2/3" : + * {{1,2,3},{2,3,4},{1,2,3},{2,3,4},{1,2,3},{2,3,4},{1,2,3},{2,3,4},{1,2,3}} + * + * If the string has un-parsable elements the parser will fail with nullopt: + * "1/2 sdf" : nullopt ! + * + * It also fails if the string is empty: + * "": nullopt ! + * + * Also fails if the rows are not of the same size. + */ +struct SignatureParser { + using Row = std::vector; + using Table = std::vector; + + static std::optional
Parse(const std::string& str); +}; +} // namespace gtsam diff --git a/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp b/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp index e3f9d9dd2..c7cb7088e 100644 --- a/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp +++ b/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp @@ -25,8 +25,6 @@ #include // for convert only #define DISABLE_TIMING -#include - #include #include #include @@ -81,9 +79,9 @@ void resetCounts() { } void printCounts(const string& s) { #ifndef DISABLE_TIMING - cout << boost::format("%s: %3d muls, %3d adds, %g ms.") % s % muls % adds % - (1000 * elapsed) - << endl; +cout << s << ": " << std::setw(3) << muls << " muls, " << + std::setw(3) << adds << " adds, " << 1000 * elapsed << " ms." + << endl; #endif resetCounts(); } @@ -133,7 +131,9 @@ ADT create(const Signature& signature) { ADT p(signature.discreteKeys(), signature.cpt()); static size_t count = 0; const DiscreteKey& key = signature.key(); - string DOTfile = (boost::format("CPT-%03d-%d") % ++count % key.first).str(); + std::stringstream ss; + ss << "CPT-" << std::setw(3) << std::setfill('0') << ++count << "-" << key.first; + string DOTfile = ss.str(); dot(p, DOTfile); return p; } diff --git a/gtsam/discrete/tests/testDecisionTree.cpp b/gtsam/discrete/tests/testDecisionTree.cpp index 3bf12cec1..d2a94ddc3 100644 --- a/gtsam/discrete/tests/testDecisionTree.cpp +++ b/gtsam/discrete/tests/testDecisionTree.cpp @@ -26,6 +26,8 @@ #include #include +#include + using std::vector; using std::string; using std::map; @@ -50,7 +52,9 @@ struct CrazyDecisionTree : public DecisionTree { void print(const std::string& s = "") const { auto keyFormatter = [](const std::string& s) { return s; }; auto valueFormatter = [](const Crazy& v) { - return (boost::format("{%d,%4.2g}") % v.a % v.b).str(); + std::stringstream ss; + ss << "{" << v.a << "," << std::setw(4) << std::setprecision(2) << v.b << "}"; + return ss.str(); }; DecisionTree::print("", keyFormatter, valueFormatter); } @@ -86,7 +90,7 @@ struct DT : public DecisionTree { void print(const std::string& s = "") const { auto keyFormatter = [](const std::string& s) { return s; }; auto valueFormatter = [](const int& v) { - return (boost::format("%d") % v).str(); + return std::to_string(v); }; std::cout << s; Base::print("", keyFormatter, valueFormatter); @@ -458,9 +462,7 @@ TEST(DecisionTree, unzip) { DTP tree(B, DTP(A, {0, "zero"}, {1, "one"}), DTP(A, {2, "two"}, {1337, "l33t"})); - DT1 dt1; - DT2 dt2; - std::tie(dt1, dt2) = unzip(tree); + const auto [dt1, dt2] = unzip(tree); DT1 tree1(B, DT1(A, 0, 1), DT1(A, 2, 1337)); DT2 tree2(B, DT2(A, "zero", "one"), DT2(A, "two", "l33t")); diff --git a/gtsam/discrete/tests/testDiscreteBayesNet.cpp b/gtsam/discrete/tests/testDiscreteBayesNet.cpp index a17a20d41..95f407cae 100644 --- a/gtsam/discrete/tests/testDiscreteBayesNet.cpp +++ b/gtsam/discrete/tests/testDiscreteBayesNet.cpp @@ -94,8 +94,7 @@ TEST(DiscreteBayesNet, Asia) { EXPECT(assert_equal(vs, marginals.marginalProbabilities(Smoking))); // Create solver and eliminate - Ordering ordering; - ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7); + const Ordering ordering{0, 1, 2, 3, 4, 5, 6, 7}; DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering); DiscreteConditional expected2(Bronchitis % "11/9"); EXPECT(assert_equal(expected2, *chordal->back())); diff --git a/gtsam/discrete/tests/testDiscreteFactorGraph.cpp b/gtsam/discrete/tests/testDiscreteFactorGraph.cpp index 9439ff417..bb23b7a83 100644 --- 
a/gtsam/discrete/tests/testDiscreteFactorGraph.cpp +++ b/gtsam/discrete/tests/testDiscreteFactorGraph.cpp @@ -107,11 +107,8 @@ TEST(DiscreteFactorGraph, test) { graph.add(C & B, "3 1 1 3"); // Test EliminateDiscrete - Ordering frontalKeys; - frontalKeys += Key(0); - DiscreteConditional::shared_ptr conditional; - DecisionTreeFactor::shared_ptr newFactor; - boost::tie(conditional, newFactor) = EliminateDiscrete(graph, frontalKeys); + const Ordering frontalKeys{0}; + const auto [conditional, newFactor] = EliminateDiscrete(graph, frontalKeys); // Check Conditional CHECK(conditional); @@ -125,12 +122,9 @@ TEST(DiscreteFactorGraph, test) { EXPECT(assert_equal(expectedFactor, *newFactor)); // Test using elimination tree - Ordering ordering; - ordering += Key(0), Key(1), Key(2); + const Ordering ordering{0, 1, 2}; DiscreteEliminationTree etree(graph, ordering); - DiscreteBayesNet::shared_ptr actual; - DiscreteFactorGraph::shared_ptr remainingGraph; - boost::tie(actual, remainingGraph) = etree.eliminate(&EliminateDiscrete); + const auto [actual, remainingGraph] = etree.eliminate(&EliminateDiscrete); // Check Bayes net DiscreteBayesNet expectedBayesNet; @@ -235,8 +229,7 @@ TEST(DiscreteFactorGraph, testMPE_Darwiche09book_p244) { EXPECT(assert_equal(mpe, actualMPE)); // Check Bayes Net - Ordering ordering; - ordering += Key(0), Key(1), Key(2), Key(3), Key(4); + const Ordering ordering{0, 1, 2, 3, 4}; auto chordal = graph.eliminateSequential(ordering); EXPECT_LONGS_EQUAL(5, chordal->size()); diff --git a/gtsam/discrete/tests/testSignature.cpp b/gtsam/discrete/tests/testSignature.cpp index 02e7a1d10..bf39e8417 100644 --- a/gtsam/discrete/tests/testSignature.cpp +++ b/gtsam/discrete/tests/testSignature.cpp @@ -29,7 +29,7 @@ using namespace gtsam; DiscreteKey X(0, 2), Y(1, 3), Z(2, 2); /* ************************************************************************* */ -TEST(testSignature, simple_conditional) { +TEST(testSignature, SimpleConditional) { Signature sig(X, {Y}, "1/1 2/3 1/4"); CHECK(sig.table()); Signature::Table table = *sig.table(); @@ -54,7 +54,7 @@ TEST(testSignature, simple_conditional) { } /* ************************************************************************* */ -TEST(testSignature, simple_conditional_nonparser) { +TEST(testSignature, SimpleConditionalNonparser) { Signature::Row row1{1, 1}, row2{2, 3}, row3{1, 4}; Signature::Table table{row1, row2, row3}; @@ -77,7 +77,7 @@ TEST(testSignature, simple_conditional_nonparser) { DiscreteKey A(0, 2), S(1, 2), T(2, 2), L(3, 2), B(4, 2), E(5, 2), D(7, 2); // Make sure we can create all signatures for Asia network with constructor. -TEST(testSignature, all_examples) { +TEST(testSignature, AllExamples) { DiscreteKey X(6, 2); Signature a(A, {}, "99/1"); Signature s(S, {}, "50/50"); @@ -89,7 +89,7 @@ TEST(testSignature, all_examples) { } // Make sure we can create all signatures for Asia network with operator magic. -TEST(testSignature, all_examples_magic) { +TEST(testSignature, AllExamplesMagic) { DiscreteKey X(6, 2); Signature a(A % "99/1"); Signature s(S % "50/50"); @@ -101,7 +101,7 @@ TEST(testSignature, all_examples_magic) { } // Check example from docs. 
-TEST(testSignature, doxygen_example) { +TEST(testSignature, DoxygenExample) { Signature::Table table{{0.9, 0.1}, {0.2, 0.8}, {0.3, 0.7}, {0.1, 0.9}}; Signature d1(D, {E, B}, table); Signature d2((D | E, B) = "9/1 2/8 3/7 1/9"); diff --git a/gtsam/discrete/tests/testSignatureParser.cpp b/gtsam/discrete/tests/testSignatureParser.cpp new file mode 100644 index 000000000..e0db84697 --- /dev/null +++ b/gtsam/discrete/tests/testSignatureParser.cpp @@ -0,0 +1,103 @@ +/** + * Unit tests for the SimpleParser class. + * @file testSimpleParser.cpp + */ + +#include +#include +#include + +using namespace gtsam; + +/* ************************************************************************* */ +// Simple test case +bool compareTables(const SignatureParser::Table& table1, + const SignatureParser::Table& table2) { + if (table1.size() != table2.size()) { + return false; + } + for (size_t i = 0; i < table1.size(); ++i) { + if (table1[i].size() != table2[i].size()) { + return false; + } + for (size_t j = 0; j < table1[i].size(); ++j) { + if (table1[i][j] != table2[i][j]) { + return false; + } + } + } + return true; +} + +/* ************************************************************************* */ +// Simple test case +TEST(SimpleParser, Simple) { + SignatureParser::Table expectedTable{{1, 1}, {2, 3}, {1, 4}}; + const auto table = SignatureParser::Parse("1/1 2/3 1/4"); + CHECK(table); + // compare the tables + EXPECT(compareTables(*table, expectedTable)); +} + +/* ************************************************************************* */ +// Test case with each row having 3 elements +TEST(SimpleParser, ThreeElements) { + SignatureParser::Table expectedTable{{1, 1, 1}, {2, 3, 2}, {1, 4, 3}}; + const auto table = SignatureParser::Parse("1/1/1 2/3/2 1/4/3"); + CHECK(table); + // compare the tables + EXPECT(compareTables(*table, expectedTable)); +} + +/* ************************************************************************* */ +// A test case to check if we can parse a signature with 'T' and 'F' +TEST(SimpleParser, TAndF) { + SignatureParser::Table expectedTable{{1, 0}, {1, 0}, {1, 0}, {0, 1}}; + const auto table = SignatureParser::Parse("F F F T"); + CHECK(table); + // compare the tables + EXPECT(compareTables(*table, expectedTable)); +} + +/* ************************************************************************* */ +// A test to parse {F F F 1} +TEST(SimpleParser, FFF1) { + SignatureParser::Table expectedTable{{1, 0}, {1, 0}, {1, 0}}; + const auto table = SignatureParser::Parse("F F F"); + CHECK(table); + // compare the tables + EXPECT(compareTables(*table, expectedTable)); +} + +/* ************************************************************************* */ +// Expect false if the string is empty +TEST(SimpleParser, emptyString) { + const auto table = SignatureParser::Parse(""); + EXPECT(!table); +} + +/* ************************************************************************* */ +// Expect false if gibberish +TEST(SimpleParser, Gibberish) { + const auto table = SignatureParser::Parse("sdf 22/3"); + EXPECT(!table); +} + +// If Gibberish is in the middle, it should not parse. 
+TEST(SimpleParser, GibberishInMiddle) { + const auto table = SignatureParser::Parse("1/1 2/3 sdf 1/4"); + EXPECT(!table); +} + +// A test with slash in the end +TEST(SimpleParser, SlashInEnd) { + const auto table = SignatureParser::Parse("1/1 2/"); + EXPECT(!table); +} + +/* ************************************************************************* */ + +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} diff --git a/gtsam/geometry/BearingRange.h b/gtsam/geometry/BearingRange.h index 7e6210914..19194bb7a 100644 --- a/gtsam/geometry/BearingRange.h +++ b/gtsam/geometry/BearingRange.h @@ -21,7 +21,6 @@ #include #include #include -#include #ifdef GTSAM_ENABLE_BOOST_SERIALIZATION #include #endif diff --git a/gtsam/geometry/CalibratedCamera.h b/gtsam/geometry/CalibratedCamera.h index 277c865c5..aa2ebd270 100644 --- a/gtsam/geometry/CalibratedCamera.h +++ b/gtsam/geometry/CalibratedCamera.h @@ -223,7 +223,7 @@ public: * @return a pair of [start, end] indices into the tangent space vector */ inline static std::pair translationInterval() { - return std::make_pair(3, 5); + return {3, 5}; } /// @} diff --git a/gtsam/geometry/CameraSet.h b/gtsam/geometry/CameraSet.h index 1db86cfde..23a4b467e 100644 --- a/gtsam/geometry/CameraSet.h +++ b/gtsam/geometry/CameraSet.h @@ -153,7 +153,7 @@ class CameraSet : public std::vector> { * full matrices and vectors and pass it to the pointer * version of the function */ - template + template > Vector reprojectionError(const POINT& point, const ZVector& measured, OptArgs&... args) const { // pass it to the pointer version of the function @@ -396,7 +396,7 @@ class CameraSet : public std::vector> { FastMap KeySlotMap; for (size_t slot = 0; slot < allKeys.size(); slot++) - KeySlotMap.insert(std::make_pair(allKeys[slot], slot)); + KeySlotMap.emplace(allKeys[slot], slot); // Schur complement trick // G = F' * F - F' * E * P * E' * F diff --git a/gtsam/geometry/Pose2.h b/gtsam/geometry/Pose2.h index e3ff1416c..c3f588dcc 100644 --- a/gtsam/geometry/Pose2.h +++ b/gtsam/geometry/Pose2.h @@ -309,14 +309,14 @@ public: * exponential map parameterization * @return a pair of [start, end] indices into the tangent space vector */ - inline static std::pair translationInterval() { return std::make_pair(0, 1); } + inline static std::pair translationInterval() { return {0, 1}; } /** * Return the start and end indices (inclusive) of the rotation component of the * exponential map parameterization * @return a pair of [start, end] indices into the tangent space vector */ - static std::pair rotationInterval() { return std::make_pair(2, 2); } + static std::pair rotationInterval() { return {2, 2}; } /// Output stream operator GTSAM_EXPORT diff --git a/gtsam/geometry/Pose3.h b/gtsam/geometry/Pose3.h index 35a9ea301..d30ea38b5 100644 --- a/gtsam/geometry/Pose3.h +++ b/gtsam/geometry/Pose3.h @@ -364,7 +364,7 @@ public: * @return a pair of [start, end] indices into the tangent space vector */ inline static std::pair translationInterval() { - return std::make_pair(3, 5); + return {3, 5}; } /** @@ -373,7 +373,7 @@ public: * @return a pair of [start, end] indices into the tangent space vector */ static std::pair rotationInterval() { - return std::make_pair(0, 2); + return {0, 2}; } /** diff --git a/gtsam/geometry/Rot3.cpp b/gtsam/geometry/Rot3.cpp index e854d03b7..a936bd02a 100644 --- a/gtsam/geometry/Rot3.cpp +++ b/gtsam/geometry/Rot3.cpp @@ -21,7 +21,6 @@ #include #include -#include #include #include @@ -170,13 +169,13 @@ Vector3 Rot3::xyz(OptionalJacobian<3, 3> H) const { 
#endif Matrix39 qHm; - boost::tie(I, q) = RQ(m, qHm); + std::tie(I, q) = RQ(m, qHm); // TODO : Explore whether this expression can be optimized as both // qHm and mH are super-sparse *H = qHm * mH; } else - boost::tie(I, q) = RQ(matrix()); + std::tie(I, q) = RQ(matrix()); return q; } diff --git a/gtsam/geometry/Rot3M.cpp b/gtsam/geometry/Rot3M.cpp index 07cc7302a..45e04dab6 100644 --- a/gtsam/geometry/Rot3M.cpp +++ b/gtsam/geometry/Rot3M.cpp @@ -24,7 +24,6 @@ #include #include -#include #include using namespace std; diff --git a/gtsam/geometry/Rot3Q.cpp b/gtsam/geometry/Rot3Q.cpp index 523255d87..3e50551d5 100644 --- a/gtsam/geometry/Rot3Q.cpp +++ b/gtsam/geometry/Rot3Q.cpp @@ -20,7 +20,6 @@ #ifdef GTSAM_USE_QUATERNIONS #include -#include #include using namespace std; diff --git a/gtsam/geometry/Similarity2.cpp b/gtsam/geometry/Similarity2.cpp index 4b8cfd581..c78604953 100644 --- a/gtsam/geometry/Similarity2.cpp +++ b/gtsam/geometry/Similarity2.cpp @@ -31,9 +31,9 @@ namespace internal { static Point2Pairs SubtractCentroids(const Point2Pairs& abPointPairs, const Point2Pair& centroids) { Point2Pairs d_abPointPairs; - for (const Point2Pair& abPair : abPointPairs) { - Point2 da = abPair.first - centroids.first; - Point2 db = abPair.second - centroids.second; + for (const auto& [a, b] : abPointPairs) { + Point2 da = a - centroids.first; + Point2 db = b - centroids.second; d_abPointPairs.emplace_back(da, db); } return d_abPointPairs; @@ -43,10 +43,8 @@ static Point2Pairs SubtractCentroids(const Point2Pairs& abPointPairs, static double CalculateScale(const Point2Pairs& d_abPointPairs, const Rot2& aRb) { double x = 0, y = 0; - Point2 da, db; - for (const Point2Pair& d_abPair : d_abPointPairs) { - std::tie(da, db) = d_abPair; + for (const auto& [da, db] : d_abPointPairs) { const Vector2 da_prime = aRb * db; y += da.transpose() * da_prime; x += da_prime.transpose() * da_prime; @@ -58,8 +56,8 @@ static double CalculateScale(const Point2Pairs& d_abPointPairs, /// Form outer product H. static Matrix2 CalculateH(const Point2Pairs& d_abPointPairs) { Matrix2 H = Z_2x2; - for (const Point2Pair& d_abPair : d_abPointPairs) { - H += d_abPair.first * d_abPair.second.transpose(); + for (const auto& [da, db] : d_abPointPairs) { + H += da * db.transpose(); } return H; } @@ -186,9 +184,7 @@ Similarity2 Similarity2::Align(const Pose2Pairs& abPosePairs) { abPointPairs.reserve(n); // Below denotes the pose of the i'th object/camera/etc // in frame "a" or frame "b". 
- Pose2 aTi, bTi; - for (const Pose2Pair& abPair : abPosePairs) { - std::tie(aTi, bTi) = abPair; + for (const auto& [aTi, bTi] : abPosePairs) { const Rot2 aRb = aTi.rotation().compose(bTi.rotation().inverse()); rotations.emplace_back(aRb); abPointPairs.emplace_back(aTi.translation(), bTi.translation()); diff --git a/gtsam/geometry/Similarity3.cpp b/gtsam/geometry/Similarity3.cpp index 146161796..a9a369e44 100644 --- a/gtsam/geometry/Similarity3.cpp +++ b/gtsam/geometry/Similarity3.cpp @@ -31,9 +31,9 @@ namespace internal { static Point3Pairs subtractCentroids(const Point3Pairs &abPointPairs, const Point3Pair ¢roids) { Point3Pairs d_abPointPairs; - for (const Point3Pair& abPair : abPointPairs) { - Point3 da = abPair.first - centroids.first; - Point3 db = abPair.second - centroids.second; + for (const auto& [a, b] : abPointPairs) { + Point3 da = a - centroids.first; + Point3 db = b - centroids.second; d_abPointPairs.emplace_back(da, db); } return d_abPointPairs; @@ -46,8 +46,7 @@ static double calculateScale(const Point3Pairs &d_abPointPairs, const Rot3 &aRb) { double x = 0, y = 0; Point3 da, db; - for (const Point3Pair& d_abPair : d_abPointPairs) { - std::tie(da, db) = d_abPair; + for (const auto& [da, db] : d_abPointPairs) { const Vector3 da_prime = aRb * db; y += da.transpose() * da_prime; x += da_prime.transpose() * da_prime; @@ -59,8 +58,8 @@ static double calculateScale(const Point3Pairs &d_abPointPairs, /// Form outer product H. static Matrix3 calculateH(const Point3Pairs &d_abPointPairs) { Matrix3 H = Z_3x3; - for (const Point3Pair& d_abPair : d_abPointPairs) { - H += d_abPair.first * d_abPair.second.transpose(); + for (const auto& [da, db] : d_abPointPairs) { + H += da * db.transpose(); } return H; } @@ -184,8 +183,7 @@ Similarity3 Similarity3::Align(const vector &abPosePairs) { abPointPairs.reserve(n); // Below denotes the pose of the i'th object/camera/etc in frame "a" or frame "b" Pose3 aTi, bTi; - for (const Pose3Pair &abPair : abPosePairs) { - std::tie(aTi, bTi) = abPair; + for (const auto &[aTi, bTi] : abPosePairs) { const Rot3 aRb = aTi.rotation().compose(bTi.rotation().inverse()); rotations.emplace_back(aRb); abPointPairs.emplace_back(aTi.translation(), bTi.translation()); diff --git a/gtsam/geometry/tests/testBearingRange.cpp b/gtsam/geometry/tests/testBearingRange.cpp index 1216a6396..ff2a9a6a4 100644 --- a/gtsam/geometry/tests/testBearingRange.cpp +++ b/gtsam/geometry/tests/testBearingRange.cpp @@ -34,7 +34,7 @@ BearingRange3D br3D(Pose3().bearing(Point3(1, 0, 0)), 1); //****************************************************************************** TEST(BearingRange2D, Concept) { - BOOST_CONCEPT_ASSERT((IsManifold)); + GTSAM_CONCEPT_ASSERT(IsManifold); } /* ************************************************************************* */ @@ -46,7 +46,7 @@ TEST(BearingRange, 2D) { //****************************************************************************** TEST(BearingRange3D, Concept) { - BOOST_CONCEPT_ASSERT((IsManifold)); + GTSAM_CONCEPT_ASSERT(IsManifold); } /* ************************************************************************* */ diff --git a/gtsam/geometry/tests/testCyclic.cpp b/gtsam/geometry/tests/testCyclic.cpp index abcef9e5b..e2b250f50 100644 --- a/gtsam/geometry/tests/testCyclic.cpp +++ b/gtsam/geometry/tests/testCyclic.cpp @@ -27,7 +27,7 @@ typedef Cyclic<2> Z2; //****************************************************************************** TEST(Cyclic, Concept) { - BOOST_CONCEPT_ASSERT((IsGroup)); + GTSAM_CONCEPT_ASSERT(IsGroup); EXPECT_LONGS_EQUAL(0, 
traits::Identity()); } @@ -107,8 +107,8 @@ struct traits : internal::AdditiveGroupTraits { TEST(Cyclic , DirectSum) { // The Direct sum of Z2 and Z2 is *not* Cyclic<4>, but the // smallest non-cyclic group called the Klein four-group: - BOOST_CONCEPT_ASSERT((IsGroup)); - BOOST_CONCEPT_ASSERT((IsTestable)); + GTSAM_CONCEPT_ASSERT1(IsGroup); + GTSAM_CONCEPT_ASSERT2(IsTestable); // Refer to http://en.wikipedia.org/wiki/Klein_four-group K4 e(0,0), a(0, 1), b(1, 0), c(1, 1); diff --git a/gtsam/geometry/tests/testPoint2.cpp b/gtsam/geometry/tests/testPoint2.cpp index 6e4d408c7..4083970b6 100644 --- a/gtsam/geometry/tests/testPoint2.cpp +++ b/gtsam/geometry/tests/testPoint2.cpp @@ -34,9 +34,9 @@ TEST(Point2 , Constructor) { //****************************************************************************** TEST(Double , Concept) { - BOOST_CONCEPT_ASSERT((IsGroup)); - BOOST_CONCEPT_ASSERT((IsManifold)); - BOOST_CONCEPT_ASSERT((IsVectorSpace)); + GTSAM_CONCEPT_ASSERT1(IsGroup); + GTSAM_CONCEPT_ASSERT2(IsManifold); + GTSAM_CONCEPT_ASSERT3(IsVectorSpace); } //****************************************************************************** @@ -48,9 +48,9 @@ TEST(Double , Invariants) { //****************************************************************************** TEST(Point2 , Concept) { - BOOST_CONCEPT_ASSERT((IsGroup)); - BOOST_CONCEPT_ASSERT((IsManifold)); - BOOST_CONCEPT_ASSERT((IsVectorSpace)); + GTSAM_CONCEPT_ASSERT1(IsGroup); + GTSAM_CONCEPT_ASSERT2(IsManifold); + GTSAM_CONCEPT_ASSERT3(IsVectorSpace); } //****************************************************************************** diff --git a/gtsam/geometry/tests/testPoint3.cpp b/gtsam/geometry/tests/testPoint3.cpp index ad9f29620..e4263ffd7 100644 --- a/gtsam/geometry/tests/testPoint3.cpp +++ b/gtsam/geometry/tests/testPoint3.cpp @@ -34,9 +34,9 @@ TEST(Point3 , Constructor) { //****************************************************************************** TEST(Point3 , Concept) { - BOOST_CONCEPT_ASSERT((IsGroup)); - BOOST_CONCEPT_ASSERT((IsManifold)); - BOOST_CONCEPT_ASSERT((IsVectorSpace)); + GTSAM_CONCEPT_ASSERT1(IsGroup); + GTSAM_CONCEPT_ASSERT2(IsManifold); + GTSAM_CONCEPT_ASSERT3(IsVectorSpace); } //****************************************************************************** @@ -176,7 +176,7 @@ TEST(Point3, mean) { TEST(Point3, mean_pair) { Point3 a_mean(2, 2, 2), b_mean(-1, 1, 0); - Point3Pair expected = std::make_pair(a_mean, b_mean); + Point3Pair expected = {a_mean, b_mean}; Point3 a1(0, 0, 0), a2(1, 2, 3), a3(5, 4, 3); Point3 b1(-1, 0, 0), b2(-2, 4, 0), b3(0, -1, 0); std::vector point_pairs{{a1, b1}, {a2, b2}, {a3, b3}}; diff --git a/gtsam/geometry/tests/testPose2.cpp b/gtsam/geometry/tests/testPose2.cpp index e38bfbd75..3985f6ba5 100644 --- a/gtsam/geometry/tests/testPose2.cpp +++ b/gtsam/geometry/tests/testPose2.cpp @@ -35,9 +35,9 @@ GTSAM_CONCEPT_LIE_INST(Pose2) //****************************************************************************** TEST(Pose2 , Concept) { - BOOST_CONCEPT_ASSERT((IsGroup)); - BOOST_CONCEPT_ASSERT((IsManifold)); - BOOST_CONCEPT_ASSERT((IsLieGroup)); + GTSAM_CONCEPT_ASSERT1(IsGroup); + GTSAM_CONCEPT_ASSERT2(IsManifold); + GTSAM_CONCEPT_ASSERT3(IsLieGroup); } /* ************************************************************************* */ diff --git a/gtsam/geometry/tests/testQuaternion.cpp b/gtsam/geometry/tests/testQuaternion.cpp index 281c71f7c..546a1f5b5 100644 --- a/gtsam/geometry/tests/testQuaternion.cpp +++ b/gtsam/geometry/tests/testQuaternion.cpp @@ -29,9 +29,9 @@ typedef traits::ChartJacobian 
QuaternionJacobian; //****************************************************************************** TEST(Quaternion , Concept) { - BOOST_CONCEPT_ASSERT((IsGroup)); - BOOST_CONCEPT_ASSERT((IsManifold)); - BOOST_CONCEPT_ASSERT((IsLieGroup)); + GTSAM_CONCEPT_ASSERT1(IsGroup); + GTSAM_CONCEPT_ASSERT2(IsManifold); + GTSAM_CONCEPT_ASSERT3(IsLieGroup); } //****************************************************************************** diff --git a/gtsam/geometry/tests/testRot3.cpp b/gtsam/geometry/tests/testRot3.cpp index 0e7967715..39cc05fde 100644 --- a/gtsam/geometry/tests/testRot3.cpp +++ b/gtsam/geometry/tests/testRot3.cpp @@ -24,8 +24,6 @@ #include #include -#include - #include using namespace std; @@ -40,9 +38,9 @@ static double error = 1e-9, epsilon = 0.001; //****************************************************************************** TEST(Rot3 , Concept) { - BOOST_CONCEPT_ASSERT((IsGroup)); - BOOST_CONCEPT_ASSERT((IsManifold)); - BOOST_CONCEPT_ASSERT((IsLieGroup)); + GTSAM_CONCEPT_ASSERT1(IsGroup); + GTSAM_CONCEPT_ASSERT2(IsManifold); + GTSAM_CONCEPT_ASSERT3(IsLieGroup); } /* ************************************************************************* */ @@ -128,10 +126,8 @@ TEST( Rot3, AxisAngle2) // constructor from a rotation matrix, as doubles in *row-major* order. Rot3 R1(-0.999957, 0.00922903, 0.00203116, 0.00926964, 0.999739, 0.0208927, -0.0018374, 0.0209105, -0.999781); - Unit3 actualAxis; - double actualAngle; // convert Rot3 to quaternion using GTSAM - std::tie(actualAxis, actualAngle) = R1.axisAngle(); + const auto [actualAxis, actualAngle] = R1.axisAngle(); double expectedAngle = 3.1396582; CHECK(assert_equal(expectedAngle, actualAngle, 1e-5)); @@ -197,7 +193,7 @@ TEST( Rot3, retract) /* ************************************************************************* */ TEST( Rot3, log) { - static const double PI = boost::math::constants::pi(); + static const double PI = std::acos(-1.0); Vector w; Rot3 R; @@ -508,11 +504,9 @@ TEST( Rot3, yaw_pitch_roll ) TEST( Rot3, RQ) { // Try RQ on a pure rotation - Matrix actualK; - Vector actual; - boost::tie(actualK, actual) = RQ(R.matrix()); + const auto [actualK, actual] = RQ(R.matrix()); Vector expected = Vector3(0.14715, 0.385821, 0.231671); - CHECK(assert_equal(I_3x3,actualK)); + CHECK(assert_equal(I_3x3, (Matrix)actualK)); CHECK(assert_equal(expected,actual,1e-6)); // Try using xyz call, asserting that Rot3::RzRyRx(x,y,z).xyz()==[x;y;z] @@ -531,9 +525,9 @@ TEST( Rot3, RQ) // Try RQ to recover calibration from 3*3 sub-block of projection matrix Matrix K = (Matrix(3, 3) << 500.0, 0.0, 320.0, 0.0, 500.0, 240.0, 0.0, 0.0, 1.0).finished(); Matrix A = K * R.matrix(); - boost::tie(actualK, actual) = RQ(A); - CHECK(assert_equal(K,actualK)); - CHECK(assert_equal(expected,actual,1e-6)); + const auto [actualK2, actual2] = RQ(A); + CHECK(assert_equal(K, actualK2)); + CHECK(assert_equal(expected, actual2, 1e-6)); } /* ************************************************************************* */ diff --git a/gtsam/geometry/tests/testSO3.cpp b/gtsam/geometry/tests/testSO3.cpp index 1afe9ff21..2086318cb 100644 --- a/gtsam/geometry/tests/testSO3.cpp +++ b/gtsam/geometry/tests/testSO3.cpp @@ -36,9 +36,9 @@ TEST(SO3, Identity) { //****************************************************************************** TEST(SO3, Concept) { - BOOST_CONCEPT_ASSERT((IsGroup)); - BOOST_CONCEPT_ASSERT((IsManifold)); - BOOST_CONCEPT_ASSERT((IsLieGroup)); + GTSAM_CONCEPT_ASSERT1(IsGroup); + GTSAM_CONCEPT_ASSERT2(IsManifold); + GTSAM_CONCEPT_ASSERT3(IsLieGroup); } 
//****************************************************************************** diff --git a/gtsam/geometry/tests/testSO4.cpp b/gtsam/geometry/tests/testSO4.cpp index fa550723a..17d9694be 100644 --- a/gtsam/geometry/tests/testSO4.cpp +++ b/gtsam/geometry/tests/testSO4.cpp @@ -42,9 +42,9 @@ TEST(SO4, Identity) { //****************************************************************************** TEST(SO4, Concept) { - BOOST_CONCEPT_ASSERT((IsGroup)); - BOOST_CONCEPT_ASSERT((IsManifold)); - BOOST_CONCEPT_ASSERT((IsLieGroup)); + GTSAM_CONCEPT_ASSERT1(IsGroup); + GTSAM_CONCEPT_ASSERT2(IsManifold); + GTSAM_CONCEPT_ASSERT3(IsLieGroup); } //****************************************************************************** diff --git a/gtsam/geometry/tests/testSOn.cpp b/gtsam/geometry/tests/testSOn.cpp index d9d4da34c..eb453d8c6 100644 --- a/gtsam/geometry/tests/testSOn.cpp +++ b/gtsam/geometry/tests/testSOn.cpp @@ -84,9 +84,9 @@ TEST(SOn, SO5) { //****************************************************************************** TEST(SOn, Concept) { - BOOST_CONCEPT_ASSERT((IsGroup)); - BOOST_CONCEPT_ASSERT((IsManifold)); - BOOST_CONCEPT_ASSERT((IsLieGroup)); + GTSAM_CONCEPT_ASSERT1(IsGroup); + GTSAM_CONCEPT_ASSERT2(IsManifold); + GTSAM_CONCEPT_ASSERT3(IsLieGroup); } //****************************************************************************** diff --git a/gtsam/geometry/tests/testSimilarity2.cpp b/gtsam/geometry/tests/testSimilarity2.cpp index ca041fc7b..8d537fd72 100644 --- a/gtsam/geometry/tests/testSimilarity2.cpp +++ b/gtsam/geometry/tests/testSimilarity2.cpp @@ -35,9 +35,9 @@ static const double s = 4; //****************************************************************************** TEST(Similarity2, Concepts) { - BOOST_CONCEPT_ASSERT((IsGroup)); - BOOST_CONCEPT_ASSERT((IsManifold)); - BOOST_CONCEPT_ASSERT((IsLieGroup)); + GTSAM_CONCEPT_ASSERT1(IsGroup); + GTSAM_CONCEPT_ASSERT2(IsManifold); + GTSAM_CONCEPT_ASSERT3(IsLieGroup); } //****************************************************************************** diff --git a/gtsam/geometry/tests/testSimilarity3.cpp b/gtsam/geometry/tests/testSimilarity3.cpp index fad24f743..d0f661787 100644 --- a/gtsam/geometry/tests/testSimilarity3.cpp +++ b/gtsam/geometry/tests/testSimilarity3.cpp @@ -54,9 +54,9 @@ const double degree = M_PI / 180; //****************************************************************************** TEST(Similarity3, Concepts) { - BOOST_CONCEPT_ASSERT((IsGroup)); - BOOST_CONCEPT_ASSERT((IsManifold)); - BOOST_CONCEPT_ASSERT((IsLieGroup)); + GTSAM_CONCEPT_ASSERT1(IsGroup); + GTSAM_CONCEPT_ASSERT2(IsManifold); + GTSAM_CONCEPT_ASSERT3(IsLieGroup); } //****************************************************************************** diff --git a/gtsam/geometry/tests/testStereoPoint2.cpp b/gtsam/geometry/tests/testStereoPoint2.cpp index ddcc9238a..236ea8b01 100644 --- a/gtsam/geometry/tests/testStereoPoint2.cpp +++ b/gtsam/geometry/tests/testStereoPoint2.cpp @@ -31,9 +31,9 @@ GTSAM_CONCEPT_TESTABLE_INST(StereoPoint2) //****************************************************************************** TEST(StereoPoint2 , Concept) { - BOOST_CONCEPT_ASSERT((IsGroup)); - BOOST_CONCEPT_ASSERT((IsManifold)); - BOOST_CONCEPT_ASSERT((IsVectorSpace)); + GTSAM_CONCEPT_ASSERT1(IsGroup); + GTSAM_CONCEPT_ASSERT2(IsManifold); + GTSAM_CONCEPT_ASSERT3(IsVectorSpace); } /* ************************************************************************* */ diff --git a/gtsam/geometry/triangulation.cpp b/gtsam/geometry/triangulation.cpp index acb0ddf0e..06fb8fafe 100644 --- 
a/gtsam/geometry/triangulation.cpp +++ b/gtsam/geometry/triangulation.cpp @@ -48,10 +48,7 @@ Vector4 triangulateHomogeneousDLT( A.row(row) = p.x() * projection.row(2) - projection.row(0); A.row(row + 1) = p.y() * projection.row(2) - projection.row(1); } - int rank; - double error; - Vector v; - boost::tie(rank, error, v) = DLT(A, rank_tol); + const auto [rank, error, v] = DLT(A, rank_tol); if (rank < 3) throw(TriangulationUnderconstrainedException()); @@ -79,10 +76,7 @@ Vector4 triangulateHomogeneousDLT( A.row(row) = p.x() * projection.row(2) - p.z() * projection.row(0); A.row(row + 1) = p.y() * projection.row(2) - p.z() * projection.row(1); } - int rank; - double error; - Vector v; - boost::tie(rank, error, v) = DLT(A, rank_tol); + const auto [rank, error, v] = DLT(A, rank_tol); if (rank < 3) throw(TriangulationUnderconstrainedException()); diff --git a/gtsam/geometry/triangulation.h b/gtsam/geometry/triangulation.h index 24bc94d80..7e58cee2d 100644 --- a/gtsam/geometry/triangulation.h +++ b/gtsam/geometry/triangulation.h @@ -137,7 +137,7 @@ std::pair triangulationGraph( graph.emplace_shared > // (camera_i, measurements[i], model, landmarkKey); } - return std::make_pair(graph, values); + return {graph, values}; } /** @@ -165,7 +165,7 @@ std::pair triangulationGraph( graph.emplace_shared > // (camera_i, measurements[i], model? model : unit, landmarkKey); } - return std::make_pair(graph, values); + return {graph, values}; } /** @@ -193,9 +193,7 @@ Point3 triangulateNonlinear(const std::vector& poses, const SharedNoiseModel& model = nullptr) { // Create a factor graph and initial values - Values values; - NonlinearFactorGraph graph; - boost::tie(graph, values) = triangulationGraph // + const auto [graph, values] = triangulationGraph // (poses, sharedCal, measurements, Symbol('p', 0), initialEstimate, model); return optimize(graph, values, Symbol('p', 0)); @@ -215,9 +213,7 @@ Point3 triangulateNonlinear( const SharedNoiseModel& model = nullptr) { // Create a factor graph and initial values - Values values; - NonlinearFactorGraph graph; - boost::tie(graph, values) = triangulationGraph // + const auto [graph, values] = triangulationGraph // (cameras, measurements, Symbol('p', 0), initialEstimate, model); return optimize(graph, values, Symbol('p', 0)); diff --git a/gtsam/hybrid/HybridFactorGraph.cpp b/gtsam/hybrid/HybridFactorGraph.cpp index 631f8d22f..d96a890f4 100644 --- a/gtsam/hybrid/HybridFactorGraph.cpp +++ b/gtsam/hybrid/HybridFactorGraph.cpp @@ -20,8 +20,6 @@ #include #include -#include - namespace gtsam { /* ************************************************************************* */ diff --git a/gtsam/hybrid/HybridFactorGraph.h b/gtsam/hybrid/HybridFactorGraph.h index af0cea746..b8e7ff588 100644 --- a/gtsam/hybrid/HybridFactorGraph.h +++ b/gtsam/hybrid/HybridFactorGraph.h @@ -22,7 +22,6 @@ #include #include -#include #include namespace gtsam { diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 346f2141a..a8c674dec 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -251,9 +251,7 @@ hybridElimination(const HybridGaussianFactorGraph &factors, #endif // Separate out decision tree into conditionals and remaining factors. 
- GaussianMixture::Conditionals conditionals; - GaussianMixtureFactor::Factors newFactors; - std::tie(conditionals, newFactors) = unzip(eliminationResults); + const auto [conditionals, newFactors] = unzip(eliminationResults); // Create the GaussianMixture from the conditionals auto gaussianMixture = std::make_shared( diff --git a/gtsam/hybrid/HybridGaussianISAM.cpp b/gtsam/hybrid/HybridGaussianISAM.cpp index 3e5a4f1d1..0dd5fa38b 100644 --- a/gtsam/hybrid/HybridGaussianISAM.cpp +++ b/gtsam/hybrid/HybridGaussianISAM.cpp @@ -84,12 +84,12 @@ void HybridGaussianISAM::updateInternal( // Add the removed top and the new factors HybridGaussianFactorGraph factors; - factors += bn; - factors += newFactors; + factors.push_back(bn); + factors.push_back(newFactors); // Add the orphaned subtrees for (const sharedClique& orphan : *orphans) { - factors += std::make_shared>(orphan); + factors.emplace_shared>(orphan); } const VariableIndex index(factors); diff --git a/gtsam/hybrid/HybridJunctionTree.cpp b/gtsam/hybrid/HybridJunctionTree.cpp index 6c0e62ac9..6f2898bf1 100644 --- a/gtsam/hybrid/HybridJunctionTree.cpp +++ b/gtsam/hybrid/HybridJunctionTree.cpp @@ -94,15 +94,13 @@ struct HybridConstructorTraversalData { symbolicFactors.reserve(node->factors.size() + data.childSymbolicFactors.size()); // Add ETree node factors - symbolicFactors += node->factors; + symbolicFactors.push_back(node->factors); // Add symbolic factors passed up from children - symbolicFactors += data.childSymbolicFactors; + symbolicFactors.push_back(data.childSymbolicFactors); Ordering keyAsOrdering; keyAsOrdering.push_back(node->key); - SymbolicConditional::shared_ptr conditional; - SymbolicFactor::shared_ptr separatorFactor; - boost::tie(conditional, separatorFactor) = + const auto [conditional, separatorFactor] = internal::EliminateSymbolic(symbolicFactors, keyAsOrdering); // Store symbolic elimination results in the parent @@ -129,9 +127,9 @@ struct HybridConstructorTraversalData { // Check if we should merge the i^th child if (nrParents + nrFrontals == childConditionals[i]->nrParents()) { const bool myType = - data.discreteKeys.exists(conditional->frontals()[0]); + data.discreteKeys.exists(conditional->frontals().front()); const bool theirType = - data.discreteKeys.exists(childConditionals[i]->frontals()[0]); + data.discreteKeys.exists(childConditionals[i]->frontals().front()); if (myType == theirType) { // Increment number of frontal variables diff --git a/gtsam/hybrid/MixtureFactor.h b/gtsam/hybrid/MixtureFactor.h index cbc949404..cc0a304a2 100644 --- a/gtsam/hybrid/MixtureFactor.h +++ b/gtsam/hybrid/MixtureFactor.h @@ -27,7 +27,6 @@ #include #include -#include #include #include #include @@ -194,7 +193,7 @@ class MixtureFactor : public HybridFactor { std::cout << "\nMixtureFactor\n"; auto valueFormatter = [](const sharedFactor& v) { if (v) { - return (boost::format("Nonlinear factor on %d keys") % v->size()).str(); + return "Nonlinear factor on " + std::to_string(v->size()) + " keys"; } else { return std::string("nullptr"); } diff --git a/gtsam/hybrid/tests/testHybridBayesNet.cpp b/gtsam/hybrid/tests/testHybridBayesNet.cpp index c4bac1df2..f25675a55 100644 --- a/gtsam/hybrid/tests/testHybridBayesNet.cpp +++ b/gtsam/hybrid/tests/testHybridBayesNet.cpp @@ -131,9 +131,7 @@ TEST(HybridBayesNet, Choose) { const Ordering ordering(s.linearizationPoint.keys()); - HybridBayesNet::shared_ptr hybridBayesNet; - HybridGaussianFactorGraph::shared_ptr remainingFactorGraph; - std::tie(hybridBayesNet, remainingFactorGraph) = + const auto 
[hybridBayesNet, remainingFactorGraph] = s.linearizedFactorGraph.eliminatePartialSequential(ordering); DiscreteValues assignment; @@ -162,9 +160,7 @@ TEST(HybridBayesNet, OptimizeAssignment) { const Ordering ordering(s.linearizationPoint.keys()); - HybridBayesNet::shared_ptr hybridBayesNet; - HybridGaussianFactorGraph::shared_ptr remainingFactorGraph; - std::tie(hybridBayesNet, remainingFactorGraph) = + const auto [hybridBayesNet, remainingFactorGraph] = s.linearizedFactorGraph.eliminatePartialSequential(ordering); DiscreteValues assignment; diff --git a/gtsam/hybrid/tests/testHybridBayesTree.cpp b/gtsam/hybrid/tests/testHybridBayesTree.cpp index 2c2bdc3c0..578f5d605 100644 --- a/gtsam/hybrid/tests/testHybridBayesTree.cpp +++ b/gtsam/hybrid/tests/testHybridBayesTree.cpp @@ -97,11 +97,9 @@ TEST(HybridBayesTree, OptimizeAssignment) { // Create ordering. Ordering ordering; - for (size_t k = 0; k < s.K; k++) ordering += X(k); + for (size_t k = 0; k < s.K; k++) ordering.push_back(X(k)); - HybridBayesNet::shared_ptr hybridBayesNet; - HybridGaussianFactorGraph::shared_ptr remainingFactorGraph; - std::tie(hybridBayesNet, remainingFactorGraph) = + const auto [hybridBayesNet, remainingFactorGraph] = s.linearizedFactorGraph.eliminatePartialSequential(ordering); GaussianBayesNet gbn = hybridBayesNet->choose(assignment); @@ -141,11 +139,9 @@ TEST(HybridBayesTree, Optimize) { // Create ordering. Ordering ordering; - for (size_t k = 0; k < s.K; k++) ordering += X(k); + for (size_t k = 0; k < s.K; k++) ordering.push_back(X(k)); - HybridBayesNet::shared_ptr hybridBayesNet; - HybridGaussianFactorGraph::shared_ptr remainingFactorGraph; - std::tie(hybridBayesNet, remainingFactorGraph) = + const auto [hybridBayesNet, remainingFactorGraph] = s.linearizedFactorGraph.eliminatePartialSequential(ordering); DiscreteFactorGraph dfg; diff --git a/gtsam/hybrid/tests/testHybridEstimation.cpp b/gtsam/hybrid/tests/testHybridEstimation.cpp index 55d2bc248..28f47232b 100644 --- a/gtsam/hybrid/tests/testHybridEstimation.cpp +++ b/gtsam/hybrid/tests/testHybridEstimation.cpp @@ -37,6 +37,8 @@ #include "Switching.h" +#include + using namespace std; using namespace gtsam; @@ -44,7 +46,7 @@ using symbol_shorthand::X; Ordering getOrdering(HybridGaussianFactorGraph& factors, const HybridGaussianFactorGraph& newFactors) { - factors += newFactors; + factors.push_back(newFactors); // Get all the discrete keys from the factors KeySet allDiscrete = factors.discreteKeySet(); @@ -83,10 +85,10 @@ TEST(HybridEstimation, Full) { Ordering hybridOrdering; for (size_t k = 0; k < K; k++) { - hybridOrdering += X(k); + hybridOrdering.push_back(X(k)); } for (size_t k = 0; k < K - 1; k++) { - hybridOrdering += M(k); + hybridOrdering.push_back(M(k)); } HybridBayesNet::shared_ptr bayesNet = @@ -239,10 +241,8 @@ std::vector getDiscreteSequence(size_t x) { */ AlgebraicDecisionTree getProbPrimeTree( const HybridGaussianFactorGraph& graph) { - HybridBayesNet::shared_ptr bayesNet; - HybridGaussianFactorGraph::shared_ptr remainingGraph; Ordering continuous(graph.continuousKeySet()); - std::tie(bayesNet, remainingGraph) = + const auto [bayesNet, remainingGraph] = graph.eliminatePartialSequential(continuous); auto last_conditional = bayesNet->at(bayesNet->size() - 1); @@ -297,9 +297,7 @@ TEST(HybridEstimation, Probability) { // Continuous elimination Ordering continuous_ordering(graph.continuousKeySet()); - HybridBayesNet::shared_ptr bayesNet; - HybridGaussianFactorGraph::shared_ptr discreteGraph; - std::tie(bayesNet, discreteGraph) = + auto [bayesNet, 
discreteGraph] = graph.eliminatePartialSequential(continuous_ordering); // Discrete elimination @@ -347,9 +345,7 @@ TEST(HybridEstimation, ProbabilityMultifrontal) { // Eliminate continuous Ordering continuous_ordering(graph.continuousKeySet()); - HybridBayesTree::shared_ptr bayesTree; - HybridGaussianFactorGraph::shared_ptr discreteGraph; - std::tie(bayesTree, discreteGraph) = + const auto [bayesTree, discreteGraph] = graph.eliminatePartialMultifrontal(continuous_ordering); // Get the last continuous conditional which will have all the discrete keys @@ -448,8 +444,7 @@ TEST(HybridEstimation, eliminateSequentialRegression) { DiscreteConditional expected(m % "0.51341712/1"); // regression // Eliminate into BN using one ordering - Ordering ordering1; - ordering1 += X(0), X(1), M(0); + const Ordering ordering1{X(0), X(1), M(0)}; HybridBayesNet::shared_ptr bn1 = fg->eliminateSequential(ordering1); // Check that the discrete conditional matches the expected. @@ -457,8 +452,7 @@ TEST(HybridEstimation, eliminateSequentialRegression) { EXPECT(assert_equal(expected, *dc1, 1e-9)); // Eliminate into BN using a different ordering - Ordering ordering2; - ordering2 += X(0), X(1), M(0); + const Ordering ordering2{X(0), X(1), M(0)}; HybridBayesNet::shared_ptr bn2 = fg->eliminateSequential(ordering2); // Check that the discrete conditional matches the expected. diff --git a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp index 4c8c5518e..59040594d 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp @@ -270,9 +270,7 @@ TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalTwoClique) { auto ordering_full = Ordering::ColamdConstrainedLast(hfg, {M(0), M(1), M(2), M(3)}); - HybridBayesTree::shared_ptr hbt; - HybridGaussianFactorGraph::shared_ptr remaining; - std::tie(hbt, remaining) = hfg.eliminatePartialMultifrontal(ordering_full); + const auto [hbt, remaining] = hfg.eliminatePartialMultifrontal(ordering_full); // 9 cliques in the bayes tree and 0 remaining variables to eliminate. EXPECT_LONGS_EQUAL(9, hbt->size()); @@ -327,10 +325,9 @@ TEST(HybridGaussianFactorGraph, Switching) { std::transform(naturalX.begin(), naturalX.end(), std::back_inserter(ordX), [](int x) { return X(x); }); - KeyVector ndX; - std::vector lvls; - std::tie(ndX, lvls) = makeBinaryOrdering(ordX); + auto [ndX, lvls] = makeBinaryOrdering(ordX); std::copy(ndX.begin(), ndX.end(), std::back_inserter(ordering)); + // TODO(dellaert): this has no effect! for (auto &l : lvls) { l = -l; } @@ -341,11 +338,9 @@ TEST(HybridGaussianFactorGraph, Switching) { std::vector ordC; std::transform(naturalC.begin(), naturalC.end(), std::back_inserter(ordC), [](int x) { return M(x); }); - KeyVector ndC; - std::vector lvls; // std::copy(ordC.begin(), ordC.end(), std::back_inserter(ordering)); - std::tie(ndC, lvls) = makeBinaryOrdering(ordC); + const auto [ndC, lvls] = makeBinaryOrdering(ordC); std::copy(ndC.begin(), ndC.end(), std::back_inserter(ordering)); } auto ordering_full = Ordering(ordering); @@ -353,9 +348,7 @@ TEST(HybridGaussianFactorGraph, Switching) { // GTSAM_PRINT(*hfg); // GTSAM_PRINT(ordering_full); - HybridBayesTree::shared_ptr hbt; - HybridGaussianFactorGraph::shared_ptr remaining; - std::tie(hbt, remaining) = hfg->eliminatePartialMultifrontal(ordering_full); + const auto [hbt, remaining] = hfg->eliminatePartialMultifrontal(ordering_full); // 12 cliques in the bayes tree and 0 remaining variables to eliminate. 
EXPECT_LONGS_EQUAL(12, hbt->size()); @@ -388,10 +381,9 @@ TEST(HybridGaussianFactorGraph, SwitchingISAM) { std::transform(naturalX.begin(), naturalX.end(), std::back_inserter(ordX), [](int x) { return X(x); }); - KeyVector ndX; - std::vector lvls; - std::tie(ndX, lvls) = makeBinaryOrdering(ordX); + auto [ndX, lvls] = makeBinaryOrdering(ordX); std::copy(ndX.begin(), ndX.end(), std::back_inserter(ordering)); + // TODO(dellaert): this has no effect! for (auto &l : lvls) { l = -l; } @@ -402,18 +394,14 @@ TEST(HybridGaussianFactorGraph, SwitchingISAM) { std::vector ordC; std::transform(naturalC.begin(), naturalC.end(), std::back_inserter(ordC), [](int x) { return M(x); }); - KeyVector ndC; - std::vector lvls; // std::copy(ordC.begin(), ordC.end(), std::back_inserter(ordering)); - std::tie(ndC, lvls) = makeBinaryOrdering(ordC); + const auto [ndC, lvls] = makeBinaryOrdering(ordC); std::copy(ndC.begin(), ndC.end(), std::back_inserter(ordering)); } auto ordering_full = Ordering(ordering); - HybridBayesTree::shared_ptr hbt; - HybridGaussianFactorGraph::shared_ptr remaining; - std::tie(hbt, remaining) = hfg->eliminatePartialMultifrontal(ordering_full); + const auto [hbt, remaining] = hfg->eliminatePartialMultifrontal(ordering_full); auto new_fg = makeSwitchingChain(12); auto isam = HybridGaussianISAM(*hbt); @@ -482,9 +470,7 @@ TEST(HybridGaussianFactorGraph, SwitchingTwoVar) { ordering_partial.emplace_back(X(i)); ordering_partial.emplace_back(Y(i)); } - HybridBayesNet::shared_ptr hbn; - HybridGaussianFactorGraph::shared_ptr remaining; - std::tie(hbn, remaining) = hfg->eliminatePartialSequential(ordering_partial); + const auto [hbn, remaining] = hfg->eliminatePartialSequential(ordering_partial); EXPECT_LONGS_EQUAL(14, hbn->size()); EXPECT_LONGS_EQUAL(11, remaining->size()); @@ -847,10 +833,9 @@ TEST(HybridGaussianFactorGraph, EliminateSwitchingNetwork) { EXPECT(ratioTest(bn, measurements, fg)); // Do elimination of X(2) only: - auto result = fg.eliminatePartialSequential(Ordering{X(2)}); - auto fg1 = *result.second; - fg1.push_back(*result.first); - EXPECT(ratioTest(bn, measurements, fg1)); + auto [bn1, fg1] = fg.eliminatePartialSequential(Ordering{X(2)}); + fg1->push_back(*bn1); + EXPECT(ratioTest(bn, measurements, *fg1)); // Create ordering that eliminates in time order, then discrete modes: Ordering ordering {X(2), X(1), X(0), N(0), N(1), N(2), M(1), M(2)}; diff --git a/gtsam/hybrid/tests/testHybridGaussianISAM.cpp b/gtsam/hybrid/tests/testHybridGaussianISAM.cpp index 573fbc671..249cbf9c3 100644 --- a/gtsam/hybrid/tests/testHybridGaussianISAM.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianISAM.cpp @@ -126,15 +126,10 @@ TEST(HybridGaussianElimination, IncrementalInference) { /********************************************************/ // Run batch elimination so we can compare results. - Ordering ordering; - ordering += X(0); - ordering += X(1); - ordering += X(2); + const Ordering ordering {X(0), X(1), X(2)}; // Now we calculate the expected factors using full elimination - HybridBayesTree::shared_ptr expectedHybridBayesTree; - HybridGaussianFactorGraph::shared_ptr expectedRemainingGraph; - std::tie(expectedHybridBayesTree, expectedRemainingGraph) = + const auto [expectedHybridBayesTree, expectedRemainingGraph] = switching.linearizedFactorGraph.eliminatePartialMultifrontal(ordering); // The densities on X(0) should be the same @@ -160,12 +155,10 @@ TEST(HybridGaussianElimination, IncrementalInference) { // We only perform manual continuous elimination for 0,0. 
// The other discrete probabilities on M(2) are calculated the same way - Ordering discrete_ordering; - discrete_ordering += M(0); - discrete_ordering += M(1); + const Ordering discreteOrdering{M(0), M(1)}; HybridBayesTree::shared_ptr discreteBayesTree = expectedRemainingGraph->BaseEliminateable::eliminateMultifrontal( - discrete_ordering); + discreteOrdering); DiscreteValues m00; m00[M(0)] = 0, m00[M(1)] = 0; @@ -227,13 +220,11 @@ TEST(HybridGaussianElimination, Approx_inference) { // Create ordering. Ordering ordering; for (size_t j = 0; j < 4; j++) { - ordering += X(j); + ordering.push_back(X(j)); } // Now we calculate the actual factors using full elimination - HybridBayesTree::shared_ptr unprunedHybridBayesTree; - HybridGaussianFactorGraph::shared_ptr unprunedRemainingGraph; - std::tie(unprunedHybridBayesTree, unprunedRemainingGraph) = + const auto [unprunedHybridBayesTree, unprunedRemainingGraph] = switching.linearizedFactorGraph.eliminatePartialMultifrontal(ordering); size_t maxNrLeaves = 5; diff --git a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp index 73f969ff2..af3a23b94 100644 --- a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp @@ -265,7 +265,7 @@ TEST(HybridFactorGraph, EliminationTree) { // Create ordering. Ordering ordering; - for (size_t k = 0; k < self.K; k++) ordering += X(k); + for (size_t k = 0; k < self.K; k++) ordering.push_back(X(k)); // Create elimination tree. HybridEliminationTree etree(self.linearizedFactorGraph, ordering); @@ -286,8 +286,7 @@ TEST(GaussianElimination, Eliminate_x0) { factors.push_back(self.linearizedFactorGraph[1]); // Eliminate x0 - Ordering ordering; - ordering += X(0); + const Ordering ordering{X(0)}; auto result = EliminateHybrid(factors, ordering); CHECK(result.first); @@ -310,8 +309,7 @@ TEST(HybridsGaussianElimination, Eliminate_x1) { factors.push_back(self.linearizedFactorGraph[2]); // involves m1 // Eliminate x1 - Ordering ordering; - ordering += X(1); + const Ordering ordering{X(1)}; auto result = EliminateHybrid(factors, ordering); CHECK(result.first); @@ -346,14 +344,9 @@ TEST(HybridGaussianElimination, EliminateHybrid_2_Variable) { auto factors = self.linearizedFactorGraph; // Eliminate x0 - Ordering ordering; - ordering += X(0); - ordering += X(1); + const Ordering ordering{X(0), X(1)}; - HybridConditional::shared_ptr hybridConditionalMixture; - std::shared_ptr factorOnModes; - - std::tie(hybridConditionalMixture, factorOnModes) = + const auto [hybridConditionalMixture, factorOnModes] = EliminateHybrid(factors, ordering); auto gaussianConditionalMixture = @@ -382,12 +375,10 @@ TEST(HybridFactorGraph, Partial_Elimination) { // Create ordering of only continuous variables. Ordering ordering; - for (size_t k = 0; k < self.K; k++) ordering += X(k); + for (size_t k = 0; k < self.K; k++) ordering.push_back(X(k)); // Eliminate partially i.e. only continuous part. 
- HybridBayesNet::shared_ptr hybridBayesNet; - HybridGaussianFactorGraph::shared_ptr remainingFactorGraph; - std::tie(hybridBayesNet, remainingFactorGraph) = + const auto [hybridBayesNet, remainingFactorGraph] = linearizedFactorGraph.eliminatePartialSequential(ordering); CHECK(hybridBayesNet); @@ -415,17 +406,15 @@ TEST(HybridFactorGraph, Full_Elimination) { auto linearizedFactorGraph = self.linearizedFactorGraph; // We first do a partial elimination - HybridBayesNet::shared_ptr hybridBayesNet_partial; - HybridGaussianFactorGraph::shared_ptr remainingFactorGraph_partial; DiscreteBayesNet discreteBayesNet; { // Create ordering. Ordering ordering; - for (size_t k = 0; k < self.K; k++) ordering += X(k); + for (size_t k = 0; k < self.K; k++) ordering.push_back(X(k)); // Eliminate partially. - std::tie(hybridBayesNet_partial, remainingFactorGraph_partial) = + const auto [hybridBayesNet_partial, remainingFactorGraph_partial] = linearizedFactorGraph.eliminatePartialSequential(ordering); DiscreteFactorGraph discrete_fg; @@ -437,15 +426,15 @@ TEST(HybridFactorGraph, Full_Elimination) { } ordering.clear(); - for (size_t k = 0; k < self.K - 1; k++) ordering += M(k); + for (size_t k = 0; k < self.K - 1; k++) ordering.push_back(M(k)); discreteBayesNet = *discrete_fg.eliminateSequential(ordering, EliminateDiscrete); } // Create ordering. Ordering ordering; - for (size_t k = 0; k < self.K; k++) ordering += X(k); - for (size_t k = 0; k < self.K - 1; k++) ordering += M(k); + for (size_t k = 0; k < self.K; k++) ordering.push_back(X(k)); + for (size_t k = 0; k < self.K - 1; k++) ordering.push_back(M(k)); // Eliminate partially. HybridBayesNet::shared_ptr hybridBayesNet = @@ -486,12 +475,10 @@ TEST(HybridFactorGraph, Printing) { // Create ordering. Ordering ordering; - for (size_t k = 0; k < self.K; k++) ordering += X(k); + for (size_t k = 0; k < self.K; k++) ordering.push_back(X(k)); // Eliminate partially. - HybridBayesNet::shared_ptr hybridBayesNet; - HybridGaussianFactorGraph::shared_ptr remainingFactorGraph; - std::tie(hybridBayesNet, remainingFactorGraph) = + const auto [hybridBayesNet, remainingFactorGraph] = linearizedFactorGraph.eliminatePartialSequential(ordering); string expected_hybridFactorGraph = R"( @@ -714,18 +701,12 @@ TEST(HybridFactorGraph, DefaultDecisionTree) { initialEstimate.insert(L(1), Point2(4.1, 1.8)); // We want to eliminate variables not connected to DCFactors first. - Ordering ordering; - ordering += L(0); - ordering += L(1); - ordering += X(0); - ordering += X(1); + const Ordering ordering {L(0), L(1), X(0), X(1)}; HybridGaussianFactorGraph linearized = *fg.linearize(initialEstimate); - gtsam::HybridBayesNet::shared_ptr hybridBayesNet; - gtsam::HybridGaussianFactorGraph::shared_ptr remainingFactorGraph; // This should NOT fail - std::tie(hybridBayesNet, remainingFactorGraph) = + const auto [hybridBayesNet, remainingFactorGraph] = linearized.eliminatePartialSequential(ordering); EXPECT_LONGS_EQUAL(4, hybridBayesNet->size()); EXPECT_LONGS_EQUAL(1, remainingFactorGraph->size()); diff --git a/gtsam/hybrid/tests/testHybridNonlinearISAM.cpp b/gtsam/hybrid/tests/testHybridNonlinearISAM.cpp index 0e7010595..2fb6fd8ba 100644 --- a/gtsam/hybrid/tests/testHybridNonlinearISAM.cpp +++ b/gtsam/hybrid/tests/testHybridNonlinearISAM.cpp @@ -143,15 +143,10 @@ TEST(HybridNonlinearISAM, IncrementalInference) { /********************************************************/ // Run batch elimination so we can compare results. 
- Ordering ordering; - ordering += X(0); - ordering += X(1); - ordering += X(2); + const Ordering ordering {X(0), X(1), X(2)}; // Now we calculate the actual factors using full elimination - HybridBayesTree::shared_ptr expectedHybridBayesTree; - HybridGaussianFactorGraph::shared_ptr expectedRemainingGraph; - std::tie(expectedHybridBayesTree, expectedRemainingGraph) = + const auto [expectedHybridBayesTree, expectedRemainingGraph] = switching.linearizedFactorGraph .BaseEliminateable::eliminatePartialMultifrontal(ordering); @@ -178,12 +173,10 @@ TEST(HybridNonlinearISAM, IncrementalInference) { // We only perform manual continuous elimination for 0,0. // The other discrete probabilities on M(1) are calculated the same way - Ordering discrete_ordering; - discrete_ordering += M(0); - discrete_ordering += M(1); + const Ordering discreteOrdering{M(0), M(1)}; HybridBayesTree::shared_ptr discreteBayesTree = expectedRemainingGraph->BaseEliminateable::eliminateMultifrontal( - discrete_ordering); + discreteOrdering); DiscreteValues m00; m00[M(0)] = 0, m00[M(1)] = 0; @@ -246,13 +239,11 @@ TEST(HybridNonlinearISAM, Approx_inference) { // Create ordering. Ordering ordering; for (size_t j = 0; j < 4; j++) { - ordering += X(j); + ordering.push_back(X(j)); } // Now we calculate the actual factors using full elimination - HybridBayesTree::shared_ptr unprunedHybridBayesTree; - HybridGaussianFactorGraph::shared_ptr unprunedRemainingGraph; - std::tie(unprunedHybridBayesTree, unprunedRemainingGraph) = + const auto [unprunedHybridBayesTree, unprunedRemainingGraph] = switching.linearizedFactorGraph .BaseEliminateable::eliminatePartialMultifrontal(ordering); diff --git a/gtsam/inference/BayesNet-inst.h b/gtsam/inference/BayesNet-inst.h index f06008c88..a6aa0a70a 100644 --- a/gtsam/inference/BayesNet-inst.h +++ b/gtsam/inference/BayesNet-inst.h @@ -21,7 +21,6 @@ #include #include -#include #include #include @@ -56,7 +55,9 @@ void BayesNet::dot(std::ostream& os, os << "\n"; // Reverse order as typically Bayes nets stored in reverse topological sort. - for (auto conditional : boost::adaptors::reverse(*this)) { + for (auto it = std::make_reverse_iterator(this->end()); + it != std::make_reverse_iterator(this->begin()); ++it) { + const auto& conditional = *it; auto frontals = conditional->frontals(); const Key me = frontals.front(); auto parents = conditional->parents(); diff --git a/gtsam/inference/BayesTree-inst.h b/gtsam/inference/BayesTree-inst.h index 8e771708f..2d8f917dc 100644 --- a/gtsam/inference/BayesTree-inst.h +++ b/gtsam/inference/BayesTree-inst.h @@ -245,7 +245,7 @@ namespace gtsam { void BayesTree::fillNodesIndex(const sharedClique& subtree) { // Add each frontal variable of this root node for(const Key& j: subtree->conditional()->frontals()) { - bool inserted = nodes_.insert(std::make_pair(j, subtree)).second; + bool inserted = nodes_.insert({j, subtree}).second; assert(inserted); (void)inserted; } // Fill index for each child @@ -360,9 +360,10 @@ namespace gtsam { C1_minus_B.assign(C1_minus_B_set.begin(), C1_minus_B_set.end()); } // Factor into C1\B | B. 
- sharedFactorGraph temp_remaining; - boost::tie(p_C1_B, temp_remaining) = - FactorGraphType(p_C1_Bred).eliminatePartialMultifrontal(Ordering(C1_minus_B), function); + p_C1_B = + FactorGraphType(p_C1_Bred) + .eliminatePartialMultifrontal(Ordering(C1_minus_B), function) + .first; } std::shared_ptr p_C2_B; { KeyVector C2_minus_B; { @@ -372,20 +373,21 @@ namespace gtsam { C2_minus_B.assign(C2_minus_B_set.begin(), C2_minus_B_set.end()); } // Factor into C2\B | B. - sharedFactorGraph temp_remaining; - boost::tie(p_C2_B, temp_remaining) = - FactorGraphType(p_C2_Bred).eliminatePartialMultifrontal(Ordering(C2_minus_B), function); + p_C2_B = + FactorGraphType(p_C2_Bred) + .eliminatePartialMultifrontal(Ordering(C2_minus_B), function) + .first; } gttoc(Full_root_factoring); gttic(Variable_joint); - p_BC1C2 += p_B; - p_BC1C2 += *p_C1_B; - p_BC1C2 += *p_C2_B; + p_BC1C2.push_back(p_B); + p_BC1C2.push_back(*p_C1_B); + p_BC1C2.push_back(*p_C2_B); if(C1 != B) - p_BC1C2 += C1->conditional(); + p_BC1C2.push_back(C1->conditional()); if(C2 != B) - p_BC1C2 += C2->conditional(); + p_BC1C2.push_back(C2->conditional()); gttoc(Variable_joint); } else @@ -393,8 +395,8 @@ namespace gtsam { // The nodes have no common ancestor, they're in different trees, so they're joint is just the // product of their marginals. gttic(Disjoint_marginals); - p_BC1C2 += C1->marginal2(function); - p_BC1C2 += C2->marginal2(function); + p_BC1C2.push_back(C1->marginal2(function)); + p_BC1C2.push_back(C2->marginal2(function)); gttoc(Disjoint_marginals); } diff --git a/gtsam/inference/BayesTreeCliqueBase-inst.h b/gtsam/inference/BayesTreeCliqueBase-inst.h index 62ed0b90f..a91fa4f78 100644 --- a/gtsam/inference/BayesTreeCliqueBase-inst.h +++ b/gtsam/inference/BayesTreeCliqueBase-inst.h @@ -123,7 +123,7 @@ namespace gtsam { gttoc(BayesTreeCliqueBase_shortcut); FactorGraphType p_Cp_B(parent->shortcut(B, function)); // P(Sp||B) gttic(BayesTreeCliqueBase_shortcut); - p_Cp_B += parent->conditional_; // P(Fp|Sp) + p_Cp_B.push_back(parent->conditional_); // P(Fp|Sp) // Determine the variables we want to keepSet, S union B KeyVector keep = shortcut_indices(B, p_Cp_B); @@ -171,7 +171,7 @@ namespace gtsam { gttic(BayesTreeCliqueBase_separatorMarginal_cachemiss); // now add the parent conditional - p_Cp += parent->conditional_; // P(Fp|Sp) + p_Cp.push_back(parent->conditional_); // P(Fp|Sp) // The variables we want to keepSet are exactly the ones in S KeyVector indicesS(this->conditional()->beginParents(), @@ -198,7 +198,7 @@ namespace gtsam { // initialize with separator marginal P(S) FactorGraphType p_C = this->separatorMarginal(function); // add the conditional P(F|S) - p_C += std::shared_ptr(this->conditional_); + p_C.push_back(std::shared_ptr(this->conditional_)); return p_C; } diff --git a/gtsam/inference/ClusterTree-inst.h b/gtsam/inference/ClusterTree-inst.h index 4d6dfb22e..a6175f76b 100644 --- a/gtsam/inference/ClusterTree-inst.h +++ b/gtsam/inference/ClusterTree-inst.h @@ -185,8 +185,8 @@ struct EliminationData { // Gather factors FactorGraphType gatheredFactors; gatheredFactors.reserve(node->factors.size() + node->nrChildren()); - gatheredFactors += node->factors; - gatheredFactors += myData.childFactors; + gatheredFactors.push_back(node->factors); + gatheredFactors.push_back(myData.childFactors); // Check for Bayes tree orphan subtrees, and add them to our children // TODO(frank): should this really happen here? 
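The BayesNet-inst.h dot() hunk above (and the GaussianBayesNet.cpp hunks later in this patch) replace boost::adaptors::reverse with a pair of std::make_reverse_iterator calls. A self-contained sketch of that loop shape over a plain vector of shared pointers:

#include <iostream>
#include <iterator>
#include <memory>
#include <vector>

int main() {
  std::vector<std::shared_ptr<int>> conditionals{
      std::make_shared<int>(1), std::make_shared<int>(2), std::make_shared<int>(3)};

  // Walk the container back to front without boost::adaptors::reverse.
  for (auto it = std::make_reverse_iterator(conditionals.end());
       it != std::make_reverse_iterator(conditionals.begin()); ++it) {
    const auto& conditional = *it;
    std::cout << *conditional << " ";  // prints 3 2 1
  }
  std::cout << "\n";
  return 0;
}

std::make_reverse_iterator is C++14, so no extra dependency is introduced; the loop body is unchanged apart from the explicit dereference of the iterator.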
@@ -209,9 +209,13 @@ struct EliminationData { // Fill nodes index - we do this here instead of calling insertRoot at the end to avoid // putting orphan subtrees in the index - they'll already be in the index of the ISAM2 // object they're added to. - for (const Key& j: myData.bayesTreeNode->conditional()->frontals()) - nodesIndex_.insert(std::make_pair(j, myData.bayesTreeNode)); - + for (const Key& j : myData.bayesTreeNode->conditional()->frontals()) { +#ifdef GTSAM_USE_TBB + nodesIndex_.insert({j, myData.bayesTreeNode}); +#else + nodesIndex_.emplace(j, myData.bayesTreeNode); +#endif + } // Store remaining factor in parent's gathered factors if (!eliminationResult.second->empty()) { #ifdef GTSAM_USE_TBB @@ -273,7 +277,7 @@ EliminatableClusterTree::eliminate(const Eliminate& function) } // Return result - return std::make_pair(result, remaining); + return {result, remaining}; } } // namespace gtsam diff --git a/gtsam/inference/Conditional.h b/gtsam/inference/Conditional.h index 9f08e5623..0ab8b49a4 100644 --- a/gtsam/inference/Conditional.h +++ b/gtsam/inference/Conditional.h @@ -18,8 +18,6 @@ // \callgraph #pragma once -#include - #include namespace gtsam { @@ -70,12 +68,33 @@ namespace gtsam { /// Typedef to this class typedef Conditional This; + public: + /** A mini implementation of an iterator range, to share const + * views of frontals and parents. */ + typedef std::pair ConstFactorRange; + struct ConstFactorRangeIterator { + ConstFactorRange range_; + // Delete default constructor + ConstFactorRangeIterator() = delete; + ConstFactorRangeIterator(ConstFactorRange const& x) : range_(x) {} + // Implement begin and end for iteration + typename FACTOR::const_iterator begin() const { return range_.first; } + typename FACTOR::const_iterator end() const { return range_.second; } + size_t size() const { return std::distance(range_.first, range_.second); } + const auto& front() const { return *begin(); } + // == operator overload for comparison with another iterator + template + bool operator==(const OTHER& rhs) const { + return std::equal(begin(), end(), rhs.begin()); + } + }; + /** View of the frontal keys (call frontals()) */ - typedef boost::iterator_range Frontals; + typedef ConstFactorRangeIterator Frontals; /** View of the separator keys (call parents()) */ - typedef boost::iterator_range Parents; + typedef ConstFactorRangeIterator Parents; protected: /// @name Standard Constructors @@ -121,10 +140,10 @@ namespace gtsam { } /** return a view of the frontal keys */ - Frontals frontals() const { return boost::make_iterator_range(beginFrontals(), endFrontals()); } + Frontals frontals() const { return ConstFactorRangeIterator({beginFrontals(), endFrontals()});} /** return a view of the parent keys */ - Parents parents() const { return boost::make_iterator_range(beginParents(), endParents()); } + Parents parents() const { return ConstFactorRangeIterator({beginParents(), endParents()}); } /** * All conditional types need to implement a `logProbability` function, for which diff --git a/gtsam/inference/EliminateableFactorGraph-inst.h b/gtsam/inference/EliminateableFactorGraph-inst.h index 558acbfe8..8a524e353 100644 --- a/gtsam/inference/EliminateableFactorGraph-inst.h +++ b/gtsam/inference/EliminateableFactorGraph-inst.h @@ -20,7 +20,6 @@ #include #include -#include namespace gtsam { @@ -73,9 +72,7 @@ namespace gtsam { gttic(eliminateSequential); // Do elimination EliminationTreeType etree(asDerived(), (*variableIndex).get(), ordering); - std::shared_ptr bayesNet; - std::shared_ptr 
factorGraph; - boost::tie(bayesNet,factorGraph) = etree.eliminate(function); + const auto [bayesNet, factorGraph] = etree.eliminate(function); // If any factors are remaining, the ordering was incomplete if(!factorGraph->empty()) throw InconsistentEliminationRequested(); @@ -137,9 +134,7 @@ namespace gtsam { // Do elimination with given ordering EliminationTreeType etree(asDerived(), (*variableIndex).get(), ordering); JunctionTreeType junctionTree(etree); - std::shared_ptr bayesTree; - std::shared_ptr factorGraph; - boost::tie(bayesTree,factorGraph) = junctionTree.eliminate(function); + const auto [bayesTree, factorGraph] = junctionTree.eliminate(function); // If any factors are remaining, the ordering was incomplete if(!factorGraph->empty()) throw InconsistentEliminationRequested(); @@ -231,7 +226,7 @@ namespace gtsam { template std::shared_ptr::BayesNetType> EliminateableFactorGraph::marginalMultifrontalBayesNet( - boost::variant variables, + const Ordering& variables, const Eliminate& function, OptionalVariableIndex variableIndex) const { if(!variableIndex) { @@ -241,16 +236,12 @@ namespace gtsam { } else { // No ordering was provided for the marginalized variables, so order them using constrained // COLAMD. - bool unmarginalizedAreOrdered = (boost::get(&variables) != 0); - const KeyVector* variablesOrOrdering = - unmarginalizedAreOrdered ? - boost::get(&variables) : boost::get(&variables); - + constexpr bool forceOrder = true; Ordering totalOrdering = - Ordering::ColamdConstrainedLast((*variableIndex).get(), *variablesOrOrdering, unmarginalizedAreOrdered); + Ordering::ColamdConstrainedLast((*variableIndex).get(), variables, forceOrder); // Split up ordering - const size_t nVars = variablesOrOrdering->size(); + const size_t nVars = variables.size(); Ordering marginalizationOrdering(totalOrdering.begin(), totalOrdering.end() - nVars); Ordering marginalVarsOrdering(totalOrdering.end() - nVars, totalOrdering.end()); @@ -263,7 +254,35 @@ namespace gtsam { template std::shared_ptr::BayesNetType> EliminateableFactorGraph::marginalMultifrontalBayesNet( - boost::variant variables, + const KeyVector& variables, + const Eliminate& function, OptionalVariableIndex variableIndex) const + { + if(!variableIndex) { + // If no variable index is provided, compute one and call this function again + VariableIndex index(asDerived()); + return marginalMultifrontalBayesNet(variables, function, std::cref(index)); + } else { + // No ordering was provided for the marginalized variables, so order them using constrained + // COLAMD. 
+ const constexpr bool forceOrder = false; + Ordering totalOrdering = + Ordering::ColamdConstrainedLast((*variableIndex).get(), variables, forceOrder); + + // Split up ordering + const size_t nVars = variables.size(); + Ordering marginalizationOrdering(totalOrdering.begin(), totalOrdering.end() - nVars); + Ordering marginalVarsOrdering(totalOrdering.end() - nVars, totalOrdering.end()); + + // Call this function again with the computed orderings + return marginalMultifrontalBayesNet(marginalVarsOrdering, marginalizationOrdering, function, variableIndex); + } + } + + /* ************************************************************************* */ + template + std::shared_ptr::BayesNetType> + EliminateableFactorGraph::marginalMultifrontalBayesNet( + const Ordering& variables, const Ordering& marginalizedVariableOrdering, const Eliminate& function, OptionalVariableIndex variableIndex) const { @@ -275,22 +294,36 @@ namespace gtsam { gttic(marginalMultifrontalBayesNet); // An ordering was provided for the marginalized variables, so we can first eliminate them // in the order requested. - std::shared_ptr bayesTree; - std::shared_ptr factorGraph; - boost::tie(bayesTree,factorGraph) = + const auto [bayesTree, factorGraph] = eliminatePartialMultifrontal(marginalizedVariableOrdering, function, variableIndex); - if(const Ordering* varsAsOrdering = boost::get(&variables)) - { - // An ordering was also provided for the unmarginalized variables, so we can also - // eliminate them in the order requested. - return factorGraph->eliminateSequential(*varsAsOrdering, function); - } - else - { - // No ordering was provided for the unmarginalized variables, so order them with COLAMD. - return factorGraph->eliminateSequential(Ordering::COLAMD, function); - } + // An ordering was also provided for the unmarginalized variables, so we can also + // eliminate them in the order requested. + return factorGraph->eliminateSequential(variables, function); + } + } + + /* ************************************************************************* */ + template + std::shared_ptr::BayesNetType> + EliminateableFactorGraph::marginalMultifrontalBayesNet( + const KeyVector& variables, + const Ordering& marginalizedVariableOrdering, + const Eliminate& function, OptionalVariableIndex variableIndex) const + { + if(!variableIndex) { + // If no variable index is provided, compute one and call this function again + VariableIndex index(asDerived()); + return marginalMultifrontalBayesNet(variables, marginalizedVariableOrdering, function, index); + } else { + gttic(marginalMultifrontalBayesNet); + // An ordering was provided for the marginalized variables, so we can first eliminate them + // in the order requested. + const auto [bayesTree, factorGraph] = + eliminatePartialMultifrontal(marginalizedVariableOrdering, function, variableIndex); + + // No ordering was provided for the unmarginalized variables, so order them with COLAMD. + return factorGraph->eliminateSequential(Ordering::COLAMD, function); } } @@ -298,7 +331,7 @@ namespace gtsam { template std::shared_ptr::BayesTreeType> EliminateableFactorGraph::marginalMultifrontalBayesTree( - boost::variant variables, + const Ordering& variables, const Eliminate& function, OptionalVariableIndex variableIndex) const { if(!variableIndex) { @@ -308,16 +341,12 @@ namespace gtsam { } else { // No ordering was provided for the marginalized variables, so order them using constrained // COLAMD. 
- bool unmarginalizedAreOrdered = (boost::get(&variables) != 0); - const KeyVector* variablesOrOrdering = - unmarginalizedAreOrdered ? - boost::get(&variables) : boost::get(&variables); - + constexpr bool forceOrder = true; Ordering totalOrdering = - Ordering::ColamdConstrainedLast((*variableIndex).get(), *variablesOrOrdering, unmarginalizedAreOrdered); + Ordering::ColamdConstrainedLast((*variableIndex).get(), variables, forceOrder); // Split up ordering - const size_t nVars = variablesOrOrdering->size(); + const size_t nVars = variables.size(); Ordering marginalizationOrdering(totalOrdering.begin(), totalOrdering.end() - nVars); Ordering marginalVarsOrdering(totalOrdering.end() - nVars, totalOrdering.end()); @@ -330,7 +359,35 @@ namespace gtsam { template std::shared_ptr::BayesTreeType> EliminateableFactorGraph::marginalMultifrontalBayesTree( - boost::variant variables, + const KeyVector& variables, + const Eliminate& function, OptionalVariableIndex variableIndex) const + { + if(!variableIndex) { + // If no variable index is provided, compute one and call this function again + VariableIndex computedVariableIndex(asDerived()); + return marginalMultifrontalBayesTree(variables, function, std::cref(computedVariableIndex)); + } else { + // No ordering was provided for the marginalized variables, so order them using constrained + // COLAMD. + constexpr bool forceOrder = false; + Ordering totalOrdering = + Ordering::ColamdConstrainedLast((*variableIndex).get(), variables, forceOrder); + + // Split up ordering + const size_t nVars = variables.size(); + Ordering marginalizationOrdering(totalOrdering.begin(), totalOrdering.end() - nVars); + Ordering marginalVarsOrdering(totalOrdering.end() - nVars, totalOrdering.end()); + + // Call this function again with the computed orderings + return marginalMultifrontalBayesTree(marginalVarsOrdering, marginalizationOrdering, function, variableIndex); + } + } + + /* ************************************************************************* */ + template + std::shared_ptr::BayesTreeType> + EliminateableFactorGraph::marginalMultifrontalBayesTree( + const Ordering& variables, const Ordering& marginalizedVariableOrdering, const Eliminate& function, OptionalVariableIndex variableIndex) const { @@ -342,22 +399,36 @@ namespace gtsam { gttic(marginalMultifrontalBayesTree); // An ordering was provided for the marginalized variables, so we can first eliminate them // in the order requested. - std::shared_ptr bayesTree; - std::shared_ptr factorGraph; - boost::tie(bayesTree,factorGraph) = + const auto [bayesTree, factorGraph] = eliminatePartialMultifrontal(marginalizedVariableOrdering, function, variableIndex); - if(const Ordering* varsAsOrdering = boost::get(&variables)) - { - // An ordering was also provided for the unmarginalized variables, so we can also - // eliminate them in the order requested. - return factorGraph->eliminateMultifrontal(*varsAsOrdering, function); - } - else - { - // No ordering was provided for the unmarginalized variables, so order them with COLAMD. - return factorGraph->eliminateMultifrontal(Ordering::COLAMD, function); - } + // An ordering was also provided for the unmarginalized variables, so we can also + // eliminate them in the order requested. 
+ return factorGraph->eliminateMultifrontal(variables, function); + } + } + + /* ************************************************************************* */ + template + std::shared_ptr::BayesTreeType> + EliminateableFactorGraph::marginalMultifrontalBayesTree( + const KeyVector& variables, + const Ordering& marginalizedVariableOrdering, + const Eliminate& function, OptionalVariableIndex variableIndex) const + { + if(!variableIndex) { + // If no variable index is provided, compute one and call this function again + VariableIndex computedVariableIndex(asDerived()); + return marginalMultifrontalBayesTree(variables, marginalizedVariableOrdering, function, std::cref(computedVariableIndex)); + } else { + gttic(marginalMultifrontalBayesTree); + // An ordering was provided for the marginalized variables, so we can first eliminate them + // in the order requested. + const auto [bayesTree, factorGraph] = + eliminatePartialMultifrontal(marginalizedVariableOrdering, function, variableIndex); + + // No ordering was provided for the unmarginalized variables, so order them with COLAMD. + return factorGraph->eliminateMultifrontal(Ordering::COLAMD, function); } } diff --git a/gtsam/inference/EliminateableFactorGraph.h b/gtsam/inference/EliminateableFactorGraph.h index 561c478ff..ac4b96d28 100644 --- a/gtsam/inference/EliminateableFactorGraph.h +++ b/gtsam/inference/EliminateableFactorGraph.h @@ -22,13 +22,11 @@ #include #include #include -#include #include #include namespace gtsam { - /// Traits class for eliminateable factor graphs, specifies the types that result from /// elimination, etc. This must be defined for each factor graph that inherits from /// EliminateableFactorGraph. @@ -141,7 +139,7 @@ namespace gtsam { OptionalVariableIndex variableIndex = {}) const; /** Do multifrontal elimination of all variables to produce a Bayes tree. If an ordering is not - * provided, the ordering will be computed using either COLAMD or METIS, dependeing on + * provided, the ordering will be computed using either COLAMD or METIS, depending on * the parameter orderingType (Ordering::COLAMD or Ordering::METIS) * * Example - Full Cholesky elimination in COLAMD order: @@ -162,7 +160,7 @@ namespace gtsam { OptionalVariableIndex variableIndex = {}) const; /** Do multifrontal elimination of all variables to produce a Bayes tree. If an ordering is not - * provided, the ordering will be computed using either COLAMD or METIS, dependeing on + * provided, the ordering will be computed using either COLAMD or METIS, depending on * the parameter orderingType (Ordering::COLAMD or Ordering::METIS) * * Example - Full QR elimination in specified order: @@ -217,60 +215,108 @@ namespace gtsam { /** Compute the marginal of the requested variables and return the result as a Bayes net. Uses * COLAMD marginalization ordering by default - * @param variables Determines the variables whose marginal to compute, if provided as an - * Ordering they will be ordered in the returned BayesNet as specified, and if provided - * as a KeyVector they will be ordered using constrained COLAMD. - * @param function Optional dense elimination function, if not provided the default will be - * used. + * @param variables Determines the *ordered* variables whose marginal to compute, + * will be ordered in the returned BayesNet as specified. + * @param function Optional dense elimination function. * @param variableIndex Optional pre-computed VariableIndex for the factor graph, if not - * provided one will be computed. */ + * provided one will be computed. 
+ */ std::shared_ptr marginalMultifrontalBayesNet( - boost::variant variables, + const Ordering& variables, + const Eliminate& function = EliminationTraitsType::DefaultEliminate, + OptionalVariableIndex variableIndex = {}) const; + + /** Compute the marginal of the requested variables and return the result as a Bayes net. Uses + * COLAMD marginalization ordering by default + * @param variables Determines the variables whose marginal to compute, will be ordered + * using COLAMD; use `Ordering(variables)` to specify the variable ordering. + * @param function Optional dense elimination function. + * @param variableIndex Optional pre-computed VariableIndex for the factor graph, if not + * provided one will be computed. + */ + std::shared_ptr marginalMultifrontalBayesNet( + const KeyVector& variables, const Eliminate& function = EliminationTraitsType::DefaultEliminate, OptionalVariableIndex variableIndex = {}) const; /** Compute the marginal of the requested variables and return the result as a Bayes net. - * @param variables Determines the variables whose marginal to compute, if provided as an - * Ordering they will be ordered in the returned BayesNet as specified, and if provided - * as a KeyVector they will be ordered using constrained COLAMD. + * @param variables Determines the *ordered* variables whose marginal to compute, + * will be ordered in the returned BayesNet as specified. * @param marginalizedVariableOrdering Ordering for the variables being marginalized out, * i.e. all variables not in \c variables. - * @param function Optional dense elimination function, if not provided the default will be - * used. + * @param function Optional dense elimination function. * @param variableIndex Optional pre-computed VariableIndex for the factor graph, if not - * provided one will be computed. */ + * provided one will be computed. + */ std::shared_ptr marginalMultifrontalBayesNet( - boost::variant variables, + const Ordering& variables, + const Ordering& marginalizedVariableOrdering, + const Eliminate& function = EliminationTraitsType::DefaultEliminate, + OptionalVariableIndex variableIndex = {}) const; + + /** Compute the marginal of the requested variables and return the result as a Bayes net. + * @param variables Determines the variables whose marginal to compute, will be ordered + * using COLAMD; use `Ordering(variables)` to specify the variable ordering. + * @param marginalizedVariableOrdering Ordering for the variables being marginalized out, + * i.e. all variables not in \c variables. + * @param function Optional dense elimination function. + * @param variableIndex Optional pre-computed VariableIndex for the factor graph, if not + * provided one will be computed. + */ + std::shared_ptr marginalMultifrontalBayesNet( + const KeyVector& variables, const Ordering& marginalizedVariableOrdering, const Eliminate& function = EliminationTraitsType::DefaultEliminate, OptionalVariableIndex variableIndex = {}) const; /** Compute the marginal of the requested variables and return the result as a Bayes tree. Uses * COLAMD marginalization order by default - * @param variables Determines the variables whose marginal to compute, if provided as an - * Ordering they will be ordered in the returned BayesNet as specified, and if provided - * as a KeyVector they will be ordered using constrained COLAMD. - * @param function Optional dense elimination function, if not provided the default will be - * used. 
+ * @param variables Determines the *ordered* variables whose marginal to compute, + * will be ordered in the returned BayesNet as specified. + * @param function Optional dense elimination function.. * @param variableIndex Optional pre-computed VariableIndex for the factor graph, if not * provided one will be computed. */ std::shared_ptr marginalMultifrontalBayesTree( - boost::variant variables, + const Ordering& variables, + const Eliminate& function = EliminationTraitsType::DefaultEliminate, + OptionalVariableIndex variableIndex = {}) const; + + /** Compute the marginal of the requested variables and return the result as a Bayes tree. Uses + * COLAMD marginalization order by default + * @param variables Determines the variables whose marginal to compute, will be ordered + * using COLAMD; use `Ordering(variables)` to specify the variable ordering. + * @param function Optional dense elimination function.. + * @param variableIndex Optional pre-computed VariableIndex for the factor graph, if not + * provided one will be computed. */ + std::shared_ptr marginalMultifrontalBayesTree( + const KeyVector& variables, const Eliminate& function = EliminationTraitsType::DefaultEliminate, OptionalVariableIndex variableIndex = {}) const; /** Compute the marginal of the requested variables and return the result as a Bayes tree. - * @param variables Determines the variables whose marginal to compute, if provided as an - * Ordering they will be ordered in the returned BayesNet as specified, and if provided - * as a KeyVector they will be ordered using constrained COLAMD. + * @param variables Determines the *ordered* variables whose marginal to compute, + * will be ordered in the returned BayesNet as specified. * @param marginalizedVariableOrdering Ordering for the variables being marginalized out, * i.e. all variables not in \c variables. - * @param function Optional dense elimination function, if not provided the default will be - * used. + * @param function Optional dense elimination function.. * @param variableIndex Optional pre-computed VariableIndex for the factor graph, if not * provided one will be computed. */ std::shared_ptr marginalMultifrontalBayesTree( - boost::variant variables, + const Ordering& variables, + const Ordering& marginalizedVariableOrdering, + const Eliminate& function = EliminationTraitsType::DefaultEliminate, + OptionalVariableIndex variableIndex = {}) const; + + /** Compute the marginal of the requested variables and return the result as a Bayes tree. + * @param variables Determines the variables whose marginal to compute, will be ordered + * using COLAMD; use `Ordering(variables)` to specify the variable ordering. + * @param marginalizedVariableOrdering Ordering for the variables being marginalized out, + * i.e. all variables not in \c variables. + * @param function Optional dense elimination function.. + * @param variableIndex Optional pre-computed VariableIndex for the factor graph, if not + * provided one will be computed. 
*/ + std::shared_ptr marginalMultifrontalBayesTree( + const KeyVector& variables, const Ordering& marginalizedVariableOrdering, const Eliminate& function = EliminationTraitsType::DefaultEliminate, OptionalVariableIndex variableIndex = {}) const; diff --git a/gtsam/inference/EliminationTree-inst.h b/gtsam/inference/EliminationTree-inst.h index cd1684d9d..63cbe222b 100644 --- a/gtsam/inference/EliminationTree-inst.h +++ b/gtsam/inference/EliminationTree-inst.h @@ -198,7 +198,7 @@ namespace gtsam { allRemainingFactors->push_back(remainingFactors.begin(), remainingFactors.end()); // Return result - return std::make_pair(result, allRemainingFactors); + return {result, allRemainingFactors}; } /* ************************************************************************* */ @@ -218,13 +218,13 @@ namespace gtsam { // Add roots in sorted order { FastMap keys; - for(const sharedNode& root: this->roots_) { keys.insert(std::make_pair(root->key, root)); } + for(const sharedNode& root: this->roots_) { keys.emplace(root->key, root); } typedef typename FastMap::value_type Key_Node; for(const Key_Node& key_node: keys) { stack1.push(key_node.second); } } { FastMap keys; - for(const sharedNode& root: expected.roots_) { keys.insert(std::make_pair(root->key, root)); } + for(const sharedNode& root: expected.roots_) { keys.emplace(root->key, root); } typedef typename FastMap::value_type Key_Node; for(const Key_Node& key_node: keys) { stack2.push(key_node.second); } } @@ -258,13 +258,13 @@ namespace gtsam { // Add children in sorted order { FastMap keys; - for(const sharedNode& node: node1->children) { keys.insert(std::make_pair(node->key, node)); } + for(const sharedNode& node: node1->children) { keys.emplace(node->key, node); } typedef typename FastMap::value_type Key_Node; for(const Key_Node& key_node: keys) { stack1.push(key_node.second); } } { FastMap keys; - for(const sharedNode& node: node2->children) { keys.insert(std::make_pair(node->key, node)); } + for(const sharedNode& node: node2->children) { keys.emplace(node->key, node); } typedef typename FastMap::value_type Key_Node; for(const Key_Node& key_node: keys) { stack2.push(key_node.second); } } diff --git a/gtsam/inference/FactorGraph.h b/gtsam/inference/FactorGraph.h index caaa13cae..f1dc37c37 100644 --- a/gtsam/inference/FactorGraph.h +++ b/gtsam/inference/FactorGraph.h @@ -29,7 +29,10 @@ #include // for Eigen::aligned_allocator +#ifdef GTSAM_USE_BOOST_FEATURES #include +#endif + #ifdef GTSAM_ENABLE_BOOST_SERIALIZATION #include #include @@ -212,6 +215,7 @@ class FactorGraph { push_back(factor); } +#ifdef GTSAM_USE_BOOST_FEATURES /// `+=` works well with boost::assign list inserter. template typename std::enable_if< @@ -221,6 +225,7 @@ class FactorGraph { return boost::assign::make_list_inserter(RefCallPushBack(*this))( factor); } +#endif /// @} /// @name Adding via iterators @@ -271,6 +276,7 @@ class FactorGraph { push_back(factorOrContainer); } +#ifdef GTSAM_USE_BOOST_FEATURES /** * Add a factor or container of factors, including STL collections, * BayesTrees, etc. 
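The EliminateableFactorGraph hunks above split each boost::variant parameter into two explicit overloads: the Ordering overload keeps the caller's order, while the KeyVector overload is free to choose an order itself (constrained COLAMD in the real code). A toy sketch of that API shape, assuming simplified stand-ins for Ordering and KeyVector and a hypothetical marginal() in place of marginalMultifrontalBayesNet:

#include <iostream>
#include <vector>

using KeyVector = std::vector<int>;
struct Ordering : KeyVector {  // mirrors GTSAM's Ordering deriving from KeyVector
  using KeyVector::KeyVector;
};

// Ordering overload: the caller has fixed the elimination order.
void marginal(const Ordering& variables) {
  std::cout << "keep caller's order over " << variables.size() << " variables\n";
}

// KeyVector overload: the function may reorder internally.
void marginal(const KeyVector& variables) {
  std::cout << "choose an order internally over " << variables.size() << " variables\n";
}

int main() {
  marginal(Ordering{1, 2, 3});   // resolves to the Ordering overload
  marginal(KeyVector{3, 1, 2});  // resolves to the KeyVector overload
  return 0;
}

Overload resolution now does statically what the boost::get check on the variant did at run time, which is why the unmarginalizedAreOrdered flag disappears from the implementation.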
@@ -281,6 +287,7 @@ class FactorGraph { return boost::assign::make_list_inserter(CRefCallPushBack(*this))( factorOrContainer); } +#endif /// @} /// @name Specialized versions diff --git a/gtsam/inference/ISAM-inst.h b/gtsam/inference/ISAM-inst.h index b93c58c48..dc0750b72 100644 --- a/gtsam/inference/ISAM-inst.h +++ b/gtsam/inference/ISAM-inst.h @@ -36,12 +36,12 @@ void ISAM::updateInternal(const FactorGraphType& newFactors, // Add the removed top and the new factors FactorGraphType factors; - factors += bn; - factors += newFactors; + factors.push_back(bn); + factors.push_back(newFactors); // Add the orphaned subtrees for (const sharedClique& orphan : *orphans) - factors += std::make_shared >(orphan); + factors.template emplace_shared >(orphan); // Get an ordering where the new keys are eliminated last const VariableIndex index(factors); diff --git a/gtsam/inference/JunctionTree-inst.h b/gtsam/inference/JunctionTree-inst.h index 86be13760..0f79c2a9a 100644 --- a/gtsam/inference/JunctionTree-inst.h +++ b/gtsam/inference/JunctionTree-inst.h @@ -77,16 +77,14 @@ struct ConstructorTraversalData { symbolicFactors.reserve( ETreeNode->factors.size() + myData.childSymbolicFactors.size()); // Add ETree node factors - symbolicFactors += ETreeNode->factors; + symbolicFactors.push_back(ETreeNode->factors); // Add symbolic factors passed up from children - symbolicFactors += myData.childSymbolicFactors; + symbolicFactors.push_back(myData.childSymbolicFactors); Ordering keyAsOrdering; keyAsOrdering.push_back(ETreeNode->key); - SymbolicConditional::shared_ptr myConditional; - SymbolicFactor::shared_ptr mySeparatorFactor; - boost::tie(myConditional, mySeparatorFactor) = internal::EliminateSymbolic( - symbolicFactors, keyAsOrdering); + const auto [myConditional, mySeparatorFactor] = + internal::EliminateSymbolic(symbolicFactors, keyAsOrdering); // Store symbolic elimination results in the parent myData.parentData->childSymbolicConditionals.push_back(myConditional); diff --git a/gtsam/inference/Key.cpp b/gtsam/inference/Key.cpp index dd433ff09..8fcea0d05 100644 --- a/gtsam/inference/Key.cpp +++ b/gtsam/inference/Key.cpp @@ -20,7 +20,6 @@ #include #include -#include #include using namespace std; @@ -30,10 +29,12 @@ namespace gtsam { /* ************************************************************************* */ string _defaultKeyFormatter(Key key) { const Symbol asSymbol(key); - if (asSymbol.chr() > 0) + if (asSymbol.chr() > 0) { return (string) asSymbol; - else - return boost::lexical_cast(key); + } + else { + return std::to_string(key); + } } /* ************************************************************************* */ @@ -48,10 +49,12 @@ string _multirobotKeyFormatter(Key key) { return (string) asLabeledSymbol; const Symbol asSymbol(key); - if (asLabeledSymbol.chr() > 0) + if (asLabeledSymbol.chr() > 0) { return (string) asSymbol; - else - return boost::lexical_cast(key); + } + else { + return std::to_string(key); + } } /* ************************************************************************* */ diff --git a/gtsam/inference/LabeledSymbol.cpp b/gtsam/inference/LabeledSymbol.cpp index 6a1739e20..c187e864e 100644 --- a/gtsam/inference/LabeledSymbol.cpp +++ b/gtsam/inference/LabeledSymbol.cpp @@ -17,8 +17,6 @@ #include -#include -#include #include namespace gtsam { @@ -73,7 +71,10 @@ void LabeledSymbol::print(const std::string& s) const { /* ************************************************************************* */ LabeledSymbol::operator std::string() const { - return str(boost::format("%c%c%d") % c_ % 
label_ % j_); + char buffer[100]; + snprintf(buffer, 100, "%c%c%llu", c_, label_, + static_cast(j_)); + return std::string(buffer); } /* ************************************************************************* */ diff --git a/gtsam/inference/MetisIndex-inl.h b/gtsam/inference/MetisIndex-inl.h index 646523372..672036373 100644 --- a/gtsam/inference/MetisIndex-inl.h +++ b/gtsam/inference/MetisIndex-inl.h @@ -44,7 +44,7 @@ void MetisIndex::augment(const FACTORGRAPH& factors) { for(const Key& key: *factors[i]) { keySet.insert(keySet.end(), key); // Keep a track of all unique keys if (intKeyBMap_.left.find(key) == intKeyBMap_.left.end()) { - intKeyBMap_.insert(bm_type::value_type(key, keyCounter)); + intKeyBMap_.insert(key, keyCounter); keyCounter++; } } diff --git a/gtsam/inference/MetisIndex.h b/gtsam/inference/MetisIndex.h index 4b1f3dfce..abdf11c5f 100644 --- a/gtsam/inference/MetisIndex.h +++ b/gtsam/inference/MetisIndex.h @@ -22,17 +22,9 @@ #include #include -// Boost bimap generates many ugly warnings in CLANG -#ifdef __clang__ -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wredeclared-class-member" -#endif -#include -#ifdef __clang__ -# pragma clang diagnostic pop -#endif - #include +#include +#include namespace gtsam { /** @@ -45,12 +37,21 @@ namespace gtsam { class GTSAM_EXPORT MetisIndex { public: typedef std::shared_ptr shared_ptr; - typedef boost::bimap bm_type; private: + // Stores Key <-> integer value relationship + struct BiMap { + std::map left; + std::unordered_map right; + void insert(const Key& left_value, const int32_t& right_value) { + left[left_value] = right_value; + right[right_value] = left_value; + } + }; + std::vector xadj_; // Index of node's adjacency list in adj std::vector adj_; // Stores ajacency lists of all nodes, appended into a single vector - boost::bimap intKeyBMap_; // Stores Key <-> integer value relationship + BiMap intKeyBMap_; // Stores Key <-> integer value relationship size_t nKeys_; public: diff --git a/gtsam/inference/Ordering.cpp b/gtsam/inference/Ordering.cpp index 7b7ff1403..2956c7575 100644 --- a/gtsam/inference/Ordering.cpp +++ b/gtsam/inference/Ordering.cpp @@ -19,8 +19,6 @@ #include #include -#include - #include #include @@ -107,9 +105,9 @@ Ordering Ordering::ColamdConstrained(const VariableIndex& variableIndex, gttic(ccolamd); int rv = ccolamd((int) nFactors, (int) nVars, (int) Alen, &A[0], &p[0], knobs, stats, &cmember[0]); - if (rv != 1) - throw runtime_error( - (boost::format("ccolamd failed with return value %1%") % rv).str()); + if (rv != 1) { + throw runtime_error("ccolamd failed with return value " + to_string(rv)); + } } // ccolamd_report(stats); diff --git a/gtsam/inference/Ordering.h b/gtsam/inference/Ordering.h index ad41aa5bb..884a93f0d 100644 --- a/gtsam/inference/Ordering.h +++ b/gtsam/inference/Ordering.h @@ -25,7 +25,10 @@ #include #include +#ifdef GTSAM_USE_BOOST_FEATURES #include +#endif + #include #include @@ -58,6 +61,7 @@ public: Base(keys.begin(), keys.end()) { } +#ifdef GTSAM_USE_BOOST_FEATURES /// Add new variables to the ordering as ordering += key1, key2, ... Equivalent to calling /// push_back. boost::assign::list_inserter > operator+=( @@ -65,6 +69,7 @@ public: return boost::assign::make_list_inserter( boost::assign_detail::call_push_back(*this))(key); } +#endif /** * @brief Append new keys to the ordering as `ordering += keys`. 
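The Key.cpp, LabeledSymbol.cpp, Ordering.cpp and Symbol.cpp hunks in this patch drop boost::format and boost::lexical_cast in favour of snprintf into a fixed buffer and std::to_string. A standalone sketch of the same formatting, with made-up character and index values:

#include <cstdint>
#include <cstdio>
#include <iostream>
#include <string>

int main() {
  const char c = 'x';
  const std::uint64_t j = 7;

  // boost::format("%c%d") % c % j  ->  snprintf into a fixed-size buffer.
  char buffer[32];
  std::snprintf(buffer, sizeof(buffer), "%c%llu", c,
                static_cast<unsigned long long>(j));
  const std::string symbol(buffer);

  // boost::lexical_cast<std::string>(key)  ->  std::to_string(key).
  const std::string keyText = std::to_string(j);

  std::cout << symbol << " " << keyText << "\n";  // prints "x7 7"
  return 0;
}

The cast to unsigned long long before printing with %llu mirrors the patch's snprintf calls and keeps the format string portable on platforms where uint64_t is unsigned long.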
diff --git a/gtsam/inference/Symbol.cpp b/gtsam/inference/Symbol.cpp index 925ba9ce3..24af9d9f6 100644 --- a/gtsam/inference/Symbol.cpp +++ b/gtsam/inference/Symbol.cpp @@ -18,12 +18,11 @@ #include -#include -#include - #include #include #include +#include +#include namespace gtsam { @@ -40,8 +39,8 @@ Symbol::Symbol(Key key) : Key Symbol::key() const { if (j_ > indexMask) { - boost::format msg("Symbol index is too large, j=%d, indexMask=%d"); - msg % j_ % indexMask; + std::stringstream msg; + msg << "Symbol index is too large, j=" << j_ << ", indexMask=" << indexMask; throw std::invalid_argument(msg.str()); } Key key = (Key(c_) << indexBits) | j_; @@ -57,7 +56,9 @@ bool Symbol::equals(const Symbol& expected, double tol) const { } Symbol::operator std::string() const { - return str(boost::format("%c%d") % c_ % j_); + char buffer[10]; + snprintf(buffer, 10, "%c%llu", c_, static_cast(j_)); + return std::string(buffer); } static Symbol make(gtsam::Key key) { return Symbol(key);} diff --git a/gtsam/inference/VariableSlots.h b/gtsam/inference/VariableSlots.h index a665890c2..edc1b1840 100644 --- a/gtsam/inference/VariableSlots.h +++ b/gtsam/inference/VariableSlots.h @@ -24,7 +24,6 @@ #include #include -#include #include #include @@ -110,8 +109,7 @@ VariableSlots::VariableSlots(const FG& factorGraph) // we're combining. Initially we put the max integer value in // the array entry for each factor that will indicate the factor // does not involve the variable. - iterator thisVarSlots; bool inserted; - boost::tie(thisVarSlots, inserted) = this->insert(std::make_pair(involvedVariable, FastVector())); + auto [thisVarSlots, inserted] = this->insert({involvedVariable, FastVector()}); if(inserted) thisVarSlots->second.resize(factorGraph.nrFactors(), Empty); thisVarSlots->second[jointFactorPos] = factorVarSlot; diff --git a/gtsam/inference/graph-inl.h b/gtsam/inference/graph-inl.h index 93bb2a51c..94a239524 100644 --- a/gtsam/inference/graph-inl.h +++ b/gtsam/inference/graph-inl.h @@ -48,13 +48,8 @@ public: /* ************************************************************************* */ template std::list predecessorMap2Keys(const PredecessorMap& p_map) { - typedef typename SGraph::Vertex SVertex; - - SGraph g; - SVertex root; - std::map key2vertex; - boost::tie(g, root, key2vertex) = gtsam::predecessorMap2Graph, SVertex, KEY>(p_map); + const auto [g, root, key2vertex] = gtsam::predecessorMap2Graph, SVertex, KEY>(p_map); // breadth first visit on the graph std::list keys; @@ -114,25 +109,23 @@ SDGraph toBoostGraph(const G& graph) { /* ************************************************************************* */ template -boost::tuple > +std::tuple > predecessorMap2Graph(const PredecessorMap& p_map) { G g; std::map key2vertex; V v1, v2, root; bool foundRoot = false; - for(auto child_parent: p_map) { - KEY child, parent; - std::tie(child,parent) = child_parent; + for(const auto& [child, parent]: p_map) { if (key2vertex.find(child) == key2vertex.end()) { v1 = add_vertex(child, g); - key2vertex.insert(std::make_pair(child, v1)); + key2vertex.emplace(child, v1); } else v1 = key2vertex[child]; if (key2vertex.find(parent) == key2vertex.end()) { v2 = add_vertex(parent, g); - key2vertex.insert(std::make_pair(parent, v2)); + key2vertex.emplace(parent, v2); } else v2 = key2vertex[parent]; @@ -146,7 +139,7 @@ predecessorMap2Graph(const PredecessorMap& p_map) { if (!foundRoot) throw std::invalid_argument("predecessorMap2Graph: invalid predecessor map!"); else - return boost::tuple >(g, root, key2vertex); + return 
std::tuple >(g, root, key2vertex); } /* ************************************************************************* */ @@ -180,17 +173,14 @@ std::shared_ptr composePoses(const G& graph, const PredecessorMap& boost::property, boost::property > PoseGraph; typedef typename boost::graph_traits::vertex_descriptor PoseVertex; - typedef typename boost::graph_traits::edge_descriptor PoseEdge; PoseGraph g; PoseVertex root; std::map key2vertex; - boost::tie(g, root, key2vertex) = + std::tie(g, root, key2vertex) = predecessorMap2Graph(tree); // attach the relative poses to the edges - PoseEdge edge12, edge21; - bool found1, found2; for(typename G::sharedFactor nl_factor: graph) { if (nl_factor->keys().size() > 2) @@ -207,8 +197,8 @@ std::shared_ptr composePoses(const G& graph, const PredecessorMap& PoseVertex v2 = key2vertex.find(key2)->second; POSE l1Xl2 = factor->measured(); - boost::tie(edge12, found1) = boost::edge(v1, v2, g); - boost::tie(edge21, found2) = boost::edge(v2, v1, g); + const auto [edge12, found1] = boost::edge(v1, v2, g); + const auto [edge21, found2] = boost::edge(v2, v1, g); if (found1 && found2) throw std::invalid_argument ("composePoses: invalid spanning tree"); if (!found1 && !found2) continue; if (found1) diff --git a/gtsam/inference/graph.h b/gtsam/inference/graph.h index 9f5f4c9bc..9bb181467 100644 --- a/gtsam/inference/graph.h +++ b/gtsam/inference/graph.h @@ -59,7 +59,7 @@ namespace gtsam { public: /** convenience insert so we can pass ints for TypedSymbol keys */ inline void insert(const KEY& key, const KEY& parent) { - std::map::insert(std::make_pair(key, parent)); + std::map::emplace(key, parent); } }; @@ -83,7 +83,7 @@ namespace gtsam { * V = Vertex type */ template - boost::tuple > predecessorMap2Graph(const PredecessorMap& p_map); + std::tuple > predecessorMap2Graph(const PredecessorMap& p_map); /** * Compose the poses by following the chain specified by the spanning tree diff --git a/gtsam/inference/inferenceExceptions.h b/gtsam/inference/inferenceExceptions.h index bcd986983..fa3e3cb25 100644 --- a/gtsam/inference/inferenceExceptions.h +++ b/gtsam/inference/inferenceExceptions.h @@ -18,7 +18,6 @@ #pragma once #include -#include #include namespace gtsam { diff --git a/gtsam/inference/tests/testSymbol.cpp b/gtsam/inference/tests/testSymbol.cpp index bedd69044..af49a1e2e 100644 --- a/gtsam/inference/tests/testSymbol.cpp +++ b/gtsam/inference/tests/testSymbol.cpp @@ -18,6 +18,8 @@ #include +#include + using namespace std; using namespace gtsam; diff --git a/gtsam/linear/ConjugateGradientSolver.cpp b/gtsam/linear/ConjugateGradientSolver.cpp index a1b9e2d83..006e7d67d 100644 --- a/gtsam/linear/ConjugateGradientSolver.cpp +++ b/gtsam/linear/ConjugateGradientSolver.cpp @@ -18,7 +18,6 @@ */ #include -#include #include using namespace std; @@ -49,7 +48,9 @@ std::string ConjugateGradientParameters::blasTranslator(const BLASKernel value) /*****************************************************************************/ ConjugateGradientParameters::BLASKernel ConjugateGradientParameters::blasTranslator( const std::string &src) { - std::string s = src; boost::algorithm::to_upper(s); + std::string s = src; + // Convert to upper case + std::transform(s.begin(), s.end(), s.begin(), ::toupper); if (s == "GTSAM") return ConjugateGradientParameters::GTSAM; /* default is SBM */ diff --git a/gtsam/linear/Errors.cpp b/gtsam/linear/Errors.cpp index 92f7bb4b8..d36b647ce 100644 --- a/gtsam/linear/Errors.cpp +++ b/gtsam/linear/Errors.cpp @@ -17,7 +17,6 @@ * @author Christian Potthast */ -#include 
#include #include @@ -28,7 +27,7 @@ namespace gtsam { /* ************************************************************************* */ Errors createErrors(const VectorValues& V) { Errors result; - for (const Vector& e : V | boost::adaptors::map_values) { + for (const auto& [key, e] : V) { result.push_back(e); } return result; diff --git a/gtsam/linear/GaussianBayesNet.cpp b/gtsam/linear/GaussianBayesNet.cpp index 2e62d2d42..b04878ac5 100644 --- a/gtsam/linear/GaussianBayesNet.cpp +++ b/gtsam/linear/GaussianBayesNet.cpp @@ -20,8 +20,8 @@ #include #include -#include #include +#include using namespace std; using namespace gtsam; @@ -50,11 +50,11 @@ namespace gtsam { VectorValues solution = given; // (R*x)./sigmas = y by solving x=inv(R)*(y.*sigmas) // solve each node in reverse topological sort order (parents first) - for (auto cg : boost::adaptors::reverse(*this)) { + for (auto it = std::make_reverse_iterator(end()); it != std::make_reverse_iterator(begin()); ++it) { // i^th part of R*x=y, x=inv(R)*y // (Rii*xi + R_i*x(i+1:))./si = yi => // xi = inv(Rii)*(yi.*si - R_i*x(i+1:)) - solution.insert(cg->solve(solution)); + solution.insert((*it)->solve(solution)); } return solution; } @@ -69,8 +69,8 @@ namespace gtsam { std::mt19937_64* rng) const { VectorValues result(given); // sample each node in reverse topological sort order (parents first) - for (auto cg : boost::adaptors::reverse(*this)) { - const VectorValues sampled = cg->sample(result, rng); + for (auto it = std::make_reverse_iterator(end()); it != std::make_reverse_iterator(begin()); ++it) { + const VectorValues sampled = (*it)->sample(result, rng); result.insert(sampled); } return result; @@ -131,8 +131,8 @@ namespace gtsam { VectorValues result; // TODO this looks pretty sketchy. result is passed as the parents argument // as it's filled up by solving the gaussian conditionals. - for (auto cg: boost::adaptors::reverse(*this)) { - result.insert(cg->solveOtherRHS(result, rhs)); + for (auto it = std::make_reverse_iterator(end()); it != std::make_reverse_iterator(begin()); ++it) { + result.insert((*it)->solveOtherRHS(result, rhs)); } return result; } diff --git a/gtsam/linear/GaussianConditional.cpp b/gtsam/linear/GaussianConditional.cpp index e78caa2d9..188c31abe 100644 --- a/gtsam/linear/GaussianConditional.cpp +++ b/gtsam/linear/GaussianConditional.cpp @@ -21,13 +21,10 @@ #include #include -#include #ifdef __GNUC__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-variable" #endif -#include -#include #ifdef __GNUC__ #pragma GCC diagnostic pop #endif @@ -104,22 +101,20 @@ namespace gtsam { void GaussianConditional::print(const string &s, const KeyFormatter& formatter) const { cout << s << " p("; for (const_iterator it = beginFrontals(); it != endFrontals(); ++it) { - cout << (boost::format("%1%") % (formatter(*it))).str() - << (nrFrontals() > 1 ? " " : ""); + cout << formatter(*it) << (nrFrontals() > 1 ? 
" " : ""); } if (nrParents()) { cout << " |"; for (const_iterator it = beginParents(); it != endParents(); ++it) { - cout << " " << (boost::format("%1%") % (formatter(*it))).str(); + cout << " " << formatter(*it); } } cout << ")" << endl; cout << formatMatrixIndented(" R = ", R()) << endl; for (const_iterator it = beginParents() ; it != endParents() ; ++it) { - cout << formatMatrixIndented((boost::format(" S[%1%] = ")%(formatter(*it))).str(), getA(it)) - << endl; + cout << formatMatrixIndented(" S[" + formatter(*it) + "] = ", getA(it)) << endl; } cout << formatMatrixIndented(" d = ", getb(), true) << "\n"; if (nrParents() == 0) { diff --git a/gtsam/linear/GaussianConditional.h b/gtsam/linear/GaussianConditional.h index f99006831..43d349b67 100644 --- a/gtsam/linear/GaussianConditional.h +++ b/gtsam/linear/GaussianConditional.h @@ -19,8 +19,6 @@ #pragma once -#include - #include #include #include diff --git a/gtsam/linear/GaussianDensity.cpp b/gtsam/linear/GaussianDensity.cpp index 343396c0a..196be3402 100644 --- a/gtsam/linear/GaussianDensity.cpp +++ b/gtsam/linear/GaussianDensity.cpp @@ -17,7 +17,6 @@ */ #include -#include #include using std::cout; @@ -37,8 +36,9 @@ GaussianDensity GaussianDensity::FromMeanAndStddev(Key key, const Vector& mean, void GaussianDensity::print(const string& s, const KeyFormatter& formatter) const { cout << s << ": density on "; - for (const_iterator it = beginFrontals(); it != endFrontals(); ++it) - cout << (boost::format("[%1%]") % (formatter(*it))).str() << " "; + for (const_iterator it = beginFrontals(); it != endFrontals(); ++it) { + cout << "[" << formatter(*it) << "] "; + } cout << endl; gtsam::print(mean(), "mean: "); gtsam::print(covariance(), "covariance: "); diff --git a/gtsam/linear/HessianFactor.cpp b/gtsam/linear/HessianFactor.cpp index 194a7ca53..db943aa70 100644 --- a/gtsam/linear/HessianFactor.cpp +++ b/gtsam/linear/HessianFactor.cpp @@ -29,20 +29,11 @@ #include #include -#include -#include -#include -#include -#include - #include #include +#include "gtsam/base/Vector.h" using namespace std; -namespace br { -using namespace boost::range; -using namespace boost::adaptors; -} namespace gtsam { @@ -141,15 +132,19 @@ HessianFactor::HessianFactor(Key j1, Key j2, Key j3, const Matrix& G11, /* ************************************************************************* */ namespace { -DenseIndex _getSizeHF(const Vector& m) { - return m.size(); -} +static std::vector _getSizeHFVec(const std::vector& m) { + std::vector dims; + for (const Vector& v : m) { + dims.push_back(v.size()); + } + return dims; } +} // namespace /* ************************************************************************* */ HessianFactor::HessianFactor(const KeyVector& js, const std::vector& Gs, const std::vector& gs, double f) : - GaussianFactor(js), info_(gs | br::transformed(&_getSizeHF), true) { + GaussianFactor(js), info_(_getSizeHFVec(gs), true) { // Get the number of variables size_t variable_count = js.size(); @@ -415,9 +410,7 @@ void HessianFactor::multiplyHessianAdd(double alpha, const VectorValues& x, // copy to yvalues for (DenseIndex i = 0; i < (DenseIndex) size(); ++i) { - bool didNotExist; - VectorValues::iterator it; - boost::tie(it, didNotExist) = yvalues.tryInsert(keys_[i], Vector()); + const auto [it, didNotExist] = yvalues.tryInsert(keys_[i], Vector()); if (didNotExist) it->second = alpha * y[i]; // init else diff --git a/gtsam/linear/IterativeSolver.cpp b/gtsam/linear/IterativeSolver.cpp index 5adb1916a..c5a25c4b8 100644 --- a/gtsam/linear/IterativeSolver.cpp +++ 
b/gtsam/linear/IterativeSolver.cpp @@ -19,7 +19,6 @@ #include #include #include -#include #include using namespace std; @@ -57,7 +56,8 @@ ostream& operator<<(ostream &os, const IterativeOptimizationParameters &p) { IterativeOptimizationParameters::Verbosity IterativeOptimizationParameters::verbosityTranslator( const string &src) { string s = src; - boost::algorithm::to_upper(s); + // Convert to upper case + std::transform(s.begin(), s.end(), s.begin(), ::toupper); if (s == "SILENT") return IterativeOptimizationParameters::SILENT; else if (s == "COMPLEXITY") diff --git a/gtsam/linear/IterativeSolver.h b/gtsam/linear/IterativeSolver.h index 654e2ea12..c4a719436 100644 --- a/gtsam/linear/IterativeSolver.h +++ b/gtsam/linear/IterativeSolver.h @@ -21,7 +21,6 @@ #include #include -#include #include #include diff --git a/gtsam/linear/JacobianFactor.cpp b/gtsam/linear/JacobianFactor.cpp index 6f8bd99f9..579f6cbc2 100644 --- a/gtsam/linear/JacobianFactor.cpp +++ b/gtsam/linear/JacobianFactor.cpp @@ -31,12 +31,6 @@ #include #include -#include -#include -#include -#include -#include - #include #include #include @@ -100,9 +94,7 @@ JacobianFactor::JacobianFactor(const HessianFactor& factor) Ab_.full() = factor.info().selfadjointView(); // Do Cholesky to get a Jacobian - size_t maxrank; - bool success; - boost::tie(maxrank, success) = choleskyCareful(Ab_.matrix()); + const auto [maxrank, success] = choleskyCareful(Ab_.matrix()); // Check that Cholesky succeeded OR it managed to factor the full Hessian. // THe latter case occurs with non-positive definite matrices arising from QP. @@ -122,7 +114,7 @@ JacobianFactor::JacobianFactor(const HessianFactor& factor) /* ************************************************************************* */ // Helper functions for combine constructor namespace { -boost::tuple, DenseIndex, DenseIndex> _countDims( +std::tuple, DenseIndex, DenseIndex> _countDims( const FastVector& factors, const FastVector& variableSlots) { gttic(countDims); @@ -188,7 +180,7 @@ boost::tuple, DenseIndex, DenseIndex> _countDims( } #endif - return boost::make_tuple(varDims, m, n); + return std::make_tuple(varDims, m, n); } /* ************************************************************************* */ @@ -219,18 +211,16 @@ void JacobianFactor::JacobianFactorHelper(const GaussianFactorGraph& graph, graph); // Count dimensions - FastVector varDims; - DenseIndex m, n; - boost::tie(varDims, m, n) = _countDims(jacobians, orderedSlots); + const auto [varDims, m, n] = _countDims(jacobians, orderedSlots); // Allocate matrix and copy keys in order gttic(allocate); Ab_ = VerticalBlockMatrix(varDims, m, true); // Allocate augmented matrix Base::keys_.resize(orderedSlots.size()); - boost::range::copy( - // Get variable keys - orderedSlots | boost::adaptors::indirected | boost::adaptors::map_keys, - Base::keys_.begin()); + // Copy keys in order + std::transform(orderedSlots.begin(), orderedSlots.end(), + Base::keys_.begin(), + [](const VariableSlots::const_iterator& it) {return it->first;}); gttoc(allocate); // Loop over slots in combined factor and copy blocks from source factors @@ -415,7 +405,7 @@ void JacobianFactor::print(const string& s, if (!s.empty()) cout << s << "\n"; for (const_iterator key = begin(); key != end(); ++key) { - cout << boost::format(" A[%1%] = ") % formatter(*key); + cout << " A[" << formatter(*key) << "] = "; cout << getA(key).format(matlabFormat()) << endl; } cout << formatMatrixIndented(" b = ", getb(), true) << "\n"; diff --git a/gtsam/linear/KalmanFilter.cpp 
b/gtsam/linear/KalmanFilter.cpp index ad6d31e1e..ded6bc5e3 100644 --- a/gtsam/linear/KalmanFilter.cpp +++ b/gtsam/linear/KalmanFilter.cpp @@ -38,12 +38,12 @@ KalmanFilter::solve(const GaussianFactorGraph& factorGraph) const { // Eliminate the graph using the provided Eliminate function Ordering ordering(factorGraph.keys()); - GaussianBayesNet::shared_ptr bayesNet = // + const auto bayesNet = // factorGraph.eliminateSequential(ordering, function_); // As this is a filter, all we need is the posterior P(x_t). // This is the last GaussianConditional in the resulting BayesNet - GaussianConditional::shared_ptr posterior = *(--bayesNet->end()); + GaussianConditional::shared_ptr posterior = bayesNet->back(); return std::make_shared(*posterior); } @@ -54,7 +54,8 @@ KalmanFilter::fuse(const State& p, GaussianFactor::shared_ptr newFactor) const { // Create a factor graph GaussianFactorGraph factorGraph; - factorGraph += p, newFactor; + factorGraph.push_back(p); + factorGraph.push_back(newFactor); // Eliminate graph in order x0, x1, to get Bayes net P(x0|x1)P(x1) return solve(factorGraph); @@ -66,7 +67,7 @@ KalmanFilter::State KalmanFilter::init(const Vector& x0, // Create a factor graph f(x0), eliminate it into P(x0) GaussianFactorGraph factorGraph; - factorGraph += JacobianFactor(0, I_, x0, P0); // |x-x0|^2_diagSigma + factorGraph.emplace_shared(0, I_, x0, P0); // |x-x0|^2_diagSigma return solve(factorGraph); } @@ -75,7 +76,7 @@ KalmanFilter::State KalmanFilter::init(const Vector& x, const Matrix& P0) const // Create a factor graph f(x0), eliminate it into P(x0) GaussianFactorGraph factorGraph; - factorGraph += HessianFactor(0, x, P0); // 0.5*(x-x0)'*inv(Sigma)*(x-x0) + factorGraph.emplace_shared(0, x, P0); // 0.5*(x-x0)'*inv(Sigma)*(x-x0) return solve(factorGraph); } diff --git a/gtsam/linear/NoiseModel.cpp b/gtsam/linear/NoiseModel.cpp index 32dd1c535..2751e89f2 100644 --- a/gtsam/linear/NoiseModel.cpp +++ b/gtsam/linear/NoiseModel.cpp @@ -19,8 +19,6 @@ #include #include -#include - #include #include #include @@ -480,7 +478,7 @@ SharedDiagonal Constrained::QR(Matrix& Ab) const { const size_t maxRank = min(m, n); // create storage for [R d] - typedef boost::tuple Triple; + typedef std::tuple Triple; list Rd; Matrix rd(1, n + 1); // and for row of R @@ -506,7 +504,7 @@ SharedDiagonal Constrained::QR(Matrix& Ab) const { rd = Ab.row(*constraint_row); // Construct solution (r, d, sigma) - Rd.push_back(boost::make_tuple(j, rd, kInfinity)); + Rd.push_back(std::make_tuple(j, rd, kInfinity)); // exit after rank exhausted if (Rd.size() >= maxRank) @@ -552,7 +550,7 @@ SharedDiagonal Constrained::QR(Matrix& Ab) const { rd.block(0, j + 1, 1, n - j) = pseudo.transpose() * Ab.block(0, j + 1, m, n - j); // construct solution (r, d, sigma) - Rd.push_back(boost::make_tuple(j, rd, precision)); + Rd.push_back(std::make_tuple(j, rd, precision)); } else { // If precision is zero, no information on this column // This is actually not limited to constraints, could happen in Gaussian::QR @@ -577,9 +575,9 @@ SharedDiagonal Constrained::QR(Matrix& Ab) const { bool mixed = false; Ab.setZero(); // make sure we don't look below for (const Triple& t: Rd) { - const size_t& j = t.get<0>(); - const Matrix& rd = t.get<1>(); - precisions(i) = t.get<2>(); + const size_t& j = std::get<0>(t); + const Matrix& rd = std::get<1>(t); + precisions(i) = std::get<2>(t); if (std::isinf(precisions(i))) mixed = true; Ab.block(i, j, 1, n + 1 - j) = rd.block(0, j, 1, n + 1 - j); @@ -606,7 +604,7 @@ Isotropic::shared_ptr Isotropic::Variance(size_t 
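// Illustrative sketch (not part of the patch): in the Constrained::QR hunks above,
// the boost::tuple member accessor t.get<N>() becomes the free function std::get<N>(t).
// The Triple layout below (column index, row value, precision) mirrors that code,
// with a double standing in for the Matrix row.
#include <cstddef>
#include <iostream>
#include <list>
#include <tuple>

using Triple = std::tuple<std::size_t, double, double>;  // (column, row value, precision)

int main() {
  std::list<Triple> Rd;
  Rd.push_back(std::make_tuple(std::size_t{0}, 1.5, 10.0));  // was boost::make_tuple(...)
  for (const Triple& t : Rd) {
    const std::size_t j = std::get<0>(t);   // was t.get<0>()
    const double rd = std::get<1>(t);       // was t.get<1>()
    const double precision = std::get<2>(t);
    std::cout << j << " " << rd << " " << precision << "\n";
  }
  return 0;
}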
dim, double variance, bool smar /* ************************************************************************* */ void Isotropic::print(const string& name) const { - cout << boost::format("isotropic dim=%1% sigma=%2%") % dim() % sigma_ << endl; + cout << "isotropic dim=" << dim() << " sigma=" << sigma_ << endl; } /* ************************************************************************* */ diff --git a/gtsam/linear/PCGSolver.cpp b/gtsam/linear/PCGSolver.cpp index e98cbf69d..cc461e63c 100644 --- a/gtsam/linear/PCGSolver.cpp +++ b/gtsam/linear/PCGSolver.cpp @@ -22,8 +22,6 @@ #include #include -#include - #include #include #include diff --git a/gtsam/linear/Preconditioner.cpp b/gtsam/linear/Preconditioner.cpp index cb7e36503..2b84e3736 100644 --- a/gtsam/linear/Preconditioner.cpp +++ b/gtsam/linear/Preconditioner.cpp @@ -13,8 +13,6 @@ #include #include #include -#include -#include #include #include @@ -42,7 +40,9 @@ void PreconditionerParameters::print(ostream &os) const { /***************************************************************************************/ PreconditionerParameters::Kernel PreconditionerParameters::kernelTranslator(const std::string &src) { - std::string s = src; boost::algorithm::to_upper(s); + std::string s = src; + // convert string s to upper case + std::transform(s.begin(), s.end(), s.begin(), ::toupper); if (s == "GTSAM") return PreconditionerParameters::GTSAM; else if (s == "CHOLMOD") return PreconditionerParameters::CHOLMOD; /* default is cholmod */ @@ -51,7 +51,9 @@ PreconditionerParameters::Kernel PreconditionerParameters::kernelTranslator(cons /***************************************************************************************/ PreconditionerParameters::Verbosity PreconditionerParameters::verbosityTranslator(const std::string &src) { - std::string s = src; boost::algorithm::to_upper(s); + std::string s = src; + // convert string to upper case + std::transform(s.begin(), s.end(), s.begin(), ::toupper); if (s == "SILENT") return PreconditionerParameters::SILENT; else if (s == "COMPLEXITY") return PreconditionerParameters::COMPLEXITY; else if (s == "ERROR") return PreconditionerParameters::ERROR; @@ -145,8 +147,9 @@ void BlockJacobiPreconditioner::build( /* getting the block diagonals over the factors */ std::map hessianMap =gfg.hessianBlockDiagonal(); - for (const Matrix& hessian: hessianMap | boost::adaptors::map_values) + for (const auto& [key, hessian]: hessianMap) { blocks.push_back(hessian); + } /* if necessary, allocating the memory for cacheing the factorization results */ if ( nnz > bufferSize_ ) { diff --git a/gtsam/linear/SubgraphBuilder.cpp b/gtsam/linear/SubgraphBuilder.cpp index 97d681547..834fc8d12 100644 --- a/gtsam/linear/SubgraphBuilder.cpp +++ b/gtsam/linear/SubgraphBuilder.cpp @@ -23,11 +23,11 @@ #include #include #include +#include -#include +#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION #include #include -#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION #include #endif @@ -52,28 +52,6 @@ using std::vector; namespace gtsam { -/*****************************************************************************/ -/* sort the container and return permutation index with default comparator */ -template -static vector sort_idx(const Container &src) { - typedef typename Container::value_type T; - const size_t n = src.size(); - vector > tmp; - tmp.reserve(n); - for (size_t i = 0; i < n; i++) tmp.emplace_back(i, src[i]); - - /* sort */ - std::stable_sort(tmp.begin(), tmp.end()); - - /* copy back */ - vector idx; - idx.reserve(n); - for (size_t i = 0; i < n; i++) { - 
idx.push_back(tmp[i].first); - } - return idx; -} - /****************************************************************************/ Subgraph::Subgraph(const vector &indices) { edges_.reserve(indices.size()); @@ -154,7 +132,8 @@ ostream &operator<<(ostream &os, const SubgraphBuilderParameters &p) { SubgraphBuilderParameters::Skeleton SubgraphBuilderParameters::skeletonTranslator(const std::string &src) { std::string s = src; - boost::algorithm::to_upper(s); + // convert to upper case + std::transform(s.begin(), s.end(), s.begin(), ::toupper); if (s == "NATURALCHAIN") return NATURALCHAIN; else if (s == "BFS") @@ -182,7 +161,8 @@ std::string SubgraphBuilderParameters::skeletonTranslator(Skeleton s) { SubgraphBuilderParameters::SkeletonWeight SubgraphBuilderParameters::skeletonWeightTranslator(const std::string &src) { std::string s = src; - boost::algorithm::to_upper(s); + // convert to upper case + std::transform(s.begin(), s.end(), s.begin(), ::toupper); if (s == "EQUAL") return EQUAL; else if (s == "RHS") @@ -217,7 +197,8 @@ SubgraphBuilderParameters::AugmentationWeight SubgraphBuilderParameters::augmentationWeightTranslator( const std::string &src) { std::string s = src; - boost::algorithm::to_upper(s); + // convert to upper case + std::transform(s.begin(), s.end(), s.begin(), ::toupper); if (s == "SKELETON") return SKELETON; // else if (s == "STRETCH") return STRETCH; // else if (s == "GENERALIZED_STRETCH") return GENERALIZED_STRETCH; @@ -240,7 +221,6 @@ std::string SubgraphBuilderParameters::augmentationWeightTranslator( /****************************************************************/ vector SubgraphBuilder::buildTree(const GaussianFactorGraph &gfg, - const FastMap &ordering, const vector &weights) const { const SubgraphBuilderParameters &p = parameters_; switch (p.skeletonType) { @@ -251,7 +231,7 @@ vector SubgraphBuilder::buildTree(const GaussianFactorGraph &gfg, return bfs(gfg); break; case SubgraphBuilderParameters::KRUSKAL: - return kruskal(gfg, ordering, weights); + return kruskal(gfg, weights); break; default: std::cerr << "SubgraphBuilder::buildTree undefined skeleton type" << endl; @@ -327,33 +307,8 @@ vector SubgraphBuilder::bfs(const GaussianFactorGraph &gfg) const { /****************************************************************/ vector SubgraphBuilder::kruskal(const GaussianFactorGraph &gfg, - const FastMap &ordering, const vector &weights) const { - const VariableIndex variableIndex(gfg); - const size_t n = variableIndex.size(); - const vector sortedIndices = sort_idx(weights); - - /* initialize buffer */ - vector treeIndices; - treeIndices.reserve(n - 1); - - // container for acsendingly sorted edges - DSFVector dsf(n); - - size_t count = 0; - for (const size_t index : sortedIndices) { - const GaussianFactor &gf = *gfg[index]; - const auto keys = gf.keys(); - if (keys.size() != 2) continue; - const size_t u = ordering.find(keys[0])->second, - v = ordering.find(keys[1])->second; - if (dsf.find(u) != dsf.find(v)) { - dsf.merge(u, v); - treeIndices.push_back(index); - if (++count == n - 1) break; - } - } - return treeIndices; + return utils::kruskal(gfg, weights); } /****************************************************************/ @@ -382,7 +337,7 @@ Subgraph SubgraphBuilder::operator()(const GaussianFactorGraph &gfg) const { vector weights = this->weights(gfg); // Build spanning tree. 
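// Illustrative sketch (not part of the patch): the translator functions above replace
// boost::algorithm::to_upper(s) with std::transform over the string. The patch passes
// ::toupper directly, which is fine for the ASCII option names used here; the lambda
// below casts through unsigned char, which keeps std::toupper well defined in general.
#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>

int main() {
  std::string s = "cholmod";
  std::transform(s.begin(), s.end(), s.begin(),
                 [](unsigned char c) { return static_cast<char>(std::toupper(c)); });
  std::cout << s << "\n";  // prints CHOLMOD
  return 0;
}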
- const vector tree = buildTree(gfg, forward_ordering, weights); + const vector tree = buildTree(gfg, weights); if (tree.size() != n - 1) { throw std::runtime_error( "SubgraphBuilder::operator() failure: tree.size() != n-1, might be caused by disconnected graph"); @@ -406,6 +361,7 @@ Subgraph SubgraphBuilder::operator()(const GaussianFactorGraph &gfg) const { /****************************************************************/ SubgraphBuilder::Weights SubgraphBuilder::weights( const GaussianFactorGraph &gfg) const { + const size_t m = gfg.size(); Weights weight; weight.reserve(m); @@ -473,7 +429,7 @@ std::pair splitFactorGraph( remaining.remove(e.index); } - return std::make_pair(subgraphFactors, remaining); + return {subgraphFactors, remaining}; } /*****************************************************************************/ diff --git a/gtsam/linear/SubgraphBuilder.h b/gtsam/linear/SubgraphBuilder.h index fe8f704dc..aafba9306 100644 --- a/gtsam/linear/SubgraphBuilder.h +++ b/gtsam/linear/SubgraphBuilder.h @@ -163,13 +163,11 @@ class GTSAM_EXPORT SubgraphBuilder { private: std::vector buildTree(const GaussianFactorGraph &gfg, - const FastMap &ordering, const std::vector &weights) const; std::vector unary(const GaussianFactorGraph &gfg) const; std::vector natural_chain(const GaussianFactorGraph &gfg) const; std::vector bfs(const GaussianFactorGraph &gfg) const; std::vector kruskal(const GaussianFactorGraph &gfg, - const FastMap &ordering, const std::vector &weights) const; std::vector sample(const std::vector &weights, const size_t t) const; diff --git a/gtsam/linear/SubgraphPreconditioner.cpp b/gtsam/linear/SubgraphPreconditioner.cpp index b0a84bc2e..96f4847b5 100644 --- a/gtsam/linear/SubgraphPreconditioner.cpp +++ b/gtsam/linear/SubgraphPreconditioner.cpp @@ -24,9 +24,6 @@ #include #include -#include -#include - #include using std::cout; @@ -205,7 +202,8 @@ void SubgraphPreconditioner::solve(const Vector &y, Vector &x) const { assert(x.size() == y.size()); /* back substitute */ - for (const auto &cg : boost::adaptors::reverse(Rc1_)) { + for (auto it = std::make_reverse_iterator(Rc1_.end()); it != std::make_reverse_iterator(Rc1_.begin()); ++it) { + auto& cg = *it; /* collect a subvector of x that consists of the parents of cg (S) */ const KeyVector parentKeys(cg->beginParents(), cg->endParents()); const KeyVector frontalKeys(cg->beginFrontals(), cg->endFrontals()); diff --git a/gtsam/linear/SubgraphSolver.cpp b/gtsam/linear/SubgraphSolver.cpp index db8271e8b..722d7a54d 100644 --- a/gtsam/linear/SubgraphSolver.cpp +++ b/gtsam/linear/SubgraphSolver.cpp @@ -34,8 +34,7 @@ namespace gtsam { SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &Ab, const Parameters ¶meters, const Ordering& ordering) : parameters_(parameters) { - GaussianFactorGraph Ab1, Ab2; - std::tie(Ab1, Ab2) = splitGraph(Ab); + const auto [Ab1, Ab2] = splitGraph(Ab); if (parameters_.verbosity()) cout << "Split A into (A1) " << Ab1.size() << " and (A2) " << Ab2.size() << " factors" << endl; diff --git a/gtsam/linear/VectorValues.cpp b/gtsam/linear/VectorValues.cpp index 04792b6ba..2c9ca7f06 100644 --- a/gtsam/linear/VectorValues.cpp +++ b/gtsam/linear/VectorValues.cpp @@ -18,21 +18,10 @@ #include -#include -#include -#include -#include -#include - using namespace std; namespace gtsam { - using boost::combine; - using boost::adaptors::transformed; - using boost::adaptors::map_values; - using boost::accumulate; - /* ************************************************************************ */ VectorValues::VectorValues(const 
VectorValues& first, const VectorValues& second) { @@ -46,16 +35,12 @@ namespace gtsam { /* ************************************************************************ */ VectorValues::VectorValues(const Vector& x, const Dims& dims) { - using Pair = pair; size_t j = 0; - for (const Pair& v : dims) { - Key key; - size_t n; - boost::tie(key, n) = v; + for (const auto& [key,n] : dims) { #ifdef TBB_GREATER_EQUAL_2020 values_.emplace(key, x.segment(j, n)); #else - values_.insert(std::make_pair(key, x.segment(j, n))); + values_.insert({key, x.segment(j, n)}); #endif j += n; } @@ -68,7 +53,7 @@ namespace gtsam { #ifdef TBB_GREATER_EQUAL_2020 values_.emplace(v.key, x.segment(j, v.dimension)); #else - values_.insert(std::make_pair(v.key, x.segment(j, v.dimension))); + values_.insert({v.key, x.segment(j, v.dimension)}); #endif j += v.dimension; } @@ -78,11 +63,11 @@ namespace gtsam { VectorValues VectorValues::Zero(const VectorValues& other) { VectorValues result; - for(const KeyValuePair& v: other) + for(const auto& [key,value]: other) #ifdef TBB_GREATER_EQUAL_2020 - result.values_.emplace(v.first, Vector::Zero(v.second.size())); + result.values_.emplace(key, Vector::Zero(value.size())); #else - result.values_.insert(std::make_pair(v.first, Vector::Zero(v.second.size()))); + result.values_.insert({key, Vector::Zero(value.size())}); #endif return result; } @@ -100,18 +85,18 @@ namespace gtsam { /* ************************************************************************ */ VectorValues& VectorValues::update(const VectorValues& values) { iterator hint = begin(); - for (const KeyValuePair& key_value : values) { + for (const auto& [key,value] : values) { // Use this trick to find the value using a hint, since we are inserting // from another sorted map size_t oldSize = values_.size(); - hint = values_.insert(hint, key_value); + hint = values_.insert(hint, {key, value}); if (values_.size() > oldSize) { values_.unsafe_erase(hint); throw out_of_range( "Requested to update a VectorValues with another VectorValues that " "contains keys not present in the first."); } else { - hint->second = key_value.second; + hint->second = value; } } return *this; @@ -131,8 +116,9 @@ namespace gtsam { /* ************************************************************************ */ void VectorValues::setZero() { - for(Vector& v: values_ | map_values) - v.setZero(); + for(auto& [key, value] : *this) { + value.setZero(); + } } /* ************************************************************************ */ @@ -140,16 +126,15 @@ namespace gtsam { // Change print depending on whether we are using TBB #ifdef GTSAM_USE_TBB map sorted; - for (const auto& key_value : v) { - sorted.emplace(key_value.first, key_value.second); + for (const auto& [key,value] : v) { + sorted.emplace(key, value); } - for (const auto& key_value : sorted) + for (const auto& [key,value] : sorted) #else - for (const auto& key_value : v) + for (const auto& [key,value] : v) #endif { - os << " " << StreamedKey(key_value.first) << ": " << key_value.second.transpose() - << "\n"; + os << " " << StreamedKey(key) << ": " << value.transpose() << "\n"; } return os; } @@ -166,9 +151,11 @@ namespace gtsam { bool VectorValues::equals(const VectorValues& x, double tol) const { if(this->size() != x.size()) return false; - for(const auto values: boost::combine(*this, x)) { - if(values.get<0>().first != values.get<1>().first || - !equal_with_abs_tol(values.get<0>().second, values.get<1>().second, tol)) + auto this_it = this->begin(); + auto x_it = x.begin(); + for(; this_it != 
this->end(); ++this_it, ++x_it) { + if(this_it->first != x_it->first || + !equal_with_abs_tol(this_it->second, x_it->second, tol)) return false; } return true; @@ -178,14 +165,15 @@ namespace gtsam { Vector VectorValues::vector() const { // Count dimensions DenseIndex totalDim = 0; - for (const Vector& v : *this | map_values) totalDim += v.size(); + for (const auto& [key, value] : *this) + totalDim += value.size(); // Copy vectors Vector result(totalDim); DenseIndex pos = 0; - for (const Vector& v : *this | map_values) { - result.segment(pos, v.size()) = v; - pos += v.size(); + for (const auto& [key, value] : *this) { + result.segment(pos, value.size()) = value; + pos += value.size(); } return result; @@ -196,7 +184,7 @@ namespace gtsam { { // Count dimensions DenseIndex totalDim = 0; - for(size_t dim: keys | map_values) + for (const auto& [key, dim] : keys) totalDim += dim; Vector result(totalDim); size_t j = 0; @@ -215,19 +203,19 @@ namespace gtsam { /* ************************************************************************ */ namespace internal { - bool structureCompareOp(const boost::tuple& vv) + bool structureCompareOp(const VectorValues::value_type& a, const VectorValues::value_type& b) { - return vv.get<0>().first == vv.get<1>().first - && vv.get<0>().second.size() == vv.get<1>().second.size(); + return a.first == b.first && a.second.size() == b.second.size(); } } /* ************************************************************************ */ bool VectorValues::hasSameStructure(const VectorValues other) const { - return accumulate(combine(*this, other) - | transformed(internal::structureCompareOp), true, logical_and()); + // compare the "other" container with this one, using the structureCompareOp + // and then return true if all elements are compared as equal + return std::equal(this->begin(), this->end(), other.begin(), other.end(), + internal::structureCompareOp); } /* ************************************************************************ */ @@ -236,14 +224,14 @@ namespace gtsam { if(this->size() != v.size()) throw invalid_argument("VectorValues::dot called with a VectorValues of different structure"); double result = 0.0; - typedef boost::tuple ValuePair; - using boost::adaptors::map_values; - for(const ValuePair values: boost::combine(*this, v)) { - assert_throw(values.get<0>().first == values.get<1>().first, - invalid_argument("VectorValues::dot called with a VectorValues of different structure")); - assert_throw(values.get<0>().second.size() == values.get<1>().second.size(), - invalid_argument("VectorValues::dot called with a VectorValues of different structure")); - result += values.get<0>().second.dot(values.get<1>().second); + auto this_it = this->begin(); + auto v_it = v.begin(); + for(; this_it != this->end(); ++this_it, ++v_it) { + assert_throw(this_it->first == v_it->first, + invalid_argument("VectorValues::dot called with a VectorValues of different structure")); + assert_throw(this_it->second.size() == v_it->second.size(), + invalid_argument("VectorValues::dot called with a VectorValues of different structure")); + result += this_it->second.dot(v_it->second); } return result; } @@ -256,9 +244,9 @@ namespace gtsam { /* ************************************************************************ */ double VectorValues::squaredNorm() const { double sumSquares = 0.0; - using boost::adaptors::map_values; - for(const Vector& v: *this | map_values) - sumSquares += v.squaredNorm(); + for(const auto& [key, value]: *this) { + sumSquares += value.squaredNorm(); + } return 
sumSquares; } @@ -276,7 +264,7 @@ namespace gtsam { #ifdef TBB_GREATER_EQUAL_2020 result.values_.emplace(j1->first, j1->second + j2->second); #else - result.values_.insert(std::make_pair(j1->first, j1->second + j2->second)); + result.values_.insert({j1->first, j1->second + j2->second}); #endif return result; @@ -338,7 +326,7 @@ namespace gtsam { #ifdef TBB_GREATER_EQUAL_2020 result.values_.emplace(j1->first, j1->second - j2->second); #else - result.values_.insert(std::make_pair(j1->first, j1->second - j2->second)); + result.values_.insert({j1->first, j1->second - j2->second}); #endif return result; @@ -358,7 +346,7 @@ namespace gtsam { #ifdef TBB_GREATER_EQUAL_2020 result.values_.emplace(key_v.first, a * key_v.second); #else - result.values_.insert(std::make_pair(key_v.first, a * key_v.second)); + result.values_.insert({key_v.first, a * key_v.second}); #endif return result; } @@ -372,8 +360,9 @@ namespace gtsam { /* ************************************************************************ */ VectorValues& VectorValues::operator*=(double alpha) { - for(Vector& v: *this | map_values) - v *= alpha; + for (auto& [key, value]: *this) { + value *= alpha; + } return *this; } diff --git a/gtsam/linear/VectorValues.h b/gtsam/linear/VectorValues.h index bb5bbc794..478cfd770 100644 --- a/gtsam/linear/VectorValues.h +++ b/gtsam/linear/VectorValues.h @@ -80,7 +80,7 @@ namespace gtsam { public: typedef Values::iterator iterator; ///< Iterator over vector values typedef Values::const_iterator const_iterator; ///< Const iterator over vector values - typedef std::shared_ptr shared_ptr; ///< shared_ptr to this class + typedef std::shared_ptr shared_ptr; ///< shared_ptr to this class typedef Values::value_type value_type; ///< Typedef to pair typedef value_type KeyValuePair; ///< Typedef to pair typedef std::map Dims; ///< Keyed vector dimensions @@ -186,7 +186,7 @@ namespace gtsam { #if ! defined(GTSAM_USE_TBB) || defined (TBB_GREATER_EQUAL_2020) return values_.emplace(std::piecewise_construct, std::forward_as_tuple(j), std::forward_as_tuple(args...)); #else - return values_.insert(std::make_pair(j, Vector(std::forward(args)...))); + return values_.insert({j, Vector(std::forward(args)...)}); #endif } @@ -195,7 +195,7 @@ namespace gtsam { * @param value The vector to be inserted. * @param j The index with which the value will be associated. */ iterator insert(Key j, const Vector& value) { - return insert(std::make_pair(j, value)); + return insert({j, value}); } /** Insert all values from \c values. Throws an invalid_argument exception if any keys to be @@ -210,7 +210,7 @@ namespace gtsam { #ifdef TBB_GREATER_EQUAL_2020 return values_.emplace(j, value); #else - return values_.insert(std::make_pair(j, value)); + return values_.insert({j, value}); #endif } diff --git a/gtsam/linear/linearExceptions.cpp b/gtsam/linear/linearExceptions.cpp index ada3a298c..75f284348 100644 --- a/gtsam/linear/linearExceptions.cpp +++ b/gtsam/linear/linearExceptions.cpp @@ -18,8 +18,6 @@ #include #include -#include -#include namespace gtsam { @@ -29,9 +27,8 @@ namespace gtsam { if(!description_) { description_ = String( "\nIndeterminant linear system detected while working near variable\n" - + boost::lexical_cast(j_) + - + " (Symbol: " + boost::lexical_cast( - gtsam::DefaultKeyFormatter(gtsam::Symbol(j_))) + ").\n" + + std::to_string(j_) + + + " (Symbol: " + gtsam::DefaultKeyFormatter(gtsam::Symbol(j_)) + ").\n" "\n\ Thrown when a linear system is ill-posed. 
The most common cause for this\n\ error is having underconstrained variables. Mathematically, the system is\n\ @@ -45,22 +42,21 @@ more information."); /* ************************************************************************* */ const char* InvalidNoiseModel::what() const noexcept { if(description_.empty()) - description_ = (boost::format( - "A JacobianFactor was attempted to be constructed or modified to use a\n" - "noise model of incompatible dimension. The JacobianFactor has\n" - "dimensionality (i.e. length of error vector) %d but the provided noise\n" - "model has dimensionality %d.") % factorDims % noiseModelDims).str(); + description_ = "A JacobianFactor was attempted to be constructed or modified to use a\n" + "noise model of incompatible dimension. The JacobianFactor has\n" + "dimensionality (i.e. length of error vector) " + std::to_string(factorDims) + + " but the provided noise model has dimensionality " + std::to_string(noiseModelDims) + "."; return description_.c_str(); } /* ************************************************************************* */ const char* InvalidMatrixBlock::what() const noexcept { - if(description_.empty()) - description_ = (boost::format( - "A JacobianFactor was attempted to be constructed with a matrix block of\n" - "inconsistent dimension. The JacobianFactor has %d rows (i.e. length of\n" - "error vector) but the provided matrix block has %d rows.") - % factorRows % blockRows).str(); + if(description_.empty()) { + description_ = "A JacobianFactor was attempted to be constructed with a matrix block of\n" + "inconsistent dimension. The JacobianFactor has " + std::to_string(factorRows) + + " rows (i.e. length of error vector) but the provided matrix block has " + + std::to_string(blockRows) + " rows."; + } return description_.c_str(); } diff --git a/gtsam/linear/tests/testGaussianBayesNet.cpp b/gtsam/linear/tests/testGaussianBayesNet.cpp index ec208fa7b..966b70915 100644 --- a/gtsam/linear/tests/testGaussianBayesNet.cpp +++ b/gtsam/linear/tests/testGaussianBayesNet.cpp @@ -24,8 +24,6 @@ #include #include -#include -#include // STL/C++ #include @@ -51,8 +49,7 @@ static GaussianBayesNet noisyBayesNet = { /* ************************************************************************* */ TEST( GaussianBayesNet, Matrix ) { - Matrix R; Vector d; - boost::tie(R,d) = smallBayesNet.matrix(); // find matrix and RHS + const auto [R, d] = smallBayesNet.matrix(); // find matrix and RHS Matrix R1 = (Matrix2() << 1.0, 1.0, @@ -101,8 +98,7 @@ TEST(GaussianBayesNet, Evaluate2) { /* ************************************************************************* */ TEST( GaussianBayesNet, NoisyMatrix ) { - Matrix R; Vector d; - boost::tie(R,d) = noisyBayesNet.matrix(); // find matrix and RHS + const auto [R, d] = noisyBayesNet.matrix(); // find matrix and RHS Matrix R1 = (Matrix2() << 0.5, 0.5, @@ -124,9 +120,7 @@ TEST(GaussianBayesNet, Optimize) { /* ************************************************************************* */ TEST(GaussianBayesNet, NoisyOptimize) { - Matrix R; - Vector d; - boost::tie(R, d) = noisyBayesNet.matrix(); // find matrix and RHS + const auto [R, d] = noisyBayesNet.matrix(); // find matrix and RHS const Vector x = R.inverse() * d; const VectorValues expected{{_x_, x.head(1)}, {_y_, x.tail(1)}}; @@ -216,8 +210,7 @@ TEST(GaussianBayesNet, MonteCarloIntegration) { /* ************************************************************************* */ TEST(GaussianBayesNet, ordering) { - Ordering expected; - expected += _x_, _y_; + const Ordering expected{_x_, 
_y_}; const auto actual = noisyBayesNet.ordering(); EXPECT(assert_equal(expected, actual)); } @@ -237,9 +230,7 @@ TEST( GaussianBayesNet, MatrixStress ) KeyVector({_y_, _x_, _z_}), KeyVector({_y_, _z_, _x_}), KeyVector({_z_, _x_, _y_}), KeyVector({_z_, _y_, _x_})}) { const Ordering ordering(keys); - Matrix R; - Vector d; - boost::tie(R, d) = bn.matrix(ordering); + const auto [R, d] = bn.matrix(ordering); EXPECT(assert_equal(expected.vector(ordering), R.inverse() * d)); } } @@ -286,15 +277,15 @@ TEST( GaussianBayesNet, backSubstituteTransposeNoisy ) TEST( GaussianBayesNet, DeterminantTest ) { GaussianBayesNet cbn; - cbn += GaussianConditional( + cbn.emplace_shared( 0, Vector2(3.0, 4.0), (Matrix2() << 1.0, 3.0, 0.0, 4.0).finished(), 1, (Matrix2() << 2.0, 1.0, 2.0, 3.0).finished(), noiseModel::Isotropic::Sigma(2, 2.0)); - cbn += GaussianConditional( + cbn.emplace_shared( 1, Vector2(5.0, 6.0), (Matrix2() << 1.0, 1.0, 0.0, 3.0).finished(), 2, (Matrix2() << 1.0, 0.0, 5.0, 2.0).finished(), noiseModel::Isotropic::Sigma(2, 2.0)); - cbn += GaussianConditional( + cbn.emplace_shared( 3, Vector2(7.0, 8.0), (Matrix2() << 1.0, 1.0, 0.0, 5.0).finished(), noiseModel::Isotropic::Sigma(2, 2.0)); double expectedDeterminant = 60.0 / 64.0; @@ -317,22 +308,22 @@ TEST(GaussianBayesNet, ComputeSteepestDescentPoint) { // Create an arbitrary Bayes Net GaussianBayesNet gbn; - gbn += GaussianConditional::shared_ptr(new GaussianConditional( + gbn.emplace_shared( 0, Vector2(1.0,2.0), (Matrix2() << 3.0,4.0,0.0,6.0).finished(), 3, (Matrix2() << 7.0,8.0,9.0,10.0).finished(), - 4, (Matrix2() << 11.0,12.0,13.0,14.0).finished())); - gbn += GaussianConditional::shared_ptr(new GaussianConditional( + 4, (Matrix2() << 11.0,12.0,13.0,14.0).finished()); + gbn.emplace_shared( 1, Vector2(15.0,16.0), (Matrix2() << 17.0,18.0,0.0,20.0).finished(), 2, (Matrix2() << 21.0,22.0,23.0,24.0).finished(), - 4, (Matrix2() << 25.0,26.0,27.0,28.0).finished())); - gbn += GaussianConditional::shared_ptr(new GaussianConditional( + 4, (Matrix2() << 25.0,26.0,27.0,28.0).finished()); + gbn.emplace_shared( 2, Vector2(29.0,30.0), (Matrix2() << 31.0,32.0,0.0,34.0).finished(), - 3, (Matrix2() << 35.0,36.0,37.0,38.0).finished())); - gbn += GaussianConditional::shared_ptr(new GaussianConditional( + 3, (Matrix2() << 35.0,36.0,37.0,38.0).finished()); + gbn.emplace_shared( 3, Vector2(39.0,40.0), (Matrix2() << 41.0,42.0,0.0,44.0).finished(), - 4, (Matrix2() << 45.0,46.0,47.0,48.0).finished())); - gbn += GaussianConditional::shared_ptr(new GaussianConditional( - 4, Vector2(49.0,50.0), (Matrix2() << 51.0,52.0,0.0,54.0).finished())); + 4, (Matrix2() << 45.0,46.0,47.0,48.0).finished()); + gbn.emplace_shared( + 4, Vector2(49.0,50.0), (Matrix2() << 51.0,52.0,0.0,54.0).finished()); // Compute the Hessian numerically Matrix hessian = numericalHessian( diff --git a/gtsam/linear/tests/testGaussianBayesTree.cpp b/gtsam/linear/tests/testGaussianBayesTree.cpp index 52a3123bf..899d69814 100644 --- a/gtsam/linear/tests/testGaussianBayesTree.cpp +++ b/gtsam/linear/tests/testGaussianBayesTree.cpp @@ -299,10 +299,10 @@ TEST(GaussianBayesTree, determinant_and_smallestEigenvalue) { GaussianFactorGraph fg; Key x1 = 2, x2 = 0, l1 = 1; SharedDiagonal unit2 = noiseModel::Unit::Create(2); - fg += JacobianFactor(x1, 10 * I_2x2, -1.0 * Vector::Ones(2), unit2); - fg += JacobianFactor(x2, 10 * I_2x2, x1, -10 * I_2x2, Vector2(2.0, -1.0), unit2); - fg += JacobianFactor(l1, 5 * I_2x2, x1, -5 * I_2x2, Vector2(0.0, 1.0), unit2); - fg += JacobianFactor(x2, -5 * I_2x2, l1, 5 * I_2x2, Vector2(-1.0, 
1.5), unit2); + fg.emplace_shared(x1, 10 * I_2x2, -1.0 * Vector::Ones(2), unit2); + fg.emplace_shared(x2, 10 * I_2x2, x1, -10 * I_2x2, Vector2(2.0, -1.0), unit2); + fg.emplace_shared(l1, 5 * I_2x2, x1, -5 * I_2x2, Vector2(0.0, 1.0), unit2); + fg.emplace_shared(x2, -5 * I_2x2, l1, 5 * I_2x2, Vector2(-1.0, 1.5), unit2); // create corresponding Bayes tree: std::shared_ptr bt = fg.eliminateMultifrontal(); @@ -327,15 +327,14 @@ TEST(GaussianBayesTree, LogDeterminant) { GaussianFactorGraph fg; Key x1 = X(1), x2 = X(2), l1 = L(1); SharedDiagonal unit2 = noiseModel::Unit::Create(2); - fg += JacobianFactor(x1, 10 * I_2x2, -1.0 * Vector2::Ones(), unit2); - fg += JacobianFactor(x2, 10 * I_2x2, x1, -10 * I_2x2, Vector2(2.0, -1.0), - unit2); - fg += JacobianFactor(l1, 5 * I_2x2, x1, -5 * I_2x2, Vector2(0.0, 1.0), unit2); - fg += - JacobianFactor(x2, -5 * I_2x2, l1, 5 * I_2x2, Vector2(-1.0, 1.5), unit2); - fg += JacobianFactor(x3, 10 * I_2x2, x2, -10 * I_2x2, Vector2(2.0, -1.0), - unit2); - fg += JacobianFactor(x3, 10 * I_2x2, -1.0 * Vector2::Ones(), unit2); + fg.emplace_shared(x1, 10 * I_2x2, -1.0 * Vector2::Ones(), unit2); + fg.emplace_shared(x2, 10 * I_2x2, x1, -10 * I_2x2, + Vector2(2.0, -1.0), unit2); + fg.emplace_shared(l1, 5 * I_2x2, x1, -5 * I_2x2, Vector2(0.0, 1.0), unit2); + fg.emplace_shared(x2, -5 * I_2x2, l1, 5 * I_2x2, Vector2(-1.0, 1.5), unit2); + fg.emplace_shared(x3, 10 * I_2x2, x2, -10 * I_2x2, + Vector2(2.0, -1.0), unit2); + fg.emplace_shared(x3, 10 * I_2x2, -1.0 * Vector2::Ones(), unit2); // create corresponding Bayes net and Bayes tree: std::shared_ptr bn = fg.eliminateSequential(); diff --git a/gtsam/linear/tests/testGaussianFactorGraph.cpp b/gtsam/linear/tests/testGaussianFactorGraph.cpp index 2ed4e6589..e9e626296 100644 --- a/gtsam/linear/tests/testGaussianFactorGraph.cpp +++ b/gtsam/linear/tests/testGaussianFactorGraph.cpp @@ -51,11 +51,10 @@ TEST(GaussianFactorGraph, initialization) { GaussianFactorGraph fg; SharedDiagonal unit2 = noiseModel::Unit::Create(2); - fg += - JacobianFactor(0, 10*I_2x2, -1.0*Vector::Ones(2), unit2), - JacobianFactor(0, -10*I_2x2,1, 10*I_2x2, Vector2(2.0, -1.0), unit2), - JacobianFactor(0, -5*I_2x2, 2, 5*I_2x2, Vector2(0.0, 1.0), unit2), - JacobianFactor(1, -5*I_2x2, 2, 5*I_2x2, Vector2(-1.0, 1.5), unit2); + fg.emplace_shared(0, 10*I_2x2, -1.0*Vector::Ones(2), unit2); + fg.emplace_shared(0, -10*I_2x2,1, 10*I_2x2, Vector2(2.0, -1.0), unit2); + fg.emplace_shared(0, -5*I_2x2, 2, 5*I_2x2, Vector2(0.0, 1.0), unit2); + fg.emplace_shared(1, -5*I_2x2, 2, 5*I_2x2, Vector2(-1.0, 1.5), unit2); EXPECT_LONGS_EQUAL(4, (long)fg.size()); @@ -155,20 +154,16 @@ TEST(GaussianFactorGraph, matrices) { // jacobian Matrix A = Ab.leftCols(Ab.cols() - 1); Vector b = Ab.col(Ab.cols() - 1); - Matrix actualA; - Vector actualb; - boost::tie(actualA, actualb) = gfg.jacobian(); + const auto [actualA, actualb] = gfg.jacobian(); EXPECT(assert_equal(A, actualA)); EXPECT(assert_equal(b, actualb)); // hessian Matrix L = A.transpose() * A; Vector eta = A.transpose() * b; - Matrix actualL; - Vector actualeta; - boost::tie(actualL, actualeta) = gfg.hessian(); + const auto [actualL, actualEta] = gfg.hessian(); EXPECT(assert_equal(L, actualL)); - EXPECT(assert_equal(eta, actualeta)); + EXPECT(assert_equal(eta, actualEta)); // hessianBlockDiagonal VectorValues expectLdiagonal; // Make explicit that diagonal is sum-squares of columns @@ -190,13 +185,13 @@ static GaussianFactorGraph createSimpleGaussianFactorGraph() { Key x1 = 2, x2 = 0, l1 = 1; SharedDiagonal unit2 = 
noiseModel::Unit::Create(2); // linearized prior on x1: c[_x1_]+x1=0 i.e. x1=-c[_x1_] - fg += JacobianFactor(x1, 10 * I_2x2, -1.0 * Vector::Ones(2), unit2); + fg.emplace_shared(x1, 10 * I_2x2, -1.0 * Vector::Ones(2), unit2); // odometry between x1 and x2: x2-x1=[0.2;-0.1] - fg += JacobianFactor(x2, 10 * I_2x2, x1, -10 * I_2x2, Vector2(2.0, -1.0), unit2); + fg.emplace_shared(x2, 10 * I_2x2, x1, -10 * I_2x2, Vector2(2.0, -1.0), unit2); // measurement between x1 and l1: l1-x1=[0.0;0.2] - fg += JacobianFactor(l1, 5 * I_2x2, x1, -5 * I_2x2, Vector2(0.0, 1.0), unit2); + fg.emplace_shared(l1, 5 * I_2x2, x1, -5 * I_2x2, Vector2(0.0, 1.0), unit2); // measurement between x2 and l1: l1-x2=[-0.2;0.3] - fg += JacobianFactor(x2, -5 * I_2x2, l1, 5 * I_2x2, Vector2(-1.0, 1.5), unit2); + fg.emplace_shared(x2, -5 * I_2x2, l1, 5 * I_2x2, Vector2(-1.0, 1.5), unit2); return fg; } @@ -245,9 +240,7 @@ TEST(GaussianFactorGraph, eliminate_empty) { // eliminate an empty factor GaussianFactorGraph gfg; gfg.add(JacobianFactor()); - GaussianBayesNet::shared_ptr actualBN; - GaussianFactorGraph::shared_ptr remainingGFG; - boost::tie(actualBN, remainingGFG) = gfg.eliminatePartialSequential(Ordering()); + const auto [actualBN, remainingGFG] = gfg.eliminatePartialSequential(Ordering()); // expected Bayes net is empty GaussianBayesNet expectedBN; @@ -263,12 +256,8 @@ TEST(GaussianFactorGraph, eliminate_empty) { /* ************************************************************************* */ TEST(GaussianFactorGraph, matrices2) { GaussianFactorGraph gfg = createSimpleGaussianFactorGraph(); - Matrix A; - Vector b; - boost::tie(A, b) = gfg.jacobian(); - Matrix AtA; - Vector eta; - boost::tie(AtA, eta) = gfg.hessian(); + const auto [A, b] = gfg.jacobian(); + const auto [AtA, eta] = gfg.hessian(); EXPECT(assert_equal(A.transpose() * A, AtA)); EXPECT(assert_equal(A.transpose() * b, eta)); Matrix expectedAtA(6, 6); @@ -314,9 +303,7 @@ TEST(GaussianFactorGraph, multiplyHessianAdd2) { GaussianFactorGraph gfg = createGaussianFactorGraphWithHessianFactor(); // brute force - Matrix AtA; - Vector eta; - boost::tie(AtA, eta) = gfg.hessian(); + const auto [AtA, eta] = gfg.hessian(); Vector X(6); X << 1, 2, 3, 4, 5, 6; Vector Y(6); @@ -341,12 +328,8 @@ TEST(GaussianFactorGraph, multiplyHessianAdd2) { /* ************************************************************************* */ TEST(GaussianFactorGraph, matricesMixed) { GaussianFactorGraph gfg = createGaussianFactorGraphWithHessianFactor(); - Matrix A; - Vector b; - boost::tie(A, b) = gfg.jacobian(); // incorrect ! - Matrix AtA; - Vector eta; - boost::tie(AtA, eta) = gfg.hessian(); // correct + const auto [A, b] = gfg.jacobian(); // incorrect ! 
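// Illustrative sketch (not part of the patch): the recurring test pattern
//   Matrix A; Vector b; boost::tie(A, b) = gfg.jacobian();
// becomes a single C++17 structured-binding declaration, as in the hunks above.
// A std::pair of doubles stands in for the (Matrix, Vector) result of jacobian().
#include <iostream>
#include <utility>

std::pair<double, double> jacobianLike() { return {2.0, -1.0}; }

int main() {
  // Before: double A, b; boost::tie(A, b) = jacobianLike();
  const auto [A, b] = jacobianLike();  // declare and unpack in one statement
  std::cout << A << " " << b << "\n";
  return 0;
}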
+ const auto [AtA, eta] = gfg.hessian(); // correct EXPECT(assert_equal(A.transpose() * A, AtA)); Vector expected = -(Vector(6) << -25, 17.5, 5, -13.5, 29, 4).finished(); EXPECT(assert_equal(expected, eta)); diff --git a/gtsam/linear/tests/testHessianFactor.cpp b/gtsam/linear/tests/testHessianFactor.cpp index 5fd6e833f..90c443fae 100644 --- a/gtsam/linear/tests/testHessianFactor.cpp +++ b/gtsam/linear/tests/testHessianFactor.cpp @@ -293,15 +293,11 @@ TEST(HessianFactor, CombineAndEliminate1) { // perform elimination on jacobian Ordering ordering {1}; - GaussianConditional::shared_ptr expectedConditional; - JacobianFactor::shared_ptr expectedFactor; - boost::tie(expectedConditional, expectedFactor) = jacobian.eliminate(ordering); + const auto [expectedConditional, expectedFactor] = jacobian.eliminate(ordering); CHECK(expectedFactor); // Eliminate - GaussianConditional::shared_ptr actualConditional; - HessianFactor::shared_ptr actualHessian; - boost::tie(actualConditional, actualHessian) = // + const auto [actualConditional, actualHessian] = // EliminateCholesky(gfg, ordering); actualConditional->setModel(false,Vector3(1,1,1)); // add a unit model for comparison @@ -356,15 +352,11 @@ TEST(HessianFactor, CombineAndEliminate2) { // perform elimination on jacobian Ordering ordering {0}; - GaussianConditional::shared_ptr expectedConditional; - JacobianFactor::shared_ptr expectedFactor; - boost::tie(expectedConditional, expectedFactor) = // + const auto [expectedConditional, expectedFactor] = jacobian.eliminate(ordering); // Eliminate - GaussianConditional::shared_ptr actualConditional; - HessianFactor::shared_ptr actualHessian; - boost::tie(actualConditional, actualHessian) = // + const auto [actualConditional, actualHessian] = // EliminateCholesky(gfg, ordering); actualConditional->setModel(false,Vector3(1,1,1)); // add a unit model for comparison @@ -498,7 +490,7 @@ TEST(HessianFactor, gradientAtZero) // test gradient at zero VectorValues expectedG{{0, -g1}, {1, -g2}}; - Matrix A; Vector b; boost::tie(A,b) = factor.jacobian(); + const auto [A, b] = factor.jacobian(); KeyVector keys {0, 1}; EXPECT(assert_equal(-A.transpose()*b, expectedG.vector(keys))); VectorValues actualG = factor.gradientAtZero(); diff --git a/gtsam/linear/tests/testJacobianFactor.cpp b/gtsam/linear/tests/testJacobianFactor.cpp index ad722eb94..0ad77b366 100644 --- a/gtsam/linear/tests/testJacobianFactor.cpp +++ b/gtsam/linear/tests/testJacobianFactor.cpp @@ -25,9 +25,6 @@ #include #include -#include -#include - using namespace std; using namespace gtsam; @@ -36,8 +33,8 @@ using Dims = std::vector; // For constructing block matrices namespace { namespace simple { // Terms we'll use - const vector > terms{ - {5, I_3x3}, {10, 2 * I_3x3}, {15, 3 * I_3x3}}; + using Terms = vector >; + const Terms terms{{5, I_3x3}, {10, 2 * I_3x3}, {15, 3 * I_3x3}}; // RHS and sigmas const Vector b = Vector3(1., 2., 3.); @@ -54,8 +51,7 @@ TEST(JacobianFactor, constructors_and_accessors) // Test for using different numbers of terms { // b vector only constructor - JacobianFactor expected( - boost::make_iterator_range(terms.begin(), terms.begin()), b); + JacobianFactor expected(Terms{}, b); JacobianFactor actual(b); EXPECT(assert_equal(expected, actual)); EXPECT(assert_equal(b, expected.getb())); @@ -65,8 +61,7 @@ TEST(JacobianFactor, constructors_and_accessors) } { // One term constructor - JacobianFactor expected( - boost::make_iterator_range(terms.begin(), terms.begin() + 1), b, noise); + JacobianFactor expected(Terms{terms[0]}, b, noise); 
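// Illustrative sketch (not part of the patch): the testJacobianFactor hunks below
// replace the lazy boost::adaptors::map_keys view over the (key, matrix) terms with
// an explicit loop that copies the keys into a vector before constructing the factor.
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

int main() {
  using Key = std::uint64_t;
  const std::vector<std::pair<Key, double>> terms = {{5, 1.0}, {10, 2.0}, {15, 3.0}};
  // Before: auto keys = terms | boost::adaptors::map_keys;
  std::vector<Key> keys;
  keys.reserve(terms.size());
  for (const auto& term : terms) keys.push_back(term.first);
  for (Key k : keys) std::cout << k << "\n";  // prints 5, 10, 15
  return 0;
}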
JacobianFactor actual(terms[0].first, terms[0].second, b, noise); EXPECT(assert_equal(expected, actual)); LONGS_EQUAL((long)terms[0].first, (long)actual.keys().back()); @@ -78,8 +73,7 @@ TEST(JacobianFactor, constructors_and_accessors) } { // Two term constructor - JacobianFactor expected( - boost::make_iterator_range(terms.begin(), terms.begin() + 2), b, noise); + JacobianFactor expected(Terms{terms[0], terms[1]}, b, noise); JacobianFactor actual(terms[0].first, terms[0].second, terms[1].first, terms[1].second, b, noise); EXPECT(assert_equal(expected, actual)); @@ -92,8 +86,7 @@ TEST(JacobianFactor, constructors_and_accessors) } { // Three term constructor - JacobianFactor expected( - boost::make_iterator_range(terms.begin(), terms.begin() + 3), b, noise); + JacobianFactor expected(Terms{terms[0], terms[1], terms[2]}, b, noise); JacobianFactor actual(terms[0].first, terms[0].second, terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, noise); EXPECT(assert_equal(expected, actual)); @@ -106,8 +99,7 @@ TEST(JacobianFactor, constructors_and_accessors) } { // Test three-term constructor with std::map - JacobianFactor expected( - boost::make_iterator_range(terms.begin(), terms.begin() + 3), b, noise); + JacobianFactor expected(Terms{terms[0], terms[1], terms[2]}, b, noise); map mapTerms; // note order of insertion plays no role: order will be determined by keys mapTerms.insert(terms[2]); @@ -124,14 +116,17 @@ TEST(JacobianFactor, constructors_and_accessors) } { // VerticalBlockMatrix constructor - JacobianFactor expected( - boost::make_iterator_range(terms.begin(), terms.begin() + 3), b, noise); + JacobianFactor expected(Terms{terms[0], terms[1], terms[2]}, b, noise); VerticalBlockMatrix blockMatrix(Dims{3, 3, 3, 1}, 3); blockMatrix(0) = terms[0].second; blockMatrix(1) = terms[1].second; blockMatrix(2) = terms[2].second; blockMatrix(3) = b; - JacobianFactor actual(terms | boost::adaptors::map_keys, blockMatrix, noise); + // get a vector of keys from the terms + vector keys; + for (const auto& term : terms) + keys.push_back(term.first); + JacobianFactor actual(keys, blockMatrix, noise); EXPECT(assert_equal(expected, actual)); LONGS_EQUAL((long)terms[2].first, (long)actual.keys().back()); EXPECT(assert_equal(terms[2].second, actual.getA(actual.end() - 1))); @@ -371,7 +366,7 @@ TEST(JacobianFactor, operators ) EXPECT(assert_equal(expectedX, actualX)); // test gradient at zero - Matrix A; Vector b2; boost::tie(A,b2) = lf.jacobian(); + const auto [A, b2] = lf.jacobian(); VectorValues expectedG; expectedG.insert(1, Vector2(20,-10)); expectedG.insert(2, Vector2(-20, 10)); diff --git a/gtsam/linear/tests/testRegularHessianFactor.cpp b/gtsam/linear/tests/testRegularHessianFactor.cpp index 2e4d2249e..b86451600 100644 --- a/gtsam/linear/tests/testRegularHessianFactor.cpp +++ b/gtsam/linear/tests/testRegularHessianFactor.cpp @@ -66,11 +66,11 @@ TEST(RegularHessianFactor, Constructors) // Test constructor from Gaussian Factor Graph GaussianFactorGraph gfg; - gfg += jf; + gfg.push_back(jf); RegularHessianFactor<2> factor4(gfg); EXPECT(assert_equal(factor, factor4)); GaussianFactorGraph gfg2; - gfg2 += factor; + gfg2.push_back(factor); RegularHessianFactor<2> factor5(gfg); EXPECT(assert_equal(factor, factor5)); diff --git a/gtsam/linear/tests/testRegularJacobianFactor.cpp b/gtsam/linear/tests/testRegularJacobianFactor.cpp index 205d9d092..a21bbf982 100644 --- a/gtsam/linear/tests/testRegularJacobianFactor.cpp +++ b/gtsam/linear/tests/testRegularJacobianFactor.cpp @@ -24,9 +24,6 @@ #include 
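// Illustrative sketch (not part of the patch): the boost::assign style used in the
// tests, e.g. "fg += factor1, factor2;" or "Ordering expected; expected += _x_, _y_;",
// is replaced by brace initialization and explicit push_back/emplace_shared calls.
// A std::vector<int> stands in for the GTSAM containers.
#include <iostream>
#include <vector>

int main() {
  // Before (boost::assign): std::vector<int> keys; keys += 1, 2;
  std::vector<int> keys{1, 2};   // brace-initialize when the contents are known up front
  keys.push_back(3);             // push_back for elements added later
  for (int k : keys) std::cout << k << "\n";
  return 0;
}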
-#include -#include - using namespace std; using namespace gtsam; diff --git a/gtsam/linear/tests/testVectorValues.cpp b/gtsam/linear/tests/testVectorValues.cpp index 49fc56d19..a4e101658 100644 --- a/gtsam/linear/tests/testVectorValues.cpp +++ b/gtsam/linear/tests/testVectorValues.cpp @@ -21,12 +21,9 @@ #include -#include - #include using namespace std; -using boost::adaptors::map_keys; using namespace gtsam; /* ************************************************************************* */ diff --git a/gtsam/navigation/CombinedImuFactor.h b/gtsam/navigation/CombinedImuFactor.h index 94a37c2d2..dc0866045 100644 --- a/gtsam/navigation/CombinedImuFactor.h +++ b/gtsam/navigation/CombinedImuFactor.h @@ -27,7 +27,6 @@ #include #include #include -#include namespace gtsam { @@ -336,6 +335,7 @@ public: OptionalMatrixType H5, OptionalMatrixType H6) const override; private: +#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION /** Serialization function */ friend class boost::serialization::access; template @@ -345,6 +345,7 @@ public: "NoiseModelFactor6", boost::serialization::base_object(*this)); ar& BOOST_SERIALIZATION_NVP(_PIM_); } +#endif public: GTSAM_MAKE_ALIGNED_OPERATOR_NEW diff --git a/gtsam/navigation/ImuFactor.h b/gtsam/navigation/ImuFactor.h index 59ff688af..40bc15e11 100644 --- a/gtsam/navigation/ImuFactor.h +++ b/gtsam/navigation/ImuFactor.h @@ -320,6 +320,7 @@ public: private: +#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION /** Serialization function */ friend class boost::serialization::access; template @@ -329,6 +330,7 @@ private: boost::serialization::base_object(*this)); ar & BOOST_SERIALIZATION_NVP(_PIM_); } +#endif }; // class ImuFactor2 diff --git a/gtsam/navigation/ManifoldPreintegration.cpp b/gtsam/navigation/ManifoldPreintegration.cpp index 526bf6c73..c0c917d9c 100644 --- a/gtsam/navigation/ManifoldPreintegration.cpp +++ b/gtsam/navigation/ManifoldPreintegration.cpp @@ -68,7 +68,7 @@ void ManifoldPreintegration::update(const Vector3& measuredAcc, // Possibly correct for sensor pose Matrix3 D_correctedAcc_acc, D_correctedAcc_omega, D_correctedOmega_omega; if (p().body_P_sensor) - boost::tie(acc, omega) = correctMeasurementsBySensorPose(acc, omega, + std::tie(acc, omega) = correctMeasurementsBySensorPose(acc, omega, D_correctedAcc_acc, D_correctedAcc_omega, D_correctedOmega_omega); // Save current rotation for updating Jacobians diff --git a/gtsam/navigation/TangentPreintegration.cpp b/gtsam/navigation/TangentPreintegration.cpp index e8579daba..a472b2cfd 100644 --- a/gtsam/navigation/TangentPreintegration.cpp +++ b/gtsam/navigation/TangentPreintegration.cpp @@ -112,7 +112,7 @@ void TangentPreintegration::update(const Vector3& measuredAcc, // Possibly correct for sensor pose by converting to body frame Matrix3 D_correctedAcc_acc, D_correctedAcc_omega, D_correctedOmega_omega; if (p().body_P_sensor) - boost::tie(acc, omega) = correctMeasurementsBySensorPose(acc, omega, + std::tie(acc, omega) = correctMeasurementsBySensorPose(acc, omega, D_correctedAcc_acc, D_correctedAcc_omega, D_correctedOmega_omega); // Do update diff --git a/gtsam/navigation/tests/testAttitudeFactor.cpp b/gtsam/navigation/tests/testAttitudeFactor.cpp index 0973fe8ac..f1fde1dca 100644 --- a/gtsam/navigation/tests/testAttitudeFactor.cpp +++ b/gtsam/navigation/tests/testAttitudeFactor.cpp @@ -20,7 +20,6 @@ #include #include -#include #include #include "gtsam/base/Matrix.h" #include diff --git a/gtsam/navigation/tests/testBarometricFactor.cpp b/gtsam/navigation/tests/testBarometricFactor.cpp index 85ca25075..c7e9032fe 100644 --- 
a/gtsam/navigation/tests/testBarometricFactor.cpp +++ b/gtsam/navigation/tests/testBarometricFactor.cpp @@ -21,8 +21,6 @@ #include #include -#include - using namespace std::placeholders; using namespace std; using namespace gtsam; diff --git a/gtsam/navigation/tests/testGPSFactor.cpp b/gtsam/navigation/tests/testGPSFactor.cpp index 741ec4dfd..cecfbeaad 100644 --- a/gtsam/navigation/tests/testGPSFactor.cpp +++ b/gtsam/navigation/tests/testGPSFactor.cpp @@ -123,9 +123,7 @@ TEST(GPSData, init) { Point3 NED2(N, E, -U); // Estimate initial state - Pose3 T; - Vector3 nV; - boost::tie(T, nV) = GPSFactor::EstimateState(t1, NED1, t2, NED2, 84831.0796); + const auto [T, nV] = GPSFactor::EstimateState(t1, NED1, t2, NED2, 84831.0796); // Check values values EXPECT(assert_equal((Vector )Vector3(29.9575, -29.0564, -1.95993), nV, 1e-4)); diff --git a/gtsam/navigation/tests/testGeographicLib.cpp b/gtsam/navigation/tests/testGeographicLib.cpp index c568c7445..9dcc7e4e8 100644 --- a/gtsam/navigation/tests/testGeographicLib.cpp +++ b/gtsam/navigation/tests/testGeographicLib.cpp @@ -23,7 +23,6 @@ #include #include -#include #include #include @@ -71,7 +70,8 @@ TEST( GeographicLib, UTM) { // Obtained by // http://geographiclib.sourceforge.net/cgi-bin/GeoConvert?input=33.87071+-84.30482000000001&zone=-3&prec=2&option=Submit auto actual = UTMUPS::EncodeZone(zone, northp); - boost::to_upper(actual); + // transform to upper case + std::transform(actual.begin(), actual.end(), actual.begin(), ::toupper); EXPECT(actual=="16N"); EXPECT_DOUBLES_EQUAL(749305.58, x, 1e-2); EXPECT_DOUBLES_EQUAL(3751090.08, y, 1e-2); diff --git a/gtsam/navigation/tests/testImuFactor.cpp b/gtsam/navigation/tests/testImuFactor.cpp index aa2559575..3c8f426cb 100644 --- a/gtsam/navigation/tests/testImuFactor.cpp +++ b/gtsam/navigation/tests/testImuFactor.cpp @@ -30,7 +30,6 @@ #include #include -#include #include #include "imuFactorTesting.h" diff --git a/gtsam/navigation/tests/testMagFactor.cpp b/gtsam/navigation/tests/testMagFactor.cpp index f15e00456..2b5dee4ff 100644 --- a/gtsam/navigation/tests/testMagFactor.cpp +++ b/gtsam/navigation/tests/testMagFactor.cpp @@ -20,8 +20,6 @@ #include #include -#include - #include #include diff --git a/gtsam/navigation/tests/testNavState.cpp b/gtsam/navigation/tests/testNavState.cpp index e658639b0..b8f081518 100644 --- a/gtsam/navigation/tests/testNavState.cpp +++ b/gtsam/navigation/tests/testNavState.cpp @@ -20,7 +20,6 @@ #include #include -#include #include using namespace std::placeholders; diff --git a/gtsam/nonlinear/AdaptAutoDiff.h b/gtsam/nonlinear/AdaptAutoDiff.h index b616a0e63..8f67eb379 100644 --- a/gtsam/nonlinear/AdaptAutoDiff.h +++ b/gtsam/nonlinear/AdaptAutoDiff.h @@ -22,9 +22,6 @@ #include #include -#include -#include - namespace gtsam { /** diff --git a/gtsam/nonlinear/DoglegOptimizer.cpp b/gtsam/nonlinear/DoglegOptimizer.cpp index 654c1ad33..8899685de 100644 --- a/gtsam/nonlinear/DoglegOptimizer.cpp +++ b/gtsam/nonlinear/DoglegOptimizer.cpp @@ -24,13 +24,13 @@ #include #include -#include - namespace gtsam { /* ************************************************************************* */ DoglegParams::VerbosityDL DoglegParams::verbosityDLTranslator(const std::string &verbosityDL) const { - std::string s = verbosityDL; boost::algorithm::to_upper(s); + std::string s = verbosityDL; + // convert to upper case + std::transform(s.begin(), s.end(), s.begin(), ::toupper); if (s == "SILENT") return DoglegParams::SILENT; if (s == "VERBOSE") return DoglegParams::VERBOSE; diff --git 
a/gtsam/nonlinear/Expression-inl.h b/gtsam/nonlinear/Expression-inl.h index 35eef342c..6cc0d408e 100644 --- a/gtsam/nonlinear/Expression-inl.h +++ b/gtsam/nonlinear/Expression-inl.h @@ -19,12 +19,14 @@ #pragma once -#include +// The MSVC compiler workaround for the unsupported variable length array +// utilizes the std::unique_ptr<> custom deleter. +// See Expression::valueAndJacobianMap() below. +#ifdef _MSC_VER +#include +#endif -#include -#include -#include -#include +#include namespace gtsam { @@ -141,9 +143,7 @@ T Expression::value(const Values& values, std::vector* H) const { if (H) { // Call private version that returns derivatives in H - KeyVector keys; - FastVector dims; - boost::tie(keys, dims) = keysAndDims(); + const auto [keys, dims] = keysAndDims(); return valueAndDerivatives(values, keys, dims, *H); } else // no derivatives needed, just return value @@ -207,7 +207,10 @@ T Expression::valueAndJacobianMap(const Values& values, // allocated on Visual Studio. For more information see the issue below // https://bitbucket.org/gtborg/gtsam/issue/178/vlas-unsupported-in-visual-studio #ifdef _MSC_VER - auto traceStorage = static_cast(_aligned_malloc(size, internal::TraceAlignment)); + std::unique_ptr traceStorageDeleter( + _aligned_malloc(size, internal::TraceAlignment), + [](void *ptr){ _aligned_free(ptr); }); + auto traceStorage = static_cast(traceStorageDeleter.get()); #else internal::ExecutionTraceStorage traceStorage[size]; #endif @@ -216,10 +219,6 @@ T Expression::valueAndJacobianMap(const Values& values, T value(this->traceExecution(values, trace, traceStorage)); trace.startReverseAD1(jacobians); -#ifdef _MSC_VER - _aligned_free(traceStorage); -#endif - return value; } @@ -228,9 +227,14 @@ typename Expression::KeysAndDims Expression::keysAndDims() const { std::map map; dims(map); size_t n = map.size(); - KeysAndDims pair = std::make_pair(KeyVector(n), FastVector(n)); - boost::copy(map | boost::adaptors::map_keys, pair.first.begin()); - boost::copy(map | boost::adaptors::map_values, pair.second.begin()); + KeysAndDims pair = {KeyVector(n), FastVector(n)}; + // Copy map into pair of vectors + auto key_it = pair.first.begin(); + auto dim_it = pair.second.begin(); + for (const auto& [key, value] : map) { + *key_it++ = key; + *dim_it++ = value; + } return pair; } diff --git a/gtsam/nonlinear/Expression.h b/gtsam/nonlinear/Expression.h index 6e3c634cc..fb8087133 100644 --- a/gtsam/nonlinear/Expression.h +++ b/gtsam/nonlinear/Expression.h @@ -218,7 +218,7 @@ protected: template class ScalarMultiplyExpression : public Expression { // Check that T is a vector space - BOOST_CONCEPT_ASSERT((gtsam::IsVectorSpace)); + GTSAM_CONCEPT_ASSERT(gtsam::IsVectorSpace); public: explicit ScalarMultiplyExpression(double s, const Expression& e); @@ -231,7 +231,7 @@ class ScalarMultiplyExpression : public Expression { template class BinarySumExpression : public Expression { // Check that T is a vector space - BOOST_CONCEPT_ASSERT((gtsam::IsVectorSpace)); + GTSAM_CONCEPT_ASSERT(gtsam::IsVectorSpace); public: explicit BinarySumExpression(const Expression& e1, const Expression& e2); diff --git a/gtsam/nonlinear/ExpressionFactor.h b/gtsam/nonlinear/ExpressionFactor.h index 8c4e8ce86..269bdf924 100644 --- a/gtsam/nonlinear/ExpressionFactor.h +++ b/gtsam/nonlinear/ExpressionFactor.h @@ -42,7 +42,7 @@ namespace gtsam { */ template class ExpressionFactor : public NoiseModelFactor { - BOOST_CONCEPT_ASSERT((IsTestable)); + GTSAM_CONCEPT_ASSERT(IsTestable); protected: @@ -180,7 +180,7 @@ protected: if 
(keys_.empty()) { // This is the case when called in ExpressionFactor Constructor. // We then take the keys from the expression in sorted order. - boost::tie(keys_, dims_) = expression_.keysAndDims(); + std::tie(keys_, dims_) = expression_.keysAndDims(); } else { // This happens with classes derived from BinaryExpressionFactor etc. // In that case, the keys_ are already defined and we just need to grab @@ -288,6 +288,7 @@ private: return expression(keys); } +#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION friend class boost::serialization::access; template void serialize(ARCHIVE &ar, const unsigned int /*version*/) { @@ -295,6 +296,7 @@ private: "ExpressionFactorN", boost::serialization::base_object>(*this)); } +#endif }; /// traits template diff --git a/gtsam/nonlinear/ExtendedKalmanFilter-inl.h b/gtsam/nonlinear/ExtendedKalmanFilter-inl.h index f252454ef..5bf643c12 100644 --- a/gtsam/nonlinear/ExtendedKalmanFilter-inl.h +++ b/gtsam/nonlinear/ExtendedKalmanFilter-inl.h @@ -35,8 +35,7 @@ namespace gtsam { // Compute the marginal on the last key // Solve the linear factor graph, converting it into a linear Bayes Network // P(x0,x1) = P(x0|x1)*P(x1) - Ordering lastKeyAsOrdering; - lastKeyAsOrdering += lastKey; + const Ordering lastKeyAsOrdering{lastKey}; const GaussianConditional::shared_ptr marginal = linearFactorGraph.marginalMultifrontalBayesNet(lastKeyAsOrdering)->front(); diff --git a/gtsam/nonlinear/ExtendedKalmanFilter.h b/gtsam/nonlinear/ExtendedKalmanFilter.h index e931fc561..a270bc4b1 100644 --- a/gtsam/nonlinear/ExtendedKalmanFilter.h +++ b/gtsam/nonlinear/ExtendedKalmanFilter.h @@ -44,8 +44,8 @@ namespace gtsam { template class ExtendedKalmanFilter { // Check that VALUE type is a testable Manifold - BOOST_CONCEPT_ASSERT((IsTestable)); - BOOST_CONCEPT_ASSERT((IsManifold)); + GTSAM_CONCEPT_ASSERT1(IsTestable); + GTSAM_CONCEPT_ASSERT2(IsManifold); public: typedef std::shared_ptr > shared_ptr; diff --git a/gtsam/nonlinear/ISAM2-impl.cpp b/gtsam/nonlinear/ISAM2-impl.cpp index a809cbc08..20874e2dc 100644 --- a/gtsam/nonlinear/ISAM2-impl.cpp +++ b/gtsam/nonlinear/ISAM2-impl.cpp @@ -22,7 +22,6 @@ #include // for selective linearization thresholds #include -#include #include #include #include diff --git a/gtsam/nonlinear/ISAM2-impl.h b/gtsam/nonlinear/ISAM2-impl.h index 8c0af2c2d..e9a9696eb 100644 --- a/gtsam/nonlinear/ISAM2-impl.h +++ b/gtsam/nonlinear/ISAM2-impl.h @@ -28,17 +28,11 @@ #include #include -#include -#include -namespace br { -using namespace boost::range; -using namespace boost::adaptors; -} // namespace br - #include #include #include #include +#include namespace gtsam { @@ -320,13 +314,14 @@ struct GTSAM_EXPORT UpdateImpl { const ISAM2Params::RelinearizationThreshold& relinearizeThreshold) { KeySet relinKeys; for (const ISAM2::sharedClique& root : roots) { - if (relinearizeThreshold.type() == typeid(double)) + if (std::holds_alternative(relinearizeThreshold)) { CheckRelinearizationRecursiveDouble( - boost::get(relinearizeThreshold), delta, root, &relinKeys); - else if (relinearizeThreshold.type() == typeid(FastMap)) + std::get(relinearizeThreshold), delta, root, &relinKeys); + } else if (std::holds_alternative>(relinearizeThreshold)) { CheckRelinearizationRecursiveMap( - boost::get >(relinearizeThreshold), delta, + std::get >(relinearizeThreshold), delta, root, &relinKeys); + } } return relinKeys; } @@ -347,13 +342,13 @@ struct GTSAM_EXPORT UpdateImpl { const ISAM2Params::RelinearizationThreshold& relinearizeThreshold) { KeySet relinKeys; - if (const double* threshold = 
boost::get(&relinearizeThreshold)) { + if (const double* threshold = std::get_if(&relinearizeThreshold)) { for (const VectorValues::KeyValuePair& key_delta : delta) { double maxDelta = key_delta.second.lpNorm(); if (maxDelta >= *threshold) relinKeys.insert(key_delta.first); } } else if (const FastMap* thresholds = - boost::get >(&relinearizeThreshold)) { + std::get_if >(&relinearizeThreshold)) { for (const VectorValues::KeyValuePair& key_delta : delta) { const Vector& threshold = thresholds->find(Symbol(key_delta.first).chr())->second; diff --git a/gtsam/nonlinear/ISAM2.cpp b/gtsam/nonlinear/ISAM2.cpp index dafe938c6..f4c61e659 100644 --- a/gtsam/nonlinear/ISAM2.cpp +++ b/gtsam/nonlinear/ISAM2.cpp @@ -28,6 +28,7 @@ #include #include #include +#include using namespace std; @@ -38,16 +39,18 @@ template class BayesTree; /* ************************************************************************* */ ISAM2::ISAM2(const ISAM2Params& params) : params_(params), update_count_(0) { - if (params_.optimizationParams.type() == typeid(ISAM2DoglegParams)) + if (std::holds_alternative(params_.optimizationParams)) { doglegDelta_ = - boost::get(params_.optimizationParams).initialDelta; + std::get(params_.optimizationParams).initialDelta; + } } /* ************************************************************************* */ ISAM2::ISAM2() : update_count_(0) { - if (params_.optimizationParams.type() == typeid(ISAM2DoglegParams)) + if (std::holds_alternative(params_.optimizationParams)) { doglegDelta_ = - boost::get(params_.optimizationParams).initialDelta; + std::get(params_.optimizationParams).initialDelta; + } } /* ************************************************************************* */ @@ -176,9 +179,11 @@ void ISAM2::recalculateBatch(const ISAM2UpdateParams& updateParams, gttic(recalculateBatch); gttic(add_keys); - br::copy(variableIndex_ | br::map_keys, - std::inserter(*affectedKeysSet, affectedKeysSet->end())); + // copy the keys from the variableIndex_ to the affectedKeysSet + for (const auto& [key, _] : variableIndex_) { + affectedKeysSet->insert(key); + } // Removed unused keys: VariableIndex affectedFactorsVarIndex = variableIndex_; @@ -289,8 +294,7 @@ void ISAM2::recalculateIncremental(const ISAM2UpdateParams& updateParams, gttic(orphans); // Add the orphaned subtrees for (const auto& orphan : *orphans) - factors += - std::make_shared >(orphan); + factors.emplace_shared >(orphan); gttoc(orphans); // 3. Re-order and eliminate the factor graph into a Bayes net (Algorithm @@ -319,7 +323,7 @@ void ISAM2::recalculateIncremental(const ISAM2UpdateParams& updateParams, const int group = result->observedKeys.size() < affectedFactorsVarIndex.size() ? 1 : 0; for (Key var : result->observedKeys) - constraintGroups.insert(std::make_pair(var, group)); + constraintGroups.emplace(var, group); } // Remove unaffected keys from the constraints @@ -700,10 +704,10 @@ void ISAM2::marginalizeLeaves( // Marked const but actually changes mutable delta void ISAM2::updateDelta(bool forceFullSolve) const { gttic(updateDelta); - if (params_.optimizationParams.type() == typeid(ISAM2GaussNewtonParams)) { + if (std::holds_alternative(params_.optimizationParams)) { // If using Gauss-Newton, update with wildfireThreshold const ISAM2GaussNewtonParams& gaussNewtonParams = - boost::get(params_.optimizationParams); + std::get(params_.optimizationParams); const double effectiveWildfireThreshold = forceFullSolve ? 
0.0 : gaussNewtonParams.wildfireThreshold; gttic(Wildfire_update); @@ -711,11 +715,10 @@ void ISAM2::updateDelta(bool forceFullSolve) const { effectiveWildfireThreshold, &delta_); deltaReplacedMask_.clear(); gttoc(Wildfire_update); - - } else if (params_.optimizationParams.type() == typeid(ISAM2DoglegParams)) { + } else if (std::holds_alternative(params_.optimizationParams)) { // If using Dogleg, do a Dogleg step const ISAM2DoglegParams& doglegParams = - boost::get(params_.optimizationParams); + std::get(params_.optimizationParams); const double effectiveWildfireThreshold = forceFullSolve ? 0.0 : doglegParams.wildfireThreshold; diff --git a/gtsam/nonlinear/ISAM2Clique.cpp b/gtsam/nonlinear/ISAM2Clique.cpp index b3dc49342..d4b050f84 100644 --- a/gtsam/nonlinear/ISAM2Clique.cpp +++ b/gtsam/nonlinear/ISAM2Clique.cpp @@ -330,9 +330,7 @@ void ISAM2Clique::addGradientAtZero(VectorValues* g) const { for (auto it = conditional_->begin(); it != conditional_->end(); ++it) { const DenseIndex dim = conditional_->getDim(it); const Vector contribution = gradientContribution_.segment(position, dim); - VectorValues::iterator values_it; - bool success; - std::tie(values_it, success) = g->tryInsert(*it, contribution); + auto [values_it, success] = g->tryInsert(*it, contribution); if (!success) values_it->second += contribution; position += dim; } diff --git a/gtsam/nonlinear/ISAM2Params.cpp b/gtsam/nonlinear/ISAM2Params.cpp index c768ce427..ec9d5b773 100644 --- a/gtsam/nonlinear/ISAM2Params.cpp +++ b/gtsam/nonlinear/ISAM2Params.cpp @@ -16,7 +16,6 @@ */ #include -#include using namespace std; @@ -46,7 +45,8 @@ DoglegOptimizerImpl::TrustRegionAdaptationMode ISAM2DoglegParams::adaptationModeTranslator( const string& adaptationMode) const { string s = adaptationMode; - boost::algorithm::to_upper(s); + // convert to upper case + std::transform(s.begin(), s.end(), s.begin(), ::toupper); if (s == "SEARCH_EACH_ITERATION") return DoglegOptimizerImpl::SEARCH_EACH_ITERATION; if (s == "ONE_STEP_PER_ITERATION") @@ -60,7 +60,8 @@ ISAM2DoglegParams::adaptationModeTranslator( ISAM2Params::Factorization ISAM2Params::factorizationTranslator( const string& str) { string s = str; - boost::algorithm::to_upper(s); + // convert to upper case + std::transform(s.begin(), s.end(), s.begin(), ::toupper); if (s == "QR") return ISAM2Params::QR; if (s == "CHOLESKY") return ISAM2Params::CHOLESKY; diff --git a/gtsam/nonlinear/ISAM2Params.h b/gtsam/nonlinear/ISAM2Params.h index e35340377..bc79cd456 100644 --- a/gtsam/nonlinear/ISAM2Params.h +++ b/gtsam/nonlinear/ISAM2Params.h @@ -21,8 +21,9 @@ #include #include -#include + #include +#include namespace gtsam { @@ -133,10 +134,10 @@ struct GTSAM_EXPORT ISAM2DoglegParams { typedef FastMap ISAM2ThresholdMap; typedef ISAM2ThresholdMap::value_type ISAM2ThresholdMapValue; struct GTSAM_EXPORT ISAM2Params { - typedef boost::variant + typedef std::variant OptimizationParams; ///< Either ISAM2GaussNewtonParams or ///< ISAM2DoglegParams - typedef boost::variant > + typedef std::variant > RelinearizationThreshold; ///< Either a constant relinearization ///< threshold or a per-variable-type set of ///< thresholds @@ -254,20 +255,21 @@ struct GTSAM_EXPORT ISAM2Params { cout << str << "\n"; static const std::string kStr("optimizationParams: "); - if (optimizationParams.type() == typeid(ISAM2GaussNewtonParams)) - boost::get(optimizationParams).print(); - else if (optimizationParams.type() == typeid(ISAM2DoglegParams)) - boost::get(optimizationParams).print(kStr); - else + if 
(std::holds_alternative(optimizationParams)) { + std::get(optimizationParams).print(); + } else if (std::holds_alternative(optimizationParams)) { + std::get(optimizationParams).print(kStr); + } else { cout << kStr << "{unknown type}\n"; + } cout << "relinearizeThreshold: "; - if (relinearizeThreshold.type() == typeid(double)) { - cout << boost::get(relinearizeThreshold) << "\n"; + if (std::holds_alternative(relinearizeThreshold)) { + cout << std::get(relinearizeThreshold) << "\n"; } else { cout << "{mapped}\n"; for (const ISAM2ThresholdMapValue& value : - boost::get(relinearizeThreshold)) { + std::get(relinearizeThreshold)) { cout << " '" << value.first << "' -> [" << value.second.transpose() << " ]\n"; } diff --git a/gtsam/nonlinear/ISAM2Result.h b/gtsam/nonlinear/ISAM2Result.h index 26049f9da..ef594f6f3 100644 --- a/gtsam/nonlinear/ISAM2Result.h +++ b/gtsam/nonlinear/ISAM2Result.h @@ -27,8 +27,6 @@ #include #include -#include - namespace gtsam { /** diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index 5cbed2a1f..922ba9129 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -26,14 +26,14 @@ #include #include #include +#ifdef GTSAM_USE_BOOST_FEATURES #include - -#include -#include +#endif #include #include #include +#include #include #include @@ -41,7 +41,6 @@ using namespace std; namespace gtsam { -using boost::adaptors::map_values; typedef internal::LevenbergMarquardtState State; /* ************************************************************************* */ @@ -64,7 +63,8 @@ LevenbergMarquardtOptimizer::LevenbergMarquardtOptimizer(const NonlinearFactorGr /* ************************************************************************* */ void LevenbergMarquardtOptimizer::initTime() { - startTime_ = boost::posix_time::microsec_clock::universal_time(); + // use chrono to measure time in microseconds + startTime_ = std::chrono::high_resolution_clock::now(); } /* ************************************************************************* */ @@ -106,9 +106,12 @@ inline void LevenbergMarquardtOptimizer::writeLogFile(double currentError){ if (!params_.logFile.empty()) { ofstream os(params_.logFile.c_str(), ios::app); - boost::posix_time::ptime currentTime = boost::posix_time::microsec_clock::universal_time(); + // use chrono to measure time in microseconds + auto currentTime = std::chrono::high_resolution_clock::now(); + // Get the time spent in seconds and print it + double timeSpent = std::chrono::duration_cast(currentTime - startTime_).count() / 1e6; os << /*inner iterations*/ currentState->totalNumberInnerIterations << "," - << 1e-6 * (currentTime - startTime_).total_microseconds() << "," + << timeSpent << "," << /*current error*/ currentError << "," << currentState->lambda << "," << /*outer iterations*/ currentState->iterations << endl; } @@ -120,6 +123,7 @@ bool LevenbergMarquardtOptimizer::tryLambda(const GaussianFactorGraph& linear, auto currentState = static_cast(state_.get()); bool verbose = (params_.verbosityLM >= LevenbergMarquardtParams::TRYLAMBDA); +#ifdef GTSAM_USE_BOOST_FEATURES #ifdef GTSAM_USING_NEW_BOOST_TIMERS boost::timer::cpu_timer lamda_iteration_timer; lamda_iteration_timer.start(); @@ -127,6 +131,9 @@ bool LevenbergMarquardtOptimizer::tryLambda(const GaussianFactorGraph& linear, boost::timer lamda_iteration_timer; lamda_iteration_timer.restart(); #endif +#else + auto start = std::chrono::high_resolution_clock::now(); +#endif if (verbose) cout << 
"trying lambda = " << currentState->lambda << endl; @@ -214,20 +221,24 @@ bool LevenbergMarquardtOptimizer::tryLambda(const GaussianFactorGraph& linear, } // if (systemSolvedSuccessfully) if (params_.verbosityLM == LevenbergMarquardtParams::SUMMARY) { +#ifdef GTSAM_USE_BOOST_FEATURES // do timing #ifdef GTSAM_USING_NEW_BOOST_TIMERS double iterationTime = 1e-9 * lamda_iteration_timer.elapsed().wall; #else double iterationTime = lamda_iteration_timer.elapsed(); #endif - if (currentState->iterations == 0) +#else + auto end = std::chrono::high_resolution_clock::now(); + double iterationTime = std::chrono::duration_cast(end - start).count() / 1e6; +#endif + if (currentState->iterations == 0) { cout << "iter cost cost_change lambda success iter_time" << endl; - - cout << boost::format("% 4d % 8e % 3.2e % 3.2e % 4d % 3.2e") % currentState->iterations % - newError % costChange % currentState->lambda % systemSolvedSuccessfully % - iterationTime << endl; + } + cout << setw(4) << currentState->iterations << " " << setw(8) << newError << " " << setw(3) << setprecision(2) + << costChange << " " << setw(3) << setprecision(2) << currentState->lambda << " " << setw(4) + << systemSolvedSuccessfully << " " << setw(3) << setprecision(2) << iterationTime << endl; } - if (step_is_successful) { // we have successfully decreased the cost and we have good modelFidelity // NOTE(frank): As we return immediately after this, we move the newValues @@ -281,8 +292,8 @@ GaussianFactorGraph::shared_ptr LevenbergMarquardtOptimizer::iterate() { VectorValues sqrtHessianDiagonal; if (params_.diagonalDamping) { sqrtHessianDiagonal = linear->hessianDiagonal(); - for (Vector& v : sqrtHessianDiagonal | map_values) { - v = v.cwiseMax(params_.minDiagonal).cwiseMin(params_.maxDiagonal).cwiseSqrt(); + for (auto& [key, value] : sqrtHessianDiagonal) { + value = value.cwiseMax(params_.minDiagonal).cwiseMin(params_.maxDiagonal).cwiseSqrt(); } } diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index b6b981d65..826eed5d6 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -23,7 +23,7 @@ #include #include #include -#include +#include class NonlinearOptimizerMoreOptimizationTest; @@ -36,7 +36,9 @@ class GTSAM_EXPORT LevenbergMarquardtOptimizer: public NonlinearOptimizer { protected: const LevenbergMarquardtParams params_; ///< LM parameters - boost::posix_time::ptime startTime_; + + // startTime_ is a chrono time point + std::chrono::time_point startTime_; ///< time when optimization started void initTime(); diff --git a/gtsam/nonlinear/LevenbergMarquardtParams.cpp b/gtsam/nonlinear/LevenbergMarquardtParams.cpp index caa2cc083..39b773173 100644 --- a/gtsam/nonlinear/LevenbergMarquardtParams.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtParams.cpp @@ -19,8 +19,6 @@ */ #include -#include -#include #include #include @@ -32,7 +30,8 @@ namespace gtsam { LevenbergMarquardtParams::VerbosityLM LevenbergMarquardtParams::verbosityLMTranslator( const std::string &src) { std::string s = src; - boost::algorithm::to_upper(s); + // convert to upper case + std::transform(s.begin(), s.end(), s.begin(), ::toupper); if (s == "SILENT") return LevenbergMarquardtParams::SILENT; if (s == "SUMMARY") diff --git a/gtsam/nonlinear/LinearContainerFactor.cpp b/gtsam/nonlinear/LinearContainerFactor.cpp index 57136214d..160423a64 100644 --- a/gtsam/nonlinear/LinearContainerFactor.cpp +++ b/gtsam/nonlinear/LinearContainerFactor.cpp @@ -213,7 +213,7 @@ 
NonlinearFactorGraph LinearContainerFactor::ConvertLinearGraph( result.reserve(linear_graph.size()); for (const auto& f : linear_graph) if (f) - result += std::make_shared(f, linearizationPoint); + result.emplace_shared(f, linearizationPoint); return result; } diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp index 54db42b79..0b1a43815 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp @@ -63,9 +63,7 @@ NonlinearConjugateGradientOptimizer::System::State NonlinearConjugateGradientOpt } GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() { - Values newValues; - int dummy; - boost::tie(newValues, dummy) = nonlinearConjugateGradient( + const auto [newValues, dummy] = nonlinearConjugateGradient( System(graph_), state_->values, params_, true /* single iteration */); state_.reset(new State(newValues, graph_.error(newValues), state_->iterations + 1)); @@ -76,9 +74,7 @@ GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() { const Values& NonlinearConjugateGradientOptimizer::optimize() { // Optimize until convergence System system(graph_); - Values newValues; - int iterations; - boost::tie(newValues, iterations) = + const auto [newValues, iterations] = nonlinearConjugateGradient(system, state_->values, params_, false); state_.reset(new State(std::move(newValues), graph_.error(newValues), iterations)); return state_->values; diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h index 780d41635..3dd6c7e05 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h @@ -20,7 +20,6 @@ #include #include -#include namespace gtsam { @@ -145,7 +144,7 @@ double lineSearch(const S &system, const V currentValues, const W &gradient) { * The last parameter is a switch between gradient-descent and conjugate gradient */ template -boost::tuple nonlinearConjugateGradient(const S &system, +std::tuple nonlinearConjugateGradient(const S &system, const V &initial, const NonlinearOptimizerParams ¶ms, const bool singleIteration, const bool gradientDescent = false) { @@ -160,7 +159,7 @@ boost::tuple nonlinearConjugateGradient(const S &system, std::cout << "Exiting, as error = " << currentError << " < " << params.errorTol << std::endl; } - return boost::tie(initial, iteration); + return {initial, iteration}; } V currentValues = initial; @@ -218,7 +217,7 @@ boost::tuple nonlinearConjugateGradient(const S &system, << "nonlinearConjugateGradient: Terminating because reached maximum iterations" << std::endl; - return boost::tie(currentValues, iteration); + return {currentValues, iteration}; } } // \ namespace gtsam diff --git a/gtsam/nonlinear/NonlinearEquality.h b/gtsam/nonlinear/NonlinearEquality.h index c8ab0c2af..f81486b30 100644 --- a/gtsam/nonlinear/NonlinearEquality.h +++ b/gtsam/nonlinear/NonlinearEquality.h @@ -21,8 +21,6 @@ #include #include -#include - #include #include #include @@ -346,6 +344,7 @@ class NonlinearEquality2 : public NoiseModelFactorN { GTSAM_MAKE_ALIGNED_OPERATOR_NEW private: +#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION /// Serialization function friend class boost::serialization::access; template @@ -354,6 +353,7 @@ class NonlinearEquality2 : public NoiseModelFactorN { ar& boost::serialization::make_nvp( "NoiseModelFactor2", 
boost::serialization::base_object(*this)); } +#endif }; // \NonlinearEquality2 diff --git a/gtsam/nonlinear/NonlinearFactor.cpp b/gtsam/nonlinear/NonlinearFactor.cpp index eac65d587..7312bf6d9 100644 --- a/gtsam/nonlinear/NonlinearFactor.cpp +++ b/gtsam/nonlinear/NonlinearFactor.cpp @@ -18,7 +18,6 @@ #include #include -#include namespace gtsam { @@ -96,12 +95,12 @@ NoiseModelFactor::shared_ptr NoiseModelFactor::cloneWithNewNoiseModel( /* ************************************************************************* */ static void check(const SharedNoiseModel& noiseModel, size_t m) { - if (noiseModel && m != noiseModel->dim()) + if (noiseModel && m != noiseModel->dim()) { throw std::invalid_argument( - boost::str( - boost::format( - "NoiseModelFactor: NoiseModel has dimension %1% instead of %2%.") - % noiseModel->dim() % m)); + "NoiseModelFactor: NoiseModel has dimension " + + std::to_string(noiseModel->dim()) + + " instead of " + std::to_string(m) + "."); + } } /* ************************************************************************* */ diff --git a/gtsam/nonlinear/NonlinearFactor.h b/gtsam/nonlinear/NonlinearFactor.h index d22409875..e8ae9fd47 100644 --- a/gtsam/nonlinear/NonlinearFactor.h +++ b/gtsam/nonlinear/NonlinearFactor.h @@ -26,7 +26,7 @@ #include #include #include -#include // boost::index_sequence +#include #ifdef GTSAM_ENABLE_BOOST_SERIALIZATION #include @@ -609,7 +609,7 @@ protected: Vector unwhitenedError( const Values& x, OptionalMatrixVecType H = nullptr) const override { - return unwhitenedError(boost::mp11::index_sequence_for{}, x, + return unwhitenedError(gtsam::index_sequence_for{}, x, H); } @@ -702,7 +702,7 @@ protected: */ template inline Vector unwhitenedError( - boost::mp11::index_sequence, // + gtsam::index_sequence, // const Values& x, OptionalMatrixVecType H = nullptr) const { if (this->active(x)) { diff --git a/gtsam/nonlinear/NonlinearFactorGraph.cpp b/gtsam/nonlinear/NonlinearFactorGraph.cpp index f8aaf4f39..6e57a2cf5 100644 --- a/gtsam/nonlinear/NonlinearFactorGraph.cpp +++ b/gtsam/nonlinear/NonlinearFactorGraph.cpp @@ -199,9 +199,9 @@ SymbolicFactorGraph::shared_ptr NonlinearFactorGraph::symbolic() const for (const sharedFactor& factor: factors_) { if(factor) - *symbolic += SymbolicFactor(*factor); + symbolic->push_back(SymbolicFactor(*factor)); else - *symbolic += SymbolicFactorGraph::sharedFactor(); + symbolic->push_back(SymbolicFactorGraph::sharedFactor()); } return symbolic; @@ -265,11 +265,11 @@ GaussianFactorGraph::shared_ptr NonlinearFactorGraph::linearize(const Values& li linearFG->reserve(size()); // linearize all factors - for(const sharedFactor& factor: factors_) { - if(factor) { - (*linearFG) += factor->linearize(linearizationPoint); + for (const sharedFactor& factor : factors_) { + if (factor) { + linearFG->push_back(factor->linearize(linearizationPoint)); } else - (*linearFG) += GaussianFactor::shared_ptr(); + linearFG->push_back(GaussianFactor::shared_ptr()); } #endif diff --git a/gtsam/nonlinear/NonlinearFactorGraph.h b/gtsam/nonlinear/NonlinearFactorGraph.h index 92bfdaf20..cf2565725 100644 --- a/gtsam/nonlinear/NonlinearFactorGraph.h +++ b/gtsam/nonlinear/NonlinearFactorGraph.h @@ -189,7 +189,7 @@ namespace gtsam { template void addExpressionFactor(const SharedNoiseModel& R, const T& z, const Expression& h) { - push_back(std::make_shared >(R, z, h)); + this->emplace_shared>(R, z, h); } /** diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index 8aa611976..7ca5ead95 100644 --- 
a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -27,8 +27,6 @@ #include -#include - #include #include #include diff --git a/gtsam/nonlinear/NonlinearOptimizerParams.cpp b/gtsam/nonlinear/NonlinearOptimizerParams.cpp index 6fc532bf6..671dbe34d 100644 --- a/gtsam/nonlinear/NonlinearOptimizerParams.cpp +++ b/gtsam/nonlinear/NonlinearOptimizerParams.cpp @@ -9,7 +9,6 @@ */ #include -#include namespace gtsam { @@ -17,7 +16,8 @@ namespace gtsam { NonlinearOptimizerParams::Verbosity NonlinearOptimizerParams::verbosityTranslator( const std::string &src) { std::string s = src; - boost::algorithm::to_upper(s); + // Convert to upper case + std::transform(s.begin(), s.end(), s.begin(), ::toupper); if (s == "SILENT") return NonlinearOptimizerParams::SILENT; if (s == "ERROR") diff --git a/gtsam/nonlinear/Values-inl.h b/gtsam/nonlinear/Values-inl.h index f509fe534..61ea6684a 100644 --- a/gtsam/nonlinear/Values-inl.h +++ b/gtsam/nonlinear/Values-inl.h @@ -25,11 +25,8 @@ #pragma once #include -#include #include -#include - #include // Only so Eclipse finds class definition namespace gtsam { diff --git a/gtsam/nonlinear/Values.h b/gtsam/nonlinear/Values.h index ba0252c98..75cfc24e0 100644 --- a/gtsam/nonlinear/Values.h +++ b/gtsam/nonlinear/Values.h @@ -105,8 +105,11 @@ namespace gtsam { typedef KeyValuePair value_type; + /// @name Constructors + /// @{ + /** Default constructor creates an empty Values class */ - Values() {} + Values() = default; /** Copy constructor duplicates all keys and values */ Values(const Values& other); @@ -124,6 +127,7 @@ namespace gtsam { /** Construct from a Values and an update vector: identical to other.retract(delta) */ Values(const Values& other, const VectorValues& delta); + /// @} /// @name Testable /// @{ @@ -134,6 +138,8 @@ namespace gtsam { bool equals(const Values& other, double tol=1e-9) const; /// @} + /// @name Standard Interface + /// @{ /** Retrieve a variable by key \c j. The type of the value associated with * this key is supplied as a template argument to this function. @@ -174,6 +180,42 @@ namespace gtsam { /** whether the config is empty */ bool empty() const { return values_.empty(); } + /// @} + /// @name Iterator + /// @{ + + struct deref_iterator { + using const_iterator_type = typename KeyValueMap::const_iterator; + const_iterator_type it_; + deref_iterator(const_iterator_type it) : it_(it) {} + ConstKeyValuePair operator*() const { return {it_->first, *(it_->second)}; } + std::unique_ptr operator->() { + return std::make_unique(it_->first, *(it_->second)); + } + bool operator==(const deref_iterator& other) const { + return it_ == other.it_; + } + bool operator!=(const deref_iterator& other) const { return it_ != other.it_; } + deref_iterator& operator++() { + ++it_; + return *this; + } + }; + + deref_iterator begin() const { return deref_iterator(values_.begin()); } + deref_iterator end() const { return deref_iterator(values_.end()); } + + /** Find an element by key, returning an iterator, or end() if the key was + * not found. */ + deref_iterator find(Key j) const { return deref_iterator(values_.find(j)); } + + /** Find the element greater than or equal to the specified key. */ + deref_iterator lower_bound(Key j) const { return deref_iterator(values_.lower_bound(j)); } + + /** Find the lowest-ordered element greater than the specified key. 
*/ + deref_iterator upper_bound(Key j) const { return deref_iterator(values_.upper_bound(j)); } + + /// @} /// @name Manifold Operations /// @{ @@ -296,7 +338,8 @@ namespace gtsam { // supplied \c filter function. template static bool filterHelper(const std::function filter, const ConstKeyValuePair& key_value) { - BOOST_STATIC_ASSERT((!boost::is_same::value)); + // static_assert if ValueType is type: Value + static_assert(!std::is_same::value, "ValueType must not be type: Value to use this filter"); // Filter and check the type return filter(key_value.key) && (dynamic_cast*>(&key_value.value)); } diff --git a/gtsam/nonlinear/internal/ExecutionTrace.h b/gtsam/nonlinear/internal/ExecutionTrace.h index 17557ba3a..d65c99880 100644 --- a/gtsam/nonlinear/internal/ExecutionTrace.h +++ b/gtsam/nonlinear/internal/ExecutionTrace.h @@ -17,16 +17,17 @@ */ #pragma once + #include // Configuration from CMake #include #include #include -#include - #include + #include #include +#include namespace gtsam { namespace internal { @@ -36,8 +37,8 @@ template struct CallRecord; /// Storage type for the execution trace. /// It enforces the proper alignment in a portable way. /// Provide a traceSize() sized array of this type to traceExecution as traceStorage. -static const unsigned TraceAlignment = 32; -typedef boost::aligned_storage<1, TraceAlignment>::type ExecutionTraceStorage; +static const unsigned TraceAlignment = 16; // 16 bytes is the default alignment used by Eigen. +typedef std::aligned_storage<1, TraceAlignment>::type ExecutionTraceStorage; template struct UseBlockIf { diff --git a/gtsam/nonlinear/internal/ExpressionNode.h b/gtsam/nonlinear/internal/ExpressionNode.h index cf30b1f65..878b2b9d8 100644 --- a/gtsam/nonlinear/internal/ExpressionNode.h +++ b/gtsam/nonlinear/internal/ExpressionNode.h @@ -38,7 +38,7 @@ T & upAlign(T & value, unsigned requiredAlignment = TraceAlignment) { // right now only word sized types are supported. 
// Easy to extend if needed, // by somehow inferring the unsigned integer of same size - BOOST_STATIC_ASSERT(sizeof(T) == sizeof(size_t)); + static_assert(sizeof(T) == sizeof(size_t)); size_t & uiValue = reinterpret_cast(value); size_t misAlignment = uiValue % requiredAlignment; if (misAlignment) { @@ -559,7 +559,7 @@ public: template class ScalarMultiplyNode : public ExpressionNode { // Check that T is a vector space - BOOST_CONCEPT_ASSERT((gtsam::IsVectorSpace)); + GTSAM_CONCEPT_ASSERT(gtsam::IsVectorSpace); double scalar_; std::shared_ptr > expression_; diff --git a/gtsam/nonlinear/internal/LevenbergMarquardtState.h b/gtsam/nonlinear/internal/LevenbergMarquardtState.h index aa0a3daa3..be1afc972 100644 --- a/gtsam/nonlinear/internal/LevenbergMarquardtState.h +++ b/gtsam/nonlinear/internal/LevenbergMarquardtState.h @@ -131,7 +131,7 @@ struct LevenbergMarquardtState : public NonlinearOptimizerState { const Key& key = key_dim.first; const size_t& dim = key_dim.second; const CachedModel* item = getCachedModel(dim); - damped += std::make_shared(key, item->A, item->b, item->model); + damped.emplace_shared(key, item->A, item->b, item->model); } return damped; } @@ -147,7 +147,7 @@ struct LevenbergMarquardtState : public NonlinearOptimizerState { const size_t dim = key_vector.second.size(); CachedModel* item = getCachedModel(dim); item->A.diagonal() = sqrtHessianDiagonal.at(key); // use diag(hessian) - damped += std::make_shared(key, item->A, item->b, item->model); + damped.emplace_shared(key, item->A, item->b, item->model); } catch (const std::out_of_range&) { continue; // Don't attempt any damping if no key found in diagonal } diff --git a/gtsam/nonlinear/nonlinear.i b/gtsam/nonlinear/nonlinear.i index 3e3373f61..e7a05cf62 100644 --- a/gtsam/nonlinear/nonlinear.i +++ b/gtsam/nonlinear/nonlinear.i @@ -135,230 +135,6 @@ virtual class NoiseModelFactor : gtsam::NonlinearFactor { Vector whitenedError(const gtsam::Values& x) const; }; -#include -class Values { - Values(); - Values(const gtsam::Values& other); - - size_t size() const; - bool empty() const; - void clear(); - size_t dim() const; - - void print(string s = "", const gtsam::KeyFormatter& keyFormatter = - gtsam::DefaultKeyFormatter) const; - bool equals(const gtsam::Values& other, double tol) const; - - void insert(const gtsam::Values& values); - void update(const gtsam::Values& values); - void insert_or_assign(const gtsam::Values& values); - void erase(size_t j); - void swap(gtsam::Values& values); - - bool exists(size_t j) const; - gtsam::KeyVector keys() const; - - gtsam::VectorValues zeroVectors() const; - - gtsam::Values retract(const gtsam::VectorValues& delta) const; - gtsam::VectorValues localCoordinates(const gtsam::Values& cp) const; - - // enabling serialization functionality - void serialize() const; - - // New in 4.0, we have to specialize every insert/update/at to generate - // wrappers Instead of the old: void insert(size_t j, const gtsam::Value& - // value); void update(size_t j, const gtsam::Value& val); gtsam::Value - // at(size_t j) const; - - // The order is important: Vector has to precede Point2/Point3 so `atVector` - // can work for those fixed-size vectors. 
- void insert(size_t j, Vector vector); - void insert(size_t j, Matrix matrix); - void insert(size_t j, const gtsam::Point2& point2); - void insert(size_t j, const gtsam::Point3& point3); - void insert(size_t j, const gtsam::Rot2& rot2); - void insert(size_t j, const gtsam::Pose2& pose2); - void insert(size_t j, const gtsam::SO3& R); - void insert(size_t j, const gtsam::SO4& Q); - void insert(size_t j, const gtsam::SOn& P); - void insert(size_t j, const gtsam::Rot3& rot3); - void insert(size_t j, const gtsam::Pose3& pose3); - void insert(size_t j, const gtsam::Unit3& unit3); - void insert(size_t j, const gtsam::Cal3_S2& cal3_s2); - void insert(size_t j, const gtsam::Cal3DS2& cal3ds2); - void insert(size_t j, const gtsam::Cal3Bundler& cal3bundler); - void insert(size_t j, const gtsam::Cal3Fisheye& cal3fisheye); - void insert(size_t j, const gtsam::Cal3Unified& cal3unified); - void insert(size_t j, const gtsam::EssentialMatrix& essential_matrix); - void insert(size_t j, const gtsam::PinholeCamera& camera); - void insert(size_t j, const gtsam::PinholeCamera& camera); - void insert(size_t j, const gtsam::PinholeCamera& camera); - void insert(size_t j, const gtsam::PinholeCamera& camera); - void insert(size_t j, const gtsam::PinholePose& camera); - void insert(size_t j, const gtsam::PinholePose& camera); - void insert(size_t j, const gtsam::PinholePose& camera); - void insert(size_t j, const gtsam::PinholePose& camera); - void insert(size_t j, const gtsam::imuBias::ConstantBias& constant_bias); - void insert(size_t j, const gtsam::NavState& nav_state); - void insert(size_t j, double c); - void insert(size_t j, const gtsam::ParameterMatrix<1>& X); - void insert(size_t j, const gtsam::ParameterMatrix<2>& X); - void insert(size_t j, const gtsam::ParameterMatrix<3>& X); - void insert(size_t j, const gtsam::ParameterMatrix<4>& X); - void insert(size_t j, const gtsam::ParameterMatrix<5>& X); - void insert(size_t j, const gtsam::ParameterMatrix<6>& X); - void insert(size_t j, const gtsam::ParameterMatrix<7>& X); - void insert(size_t j, const gtsam::ParameterMatrix<8>& X); - void insert(size_t j, const gtsam::ParameterMatrix<9>& X); - void insert(size_t j, const gtsam::ParameterMatrix<10>& X); - void insert(size_t j, const gtsam::ParameterMatrix<11>& X); - void insert(size_t j, const gtsam::ParameterMatrix<12>& X); - void insert(size_t j, const gtsam::ParameterMatrix<13>& X); - void insert(size_t j, const gtsam::ParameterMatrix<14>& X); - void insert(size_t j, const gtsam::ParameterMatrix<15>& X); - - template - void insert(size_t j, const T& val); - - void update(size_t j, const gtsam::Point2& point2); - void update(size_t j, const gtsam::Point3& point3); - void update(size_t j, const gtsam::Rot2& rot2); - void update(size_t j, const gtsam::Pose2& pose2); - void update(size_t j, const gtsam::SO3& R); - void update(size_t j, const gtsam::SO4& Q); - void update(size_t j, const gtsam::SOn& P); - void update(size_t j, const gtsam::Rot3& rot3); - void update(size_t j, const gtsam::Pose3& pose3); - void update(size_t j, const gtsam::Unit3& unit3); - void update(size_t j, const gtsam::Cal3_S2& cal3_s2); - void update(size_t j, const gtsam::Cal3DS2& cal3ds2); - void update(size_t j, const gtsam::Cal3Bundler& cal3bundler); - void update(size_t j, const gtsam::Cal3Fisheye& cal3fisheye); - void update(size_t j, const gtsam::Cal3Unified& cal3unified); - void update(size_t j, const gtsam::EssentialMatrix& essential_matrix); - void update(size_t j, const gtsam::PinholeCamera& camera); - void update(size_t j, const 
gtsam::PinholeCamera& camera); - void update(size_t j, const gtsam::PinholeCamera& camera); - void update(size_t j, const gtsam::PinholeCamera& camera); - void update(size_t j, const gtsam::PinholePose& camera); - void update(size_t j, const gtsam::PinholePose& camera); - void update(size_t j, const gtsam::PinholePose& camera); - void update(size_t j, const gtsam::PinholePose& camera); - void update(size_t j, const gtsam::imuBias::ConstantBias& constant_bias); - void update(size_t j, const gtsam::NavState& nav_state); - void update(size_t j, Vector vector); - void update(size_t j, Matrix matrix); - void update(size_t j, double c); - void update(size_t j, const gtsam::ParameterMatrix<1>& X); - void update(size_t j, const gtsam::ParameterMatrix<2>& X); - void update(size_t j, const gtsam::ParameterMatrix<3>& X); - void update(size_t j, const gtsam::ParameterMatrix<4>& X); - void update(size_t j, const gtsam::ParameterMatrix<5>& X); - void update(size_t j, const gtsam::ParameterMatrix<6>& X); - void update(size_t j, const gtsam::ParameterMatrix<7>& X); - void update(size_t j, const gtsam::ParameterMatrix<8>& X); - void update(size_t j, const gtsam::ParameterMatrix<9>& X); - void update(size_t j, const gtsam::ParameterMatrix<10>& X); - void update(size_t j, const gtsam::ParameterMatrix<11>& X); - void update(size_t j, const gtsam::ParameterMatrix<12>& X); - void update(size_t j, const gtsam::ParameterMatrix<13>& X); - void update(size_t j, const gtsam::ParameterMatrix<14>& X); - void update(size_t j, const gtsam::ParameterMatrix<15>& X); - - void insert_or_assign(size_t j, const gtsam::Point2& point2); - void insert_or_assign(size_t j, const gtsam::Point3& point3); - void insert_or_assign(size_t j, const gtsam::Rot2& rot2); - void insert_or_assign(size_t j, const gtsam::Pose2& pose2); - void insert_or_assign(size_t j, const gtsam::SO3& R); - void insert_or_assign(size_t j, const gtsam::SO4& Q); - void insert_or_assign(size_t j, const gtsam::SOn& P); - void insert_or_assign(size_t j, const gtsam::Rot3& rot3); - void insert_or_assign(size_t j, const gtsam::Pose3& pose3); - void insert_or_assign(size_t j, const gtsam::Unit3& unit3); - void insert_or_assign(size_t j, const gtsam::Cal3_S2& cal3_s2); - void insert_or_assign(size_t j, const gtsam::Cal3DS2& cal3ds2); - void insert_or_assign(size_t j, const gtsam::Cal3Bundler& cal3bundler); - void insert_or_assign(size_t j, const gtsam::Cal3Fisheye& cal3fisheye); - void insert_or_assign(size_t j, const gtsam::Cal3Unified& cal3unified); - void insert_or_assign(size_t j, const gtsam::EssentialMatrix& essential_matrix); - void insert_or_assign(size_t j, const gtsam::PinholeCamera& camera); - void insert_or_assign(size_t j, const gtsam::PinholeCamera& camera); - void insert_or_assign(size_t j, const gtsam::PinholeCamera& camera); - void insert_or_assign(size_t j, const gtsam::PinholeCamera& camera); - void insert_or_assign(size_t j, const gtsam::PinholePose& camera); - void insert_or_assign(size_t j, const gtsam::PinholePose& camera); - void insert_or_assign(size_t j, const gtsam::PinholePose& camera); - void insert_or_assign(size_t j, const gtsam::PinholePose& camera); - void insert_or_assign(size_t j, const gtsam::imuBias::ConstantBias& constant_bias); - void insert_or_assign(size_t j, const gtsam::NavState& nav_state); - void insert_or_assign(size_t j, Vector vector); - void insert_or_assign(size_t j, Matrix matrix); - void insert_or_assign(size_t j, double c); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<1>& X); - void 
insert_or_assign(size_t j, const gtsam::ParameterMatrix<2>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<3>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<4>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<5>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<6>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<7>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<8>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<9>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<10>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<11>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<12>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<13>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<14>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<15>& X); - - template , - gtsam::PinholeCamera, - gtsam::PinholeCamera, - gtsam::PinholeCamera, - gtsam::PinholePose, - gtsam::PinholePose, - gtsam::PinholePose, - gtsam::PinholePose, - gtsam::imuBias::ConstantBias, - gtsam::NavState, - Vector, - Matrix, - double, - gtsam::ParameterMatrix<1>, - gtsam::ParameterMatrix<2>, - gtsam::ParameterMatrix<3>, - gtsam::ParameterMatrix<4>, - gtsam::ParameterMatrix<5>, - gtsam::ParameterMatrix<6>, - gtsam::ParameterMatrix<7>, - gtsam::ParameterMatrix<8>, - gtsam::ParameterMatrix<9>, - gtsam::ParameterMatrix<10>, - gtsam::ParameterMatrix<11>, - gtsam::ParameterMatrix<12>, - gtsam::ParameterMatrix<13>, - gtsam::ParameterMatrix<14>, - gtsam::ParameterMatrix<15>}> - T at(size_t j); -}; - #include class Marginals { Marginals(const gtsam::NonlinearFactorGraph& graph, @@ -582,6 +358,7 @@ virtual class DoglegOptimizer : gtsam::NonlinearOptimizer { double getDelta() const; }; +// TODO(dellaert): This will only work when GTSAM_USE_BOOST_FEATURES is true. 
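GTSAM_USE_BOOST_FEATURES is the same switch used for the iteration timing in LevenbergMarquardtOptimizer::tryLambda earlier in this diff. Below is a simplified, self-contained sketch of that fallback pattern only; the helper name timeOneIteration and the std::function parameter are illustrative, and the real code additionally distinguishes old and new Boost timers via GTSAM_USING_NEW_BOOST_TIMERS.

#ifdef GTSAM_USE_BOOST_FEATURES
#include <boost/timer/timer.hpp>   // boost::timer::cpu_timer
#else
#include <chrono>
#endif
#include <functional>

// Runs `body` once and returns the elapsed wall time in seconds.
double timeOneIteration(const std::function<void()>& body) {
#ifdef GTSAM_USE_BOOST_FEATURES
  boost::timer::cpu_timer timer;       // starts on construction
  body();
  return 1e-9 * timer.elapsed().wall;  // .wall is reported in nanoseconds
#else
  const auto start = std::chrono::high_resolution_clock::now();
  body();
  const auto end = std::chrono::high_resolution_clock::now();
  return std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / 1e6;
#endif
}

Usage would look like timeOneIteration([&] { /* one LM inner iteration */ });, matching the seconds value the optimizer writes to its summary and log output.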
#include template virtual class GncOptimizer { diff --git a/gtsam/nonlinear/nonlinearExceptions.h b/gtsam/nonlinear/nonlinearExceptions.h index 7b704ac39..e36b72fb7 100644 --- a/gtsam/nonlinear/nonlinearExceptions.h +++ b/gtsam/nonlinear/nonlinearExceptions.h @@ -17,7 +17,6 @@ */ #pragma once -#include #include #include diff --git a/gtsam/nonlinear/tests/testValues.cpp b/gtsam/nonlinear/tests/testValues.cpp index 02a3374e4..1af18ef95 100644 --- a/gtsam/nonlinear/tests/testValues.cpp +++ b/gtsam/nonlinear/tests/testValues.cpp @@ -25,7 +25,6 @@ #include #include -#include #include #include #include @@ -194,11 +193,31 @@ TEST(Values, basic_functions) values.insert(6, M1); values.insert(8, M2); + EXPECT(!values.exists(1)); EXPECT(values.exists(2)); EXPECT(values.exists(4)); EXPECT(values.exists(6)); EXPECT(values.exists(8)); + + size_t count = 0; + for (const auto& [key, value] : values) { + count += 1; + if (key == 2 || key == 4) EXPECT_LONGS_EQUAL(3, value.dim()); + if (key == 6 || key == 8) EXPECT_LONGS_EQUAL(6, value.dim()); + } + EXPECT_LONGS_EQUAL(4, count); + + // find + EXPECT_LONGS_EQUAL(4, values.find(4)->key); + + // lower_bound + EXPECT_LONGS_EQUAL(4, values.lower_bound(4)->key); + EXPECT_LONGS_EQUAL(4, values.lower_bound(3)->key); + + // upper_bound + EXPECT_LONGS_EQUAL(6, values.upper_bound(4)->key); + EXPECT_LONGS_EQUAL(4, values.upper_bound(3)->key); } /* ************************************************************************* */ diff --git a/gtsam/nonlinear/values.i b/gtsam/nonlinear/values.i new file mode 100644 index 000000000..680cafc31 --- /dev/null +++ b/gtsam/nonlinear/values.i @@ -0,0 +1,265 @@ +//************************************************************************* +// nonlinear but only Values +//************************************************************************* + +namespace gtsam { + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +class Values { + Values(); + Values(const gtsam::Values& other); + + size_t size() const; + bool empty() const; + void clear(); + size_t dim() const; + + void print(string s = "", const gtsam::KeyFormatter& keyFormatter = + gtsam::DefaultKeyFormatter) const; + bool equals(const gtsam::Values& other, double tol) const; + + void insert(const gtsam::Values& values); + void update(const gtsam::Values& values); + void insert_or_assign(const gtsam::Values& values); + void erase(size_t j); + void swap(gtsam::Values& values); + + bool exists(size_t j) const; + gtsam::KeyVector keys() const; + + gtsam::VectorValues zeroVectors() const; + + gtsam::Values retract(const gtsam::VectorValues& delta) const; + gtsam::VectorValues localCoordinates(const gtsam::Values& cp) const; + + // enabling serialization functionality + void serialize() const; + + // New in 4.0, we have to specialize every insert/update/at to generate + // wrappers Instead of the old: void insert(size_t j, const gtsam::Value& + // value); void update(size_t j, const gtsam::Value& val); gtsam::Value + // at(size_t j) const; + + // The order is important: Vector has to precede Point2/Point3 so `atVector` + // can work for those fixed-size vectors. 
+ void insert(size_t j, Vector vector); + void insert(size_t j, Matrix matrix); + void insert(size_t j, const gtsam::Point2& point2); + void insert(size_t j, const gtsam::Point3& point3); + void insert(size_t j, const gtsam::Rot2& rot2); + void insert(size_t j, const gtsam::Pose2& pose2); + void insert(size_t j, const gtsam::SO3& R); + void insert(size_t j, const gtsam::SO4& Q); + void insert(size_t j, const gtsam::SOn& P); + void insert(size_t j, const gtsam::Rot3& rot3); + void insert(size_t j, const gtsam::Pose3& pose3); + void insert(size_t j, const gtsam::Unit3& unit3); + void insert(size_t j, const gtsam::Cal3_S2& cal3_s2); + void insert(size_t j, const gtsam::Cal3DS2& cal3ds2); + void insert(size_t j, const gtsam::Cal3Bundler& cal3bundler); + void insert(size_t j, const gtsam::Cal3Fisheye& cal3fisheye); + void insert(size_t j, const gtsam::Cal3Unified& cal3unified); + void insert(size_t j, const gtsam::EssentialMatrix& essential_matrix); + void insert(size_t j, const gtsam::PinholeCamera& camera); + void insert(size_t j, const gtsam::PinholeCamera& camera); + void insert(size_t j, const gtsam::PinholeCamera& camera); + void insert(size_t j, const gtsam::PinholeCamera& camera); + void insert(size_t j, const gtsam::PinholePose& camera); + void insert(size_t j, const gtsam::PinholePose& camera); + void insert(size_t j, const gtsam::PinholePose& camera); + void insert(size_t j, const gtsam::PinholePose& camera); + void insert(size_t j, const gtsam::imuBias::ConstantBias& constant_bias); + void insert(size_t j, const gtsam::NavState& nav_state); + void insert(size_t j, double c); + void insert(size_t j, const gtsam::ParameterMatrix<1>& X); + void insert(size_t j, const gtsam::ParameterMatrix<2>& X); + void insert(size_t j, const gtsam::ParameterMatrix<3>& X); + void insert(size_t j, const gtsam::ParameterMatrix<4>& X); + void insert(size_t j, const gtsam::ParameterMatrix<5>& X); + void insert(size_t j, const gtsam::ParameterMatrix<6>& X); + void insert(size_t j, const gtsam::ParameterMatrix<7>& X); + void insert(size_t j, const gtsam::ParameterMatrix<8>& X); + void insert(size_t j, const gtsam::ParameterMatrix<9>& X); + void insert(size_t j, const gtsam::ParameterMatrix<10>& X); + void insert(size_t j, const gtsam::ParameterMatrix<11>& X); + void insert(size_t j, const gtsam::ParameterMatrix<12>& X); + void insert(size_t j, const gtsam::ParameterMatrix<13>& X); + void insert(size_t j, const gtsam::ParameterMatrix<14>& X); + void insert(size_t j, const gtsam::ParameterMatrix<15>& X); + + template + void insert(size_t j, const T& val); + + void update(size_t j, const gtsam::Point2& point2); + void update(size_t j, const gtsam::Point3& point3); + void update(size_t j, const gtsam::Rot2& rot2); + void update(size_t j, const gtsam::Pose2& pose2); + void update(size_t j, const gtsam::SO3& R); + void update(size_t j, const gtsam::SO4& Q); + void update(size_t j, const gtsam::SOn& P); + void update(size_t j, const gtsam::Rot3& rot3); + void update(size_t j, const gtsam::Pose3& pose3); + void update(size_t j, const gtsam::Unit3& unit3); + void update(size_t j, const gtsam::Cal3_S2& cal3_s2); + void update(size_t j, const gtsam::Cal3DS2& cal3ds2); + void update(size_t j, const gtsam::Cal3Bundler& cal3bundler); + void update(size_t j, const gtsam::Cal3Fisheye& cal3fisheye); + void update(size_t j, const gtsam::Cal3Unified& cal3unified); + void update(size_t j, const gtsam::EssentialMatrix& essential_matrix); + void update(size_t j, const gtsam::PinholeCamera& camera); + void update(size_t j, const 
gtsam::PinholeCamera& camera); + void update(size_t j, const gtsam::PinholeCamera& camera); + void update(size_t j, const gtsam::PinholeCamera& camera); + void update(size_t j, const gtsam::PinholePose& camera); + void update(size_t j, const gtsam::PinholePose& camera); + void update(size_t j, const gtsam::PinholePose& camera); + void update(size_t j, const gtsam::PinholePose& camera); + void update(size_t j, const gtsam::imuBias::ConstantBias& constant_bias); + void update(size_t j, const gtsam::NavState& nav_state); + void update(size_t j, Vector vector); + void update(size_t j, Matrix matrix); + void update(size_t j, double c); + void update(size_t j, const gtsam::ParameterMatrix<1>& X); + void update(size_t j, const gtsam::ParameterMatrix<2>& X); + void update(size_t j, const gtsam::ParameterMatrix<3>& X); + void update(size_t j, const gtsam::ParameterMatrix<4>& X); + void update(size_t j, const gtsam::ParameterMatrix<5>& X); + void update(size_t j, const gtsam::ParameterMatrix<6>& X); + void update(size_t j, const gtsam::ParameterMatrix<7>& X); + void update(size_t j, const gtsam::ParameterMatrix<8>& X); + void update(size_t j, const gtsam::ParameterMatrix<9>& X); + void update(size_t j, const gtsam::ParameterMatrix<10>& X); + void update(size_t j, const gtsam::ParameterMatrix<11>& X); + void update(size_t j, const gtsam::ParameterMatrix<12>& X); + void update(size_t j, const gtsam::ParameterMatrix<13>& X); + void update(size_t j, const gtsam::ParameterMatrix<14>& X); + void update(size_t j, const gtsam::ParameterMatrix<15>& X); + + void insert_or_assign(size_t j, const gtsam::Point2& point2); + void insert_or_assign(size_t j, const gtsam::Point3& point3); + void insert_or_assign(size_t j, const gtsam::Rot2& rot2); + void insert_or_assign(size_t j, const gtsam::Pose2& pose2); + void insert_or_assign(size_t j, const gtsam::SO3& R); + void insert_or_assign(size_t j, const gtsam::SO4& Q); + void insert_or_assign(size_t j, const gtsam::SOn& P); + void insert_or_assign(size_t j, const gtsam::Rot3& rot3); + void insert_or_assign(size_t j, const gtsam::Pose3& pose3); + void insert_or_assign(size_t j, const gtsam::Unit3& unit3); + void insert_or_assign(size_t j, const gtsam::Cal3_S2& cal3_s2); + void insert_or_assign(size_t j, const gtsam::Cal3DS2& cal3ds2); + void insert_or_assign(size_t j, const gtsam::Cal3Bundler& cal3bundler); + void insert_or_assign(size_t j, const gtsam::Cal3Fisheye& cal3fisheye); + void insert_or_assign(size_t j, const gtsam::Cal3Unified& cal3unified); + void insert_or_assign(size_t j, + const gtsam::EssentialMatrix& essential_matrix); + void insert_or_assign(size_t j, + const gtsam::PinholeCamera& camera); + void insert_or_assign(size_t j, + const gtsam::PinholeCamera& camera); + void insert_or_assign(size_t j, + const gtsam::PinholeCamera& camera); + void insert_or_assign(size_t j, + const gtsam::PinholeCamera& camera); + void insert_or_assign(size_t j, + const gtsam::PinholePose& camera); + void insert_or_assign(size_t j, + const gtsam::PinholePose& camera); + void insert_or_assign(size_t j, + const gtsam::PinholePose& camera); + void insert_or_assign(size_t j, + const gtsam::PinholePose& camera); + void insert_or_assign(size_t j, + const gtsam::imuBias::ConstantBias& constant_bias); + void insert_or_assign(size_t j, const gtsam::NavState& nav_state); + void insert_or_assign(size_t j, Vector vector); + void insert_or_assign(size_t j, Matrix matrix); + void insert_or_assign(size_t j, double c); + void insert_or_assign(size_t j, const gtsam::ParameterMatrix<1>& X); + 
void insert_or_assign(size_t j, const gtsam::ParameterMatrix<2>& X); + void insert_or_assign(size_t j, const gtsam::ParameterMatrix<3>& X); + void insert_or_assign(size_t j, const gtsam::ParameterMatrix<4>& X); + void insert_or_assign(size_t j, const gtsam::ParameterMatrix<5>& X); + void insert_or_assign(size_t j, const gtsam::ParameterMatrix<6>& X); + void insert_or_assign(size_t j, const gtsam::ParameterMatrix<7>& X); + void insert_or_assign(size_t j, const gtsam::ParameterMatrix<8>& X); + void insert_or_assign(size_t j, const gtsam::ParameterMatrix<9>& X); + void insert_or_assign(size_t j, const gtsam::ParameterMatrix<10>& X); + void insert_or_assign(size_t j, const gtsam::ParameterMatrix<11>& X); + void insert_or_assign(size_t j, const gtsam::ParameterMatrix<12>& X); + void insert_or_assign(size_t j, const gtsam::ParameterMatrix<13>& X); + void insert_or_assign(size_t j, const gtsam::ParameterMatrix<14>& X); + void insert_or_assign(size_t j, const gtsam::ParameterMatrix<15>& X); + + template , + gtsam::PinholeCamera, + gtsam::PinholeCamera, + gtsam::PinholeCamera, + gtsam::PinholePose, + gtsam::PinholePose, + gtsam::PinholePose, + gtsam::PinholePose, + gtsam::imuBias::ConstantBias, + gtsam::NavState, + Vector, + Matrix, + double, + gtsam::ParameterMatrix<1>, + gtsam::ParameterMatrix<2>, + gtsam::ParameterMatrix<3>, + gtsam::ParameterMatrix<4>, + gtsam::ParameterMatrix<5>, + gtsam::ParameterMatrix<6>, + gtsam::ParameterMatrix<7>, + gtsam::ParameterMatrix<8>, + gtsam::ParameterMatrix<9>, + gtsam::ParameterMatrix<10>, + gtsam::ParameterMatrix<11>, + gtsam::ParameterMatrix<12>, + gtsam::ParameterMatrix<13>, + gtsam::ParameterMatrix<14>, + gtsam::ParameterMatrix<15>}> + T at(size_t j); +}; + +} // namespace gtsam diff --git a/gtsam/sam/BearingRangeFactor.h b/gtsam/sam/BearingRangeFactor.h index a1e6251b5..d5467be47 100644 --- a/gtsam/sam/BearingRangeFactor.h +++ b/gtsam/sam/BearingRangeFactor.h @@ -21,7 +21,6 @@ #include #include -#include namespace gtsam { diff --git a/gtsam/sam/tests/testRangeFactor.cpp b/gtsam/sam/tests/testRangeFactor.cpp index 6a71cbd2c..f45349c6b 100644 --- a/gtsam/sam/tests/testRangeFactor.cpp +++ b/gtsam/sam/tests/testRangeFactor.cpp @@ -25,7 +25,6 @@ #include #include -#include using namespace std::placeholders; using namespace std; diff --git a/gtsam/sfm/BinaryMeasurement.h b/gtsam/sfm/BinaryMeasurement.h index 1f991a05a..debb27bc1 100644 --- a/gtsam/sfm/BinaryMeasurement.h +++ b/gtsam/sfm/BinaryMeasurement.h @@ -35,7 +35,7 @@ namespace gtsam { template class BinaryMeasurement : public Factor { // Check that T type is testable - BOOST_CONCEPT_ASSERT((IsTestable)); + GTSAM_CONCEPT_ASSERT(IsTestable); public: // shorthand for a smart pointer to a measurement diff --git a/gtsam/sfm/DsfTrackGenerator.cpp b/gtsam/sfm/DsfTrackGenerator.cpp index e82880193..6880138d9 100644 --- a/gtsam/sfm/DsfTrackGenerator.cpp +++ b/gtsam/sfm/DsfTrackGenerator.cpp @@ -20,6 +20,7 @@ #include #include +#include namespace gtsam { diff --git a/gtsam/sfm/MFAS.cpp b/gtsam/sfm/MFAS.cpp index 913752d8a..9e7214046 100644 --- a/gtsam/sfm/MFAS.cpp +++ b/gtsam/sfm/MFAS.cpp @@ -116,7 +116,7 @@ MFAS::MFAS(const TranslationEdges& relativeTranslations, // Iterate over edges, obtain weights by projecting // their relativeTranslations along the projection direction for (const auto& measurement : relativeTranslations) { - edgeWeights_[std::make_pair(measurement.key1(), measurement.key2())] = + edgeWeights_[{measurement.key1(), measurement.key2()}] = measurement.measured().dot(projectionDirection); } } 
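The hunks above (NonlinearConjugateGradientOptimizer, ShonanAveraging, MFAS) all follow the same idiom: boost::tie and std::make_pair are replaced by C++17 structured bindings and brace initialization. A minimal stand-alone sketch of that idiom, with made-up names (minimumAndLabel, edgeWeights) that are not GTSAM API:

#include <map>
#include <string>
#include <utility>

// Return a pair by brace-initializing it, instead of std::make_pair(...).
std::pair<double, std::string> minimumAndLabel() { return {0.5, "lambda"}; }

int main() {
  // Structured bindings replace the old
  //   double v; std::string s; boost::tie(v, s) = minimumAndLabel();
  const auto [value, label] = minimumAndLabel();

  // Brace-initialized keys likewise replace std::make_pair in map lookups,
  // as in the MFAS::edgeWeights_ change above.
  std::map<std::pair<int, int>, double> edgeWeights;
  edgeWeights[{1, 2}] = value;

  (void)label;  // silence unused-variable warnings in this sketch
  return 0;
}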
diff --git a/gtsam/sfm/SfmTrack.h b/gtsam/sfm/SfmTrack.h index 6ef1f9512..2b494ed7c 100644 --- a/gtsam/sfm/SfmTrack.h +++ b/gtsam/sfm/SfmTrack.h @@ -18,7 +18,6 @@ #pragma once -#include #include #include diff --git a/gtsam/sfm/ShonanAveraging.cpp b/gtsam/sfm/ShonanAveraging.cpp index f8cd9fc9c..8ace9d98c 100644 --- a/gtsam/sfm/ShonanAveraging.cpp +++ b/gtsam/sfm/ShonanAveraging.cpp @@ -179,10 +179,8 @@ ShonanAveraging::createOptimizerAt(size_t p, const Values &initial) const { // Anchor prior is added here as depends on initial value (and cost is zero) if (parameters_.alpha > 0) { - size_t i; - Rot value; const size_t dim = SOn::Dimension(p); - std::tie(i, value) = parameters_.anchor; + const auto [i, value] = parameters_.anchor; auto model = noiseModel::Isotropic::Precision(dim, parameters_.alpha); graph.emplace_shared>(i, SOn::Lift(p, value.matrix()), model); @@ -755,7 +753,7 @@ std::pair ShonanAveraging::computeMinEigenVector( const Values &values) const { Vector minEigenVector; double minEigenValue = computeMinEigenValue(values, &minEigenVector); - return std::make_pair(minEigenValue, minEigenVector); + return {minEigenValue, minEigenVector}; } /* ************************************************************************* */ @@ -908,7 +906,7 @@ std::pair ShonanAveraging::run(const Values &initialEstimate, "When using robust norm, Shonan only tests a single rank. Set pMin = pMax"); } const Values SO3Values = roundSolution(Qstar); - return std::make_pair(SO3Values, 0); + return {SO3Values, 0}; } else { // Check certificate of global optimality Vector minEigenVector; @@ -916,7 +914,7 @@ std::pair ShonanAveraging::run(const Values &initialEstimate, if (minEigenValue > parameters_.optimalityThreshold) { // If at global optimum, round and return solution const Values SO3Values = roundSolution(Qstar); - return std::make_pair(SO3Values, minEigenValue); + return {SO3Values, minEigenValue}; } // Not at global optimimum yet, so check whether we will go to next level diff --git a/gtsam/slam/BetweenFactor.h b/gtsam/slam/BetweenFactor.h index 3e5b154e3..ee533c215 100644 --- a/gtsam/slam/BetweenFactor.h +++ b/gtsam/slam/BetweenFactor.h @@ -40,8 +40,8 @@ namespace gtsam { class BetweenFactor: public NoiseModelFactorN { // Check that VALUE type is a testable Lie group - BOOST_CONCEPT_ASSERT((IsTestable)); - BOOST_CONCEPT_ASSERT((IsLieGroup)); + GTSAM_CONCEPT_ASSERT1(IsTestable); + GTSAM_CONCEPT_ASSERT2(IsLieGroup); public: diff --git a/gtsam/slam/GeneralSFMFactor.h b/gtsam/slam/GeneralSFMFactor.h index 5605ad58e..763f3666d 100644 --- a/gtsam/slam/GeneralSFMFactor.h +++ b/gtsam/slam/GeneralSFMFactor.h @@ -36,7 +36,6 @@ #include #include -#include #ifdef GTSAM_ENABLE_BOOST_SERIALIZATION #include #endif diff --git a/gtsam/slam/InitializePose3.cpp b/gtsam/slam/InitializePose3.cpp index 4bec99665..ba9e0dd6b 100644 --- a/gtsam/slam/InitializePose3.cpp +++ b/gtsam/slam/InitializePose3.cpp @@ -27,8 +27,6 @@ #include #include -#include - #include using namespace std; diff --git a/gtsam/slam/InitializePose3.h b/gtsam/slam/InitializePose3.h index bf86ab6f2..3f69217dc 100644 --- a/gtsam/slam/InitializePose3.h +++ b/gtsam/slam/InitializePose3.h @@ -21,7 +21,6 @@ #pragma once #include -#include #include #include #include diff --git a/gtsam/slam/JacobianFactorQR.h b/gtsam/slam/JacobianFactorQR.h index 5d59f3c62..9c01670ed 100644 --- a/gtsam/slam/JacobianFactorQR.h +++ b/gtsam/slam/JacobianFactorQR.h @@ -44,11 +44,9 @@ public: //gfg.print("gfg"); // eliminate the point - std::shared_ptr bn; - 
GaussianFactorGraph::shared_ptr fg; KeyVector variables; variables.push_back(pointKey); - boost::tie(bn, fg) = gfg.eliminatePartialSequential(variables, EliminateQR); + const auto [bn, fg] = gfg.eliminatePartialSequential(variables, EliminateQR); //fg->print("fg"); JacobianFactor::operator=(JacobianFactor(*fg)); diff --git a/gtsam/slam/RegularImplicitSchurFactor.h b/gtsam/slam/RegularImplicitSchurFactor.h index 9f48d01aa..924aaa1cf 100644 --- a/gtsam/slam/RegularImplicitSchurFactor.h +++ b/gtsam/slam/RegularImplicitSchurFactor.h @@ -148,7 +148,7 @@ public: std::pair jacobian() const override { throw std::runtime_error( "RegularImplicitSchurFactor::jacobian non implemented"); - return std::make_pair(Matrix(), Vector()); + return {Matrix(), Vector()}; } /// *Compute* full augmented information matrix diff --git a/gtsam/slam/SmartFactorBase.h b/gtsam/slam/SmartFactorBase.h index 80dde3563..e0540cc41 100644 --- a/gtsam/slam/SmartFactorBase.h +++ b/gtsam/slam/SmartFactorBase.h @@ -243,7 +243,7 @@ protected: * to the matrices and vectors that will be used to store the results instead * of pointers. */ - template + template> Vector unwhitenedError( const Cameras& cameras, const POINT& point, OptArgs&&... optArgs) const { diff --git a/gtsam/slam/TriangulationFactor.h b/gtsam/slam/TriangulationFactor.h index 6feaa7041..32aa590a6 100644 --- a/gtsam/slam/TriangulationFactor.h +++ b/gtsam/slam/TriangulationFactor.h @@ -19,7 +19,6 @@ #include #include -#include namespace gtsam { @@ -86,7 +85,7 @@ public: if (model && model->dim() != traits::dimension) throw std::invalid_argument( "TriangulationFactor must be created with " - + boost::lexical_cast((int) traits::dimension) + + std::to_string((int) traits::dimension) + "-dimensional noise model."); } diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index 9633c5ea0..b5b8303fb 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -41,34 +41,42 @@ #include #include -#include -#include #include #include #include #include #include +#include + +#if defined(__GNUC__) && (__GNUC__ == 7) +#include +namespace fs = std::experimental::filesystem; +#else +#include +namespace fs = std::filesystem; +#endif -using namespace std; -namespace fs = boost::filesystem; using gtsam::symbol_shorthand::L; +using std::cout; +using std::endl; + #define LINESIZE 81920 namespace gtsam { /* ************************************************************************* */ -string findExampleDataFile(const string &name) { +std::string findExampleDataFile(const std::string &name) { // Search source tree and installed location - vector rootsToSearch; + std::vector rootsToSearch; // Constants below are defined by CMake, see gtsam/gtsam/CMakeLists.txt rootsToSearch.push_back(GTSAM_SOURCE_TREE_DATASET_DIR); rootsToSearch.push_back(GTSAM_INSTALLED_DATASET_DIR); // Search for filename as given, and with .graph and .txt extensions - vector namesToSearch; + std::vector namesToSearch; namesToSearch.push_back(name); namesToSearch.push_back(name + ".graph"); namesToSearch.push_back(name + ".txt"); @@ -85,7 +93,7 @@ string findExampleDataFile(const string &name) { } // If we did not return already, then we did not find the file - throw invalid_argument( + throw std::invalid_argument( "gtsam::findExampleDataFile could not find a matching " "file in\n" GTSAM_SOURCE_TREE_DATASET_DIR " or\n" GTSAM_INSTALLED_DATASET_DIR " named\n" + @@ -94,10 +102,10 @@ string findExampleDataFile(const string &name) { } /* ************************************************************************* */ 
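The dataset.cpp hunk above switches from boost::filesystem to the standard library, keeping an <experimental/filesystem> fallback for GCC 7 (which also requires linking against stdc++fs). A compilable sketch of that pattern, reconstructing the header names implied by the namespace aliases; the helper firstExisting and its arguments are illustrative, not part of dataset.cpp:

#if defined(__GNUC__) && (__GNUC__ == 7)
#include <experimental/filesystem>
namespace fs = std::experimental::filesystem;
#else
#include <filesystem>
namespace fs = std::filesystem;
#endif

#include <stdexcept>
#include <string>

// Returns root/name if it exists on disk, mirroring the fs::path / fs::exists
// calls that replace their boost::filesystem counterparts in findExampleDataFile.
std::string firstExisting(const std::string& root, const std::string& name) {
  const fs::path candidate = fs::path(root) / name;
  if (fs::exists(candidate)) return candidate.string();
  throw std::invalid_argument("could not find " + name + " under " + root);
}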
-string createRewrittenFileName(const string &name) { +std::string createRewrittenFileName(const std::string &name) { // Search source tree and installed location if (!exists(fs::path(name))) { - throw invalid_argument( + throw std::invalid_argument( "gtsam::createRewrittenFileName could not find a matching file in\n" + name); } @@ -113,17 +121,17 @@ string createRewrittenFileName(const string &name) { // Type for parser functions used in parseLines below. template using Parser = - std::function(istream &is, const string &tag)>; + std::function(std::istream &is, const std::string &tag)>; // Parse a file by calling the parse(is, tag) function for every line. // The result of calling the function is ignored, so typically parse function // works via a side effect, like adding a factor into a graph etc. template -static void parseLines(const string &filename, Parser parse) { - ifstream is(filename.c_str()); +static void parseLines(const std::string &filename, Parser parse) { + std::ifstream is(filename.c_str()); if (!is) - throw invalid_argument("parse: can not find file " + filename); - string tag; + throw std::invalid_argument("parse: can not find file " + filename); + std::string tag; while (is >> tag) { parse(is, tag); // ignore return value is.ignore(LINESIZE, '\n'); @@ -133,10 +141,10 @@ static void parseLines(const string &filename, Parser parse) { /* ************************************************************************* */ // Parse types T into a size_t-indexed map template -map parseToMap(const string &filename, Parser> parse, +std::map parseToMap(const std::string &filename, Parser> parse, size_t maxIndex) { - map result; - Parser> emplace = [&](istream &is, const string &tag) { + std::map result; + Parser> emplace = [&](std::istream &is, const std::string &tag) { if (auto t = parse(is, tag)) { if (!maxIndex || t->first <= maxIndex) result.emplace(*t); @@ -150,9 +158,9 @@ map parseToMap(const string &filename, Parser> parse, /* ************************************************************************* */ // Parse a file and push results on a vector template -static vector parseToVector(const string &filename, Parser parse) { - vector result; - Parser add = [&result, parse](istream &is, const string &tag) { +static std::vector parseToVector(const std::string &filename, Parser parse) { + std::vector result; + Parser add = [&result, parse](std::istream &is, const std::string &tag) { if (auto t = parse(is, tag)) result.push_back(*t); return std::nullopt; @@ -162,7 +170,7 @@ static vector parseToVector(const string &filename, Parser parse) { } /* ************************************************************************* */ -std::optional parseVertexPose(istream &is, const string &tag) { +std::optional parseVertexPose(std::istream &is, const std::string &tag) { if ((tag == "VERTEX2") || (tag == "VERTEX_SE2") || (tag == "VERTEX")) { size_t id; double x, y, yaw; @@ -182,8 +190,8 @@ GTSAM_EXPORT std::map parseVariables( } /* ************************************************************************* */ -std::optional parseVertexLandmark(istream &is, - const string &tag) { +std::optional parseVertexLandmark(std::istream &is, + const std::string &tag) { if (tag == "VERTEX_XY") { size_t id; double x, y; @@ -232,7 +240,7 @@ static SharedNoiseModel createNoiseModel( // v(1)' v(3) v(4); // v(2)' v(4)' v(5) ] if (v(0) == 0.0 || v(3) == 0.0 || v(5) == 0.0) - throw runtime_error( + throw std::runtime_error( "load2D::readNoiseModel looks like this is not G2O matrix order"); M << v(0), v(1), v(2), v(1), 
v(3), v(4), v(2), v(4), v(5); break; @@ -244,12 +252,12 @@ static SharedNoiseModel createNoiseModel( // v(1)' v(2) v(5); // v(4)' v(5)' v(3) ] if (v(0) == 0.0 || v(2) == 0.0 || v(3) == 0.0) - throw invalid_argument( + throw std::invalid_argument( "load2D::readNoiseModel looks like this is not TORO matrix order"); M << v(0), v(1), v(4), v(1), v(2), v(5), v(4), v(5), v(3); break; default: - throw runtime_error("load2D: invalid noise format"); + throw std::runtime_error("load2D: invalid noise format"); } // Now, create a Gaussian noise model @@ -267,7 +275,7 @@ static SharedNoiseModel createNoiseModel( model = noiseModel::Gaussian::Covariance(M, smart); break; default: - throw invalid_argument("load2D: invalid noise format"); + throw std::invalid_argument("load2D: invalid noise format"); } switch (kernelFunctionType) { @@ -283,12 +291,12 @@ static SharedNoiseModel createNoiseModel( noiseModel::mEstimator::Tukey::Create(4.6851), model); break; default: - throw invalid_argument("load2D: invalid kernel function type"); + throw std::invalid_argument("load2D: invalid kernel function type"); } } /* ************************************************************************* */ -std::optional parseEdge(istream &is, const string &tag) { +std::optional parseEdge(std::istream &is, const std::string &tag) { if ((tag == "EDGE2") || (tag == "EDGE") || (tag == "EDGE_SE2") || (tag == "ODOMETRY")) { @@ -316,7 +324,7 @@ template struct ParseFactor : ParseMeasurement { // We parse a measurement then convert typename std::optional::shared_ptr> - operator()(istream &is, const string &tag) { + operator()(std::istream &is, const std::string &tag) { if (auto m = ParseMeasurement::operator()(is, tag)) return std::make_shared>( m->key1(), m->key2(), m->measured(), m->noiseModel()); @@ -341,8 +349,8 @@ template <> struct ParseMeasurement { SharedNoiseModel model; // The actual parser - std::optional> operator()(istream &is, - const string &tag) { + std::optional> operator()(std::istream &is, + const std::string &tag) { auto edge = parseEdge(is, tag); if (!edge) return std::nullopt; @@ -353,7 +361,7 @@ template <> struct ParseMeasurement { // optional filter size_t id1, id2; - tie(id1, id2) = edge->first; + std::tie(id1, id2) = edge->first; if (maxIndex && (id1 > maxIndex || id2 > maxIndex)) return std::nullopt; @@ -375,7 +383,7 @@ template <> struct ParseMeasurement { std::shared_ptr createSampler(const SharedNoiseModel &model) { auto noise = std::dynamic_pointer_cast(model); if (!noise) - throw invalid_argument("gtsam::load: invalid noise model for adding noise" + throw std::invalid_argument("gtsam::load: invalid noise model for adding noise" "(current version assumes diagonal noise model)!"); return std::shared_ptr(new Sampler(noise)); } @@ -447,7 +455,7 @@ template <> struct ParseMeasurement { // The actual parser std::optional> - operator()(istream &is, const string &tag) { + operator()(std::istream &is, const std::string &tag) { if (tag != "BR" && tag != "LANDMARK") return std::nullopt; @@ -497,14 +505,14 @@ template <> struct ParseMeasurement { }; /* ************************************************************************* */ -GraphAndValues load2D(const string &filename, SharedNoiseModel model, +GraphAndValues load2D(const std::string &filename, SharedNoiseModel model, size_t maxIndex, bool addNoise, bool smart, NoiseFormat noiseFormat, KernelFunctionType kernelFunctionType) { // Single pass for poses and landmarks. 
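The parsing helpers used by load2D below (parseLines, parseToMap, parseVertexPose, and friends) share one shape: a std::function that receives the stream and the line's leading tag and returns a std::optional, so a parser can simply decline lines it does not handle. A simplified, self-contained sketch of that pattern; LineParser and parseAll are illustrative names, not the GTSAM API:

    #include <functional>
    #include <optional>
    #include <sstream>
    #include <string>
    #include <vector>

    template <typename T>
    using LineParser = std::function<std::optional<T>(std::istream&, const std::string&)>;

    // Read every leading tag, hand the rest of the line to the parser,
    // and keep only the values it accepts.
    template <typename T>
    std::vector<T> parseAll(std::istream& is, LineParser<T> parse) {
      std::vector<T> result;
      std::string tag;
      while (is >> tag) {
        if (auto value = parse(is, tag)) result.push_back(*value);
        is.ignore(1024, '\n');  // skip whatever is left of the line
      }
      return result;
    }

    int main() {
      std::istringstream is("VERTEX_SE2 0 1.0 2.0 0.5\nEDGE_SE2 0 1 0 0 0\n");
      LineParser<double> xOnly = [](std::istream& s, const std::string& tag) -> std::optional<double> {
        if (tag != "VERTEX_SE2") return std::nullopt;  // decline other record types
        size_t id; double x, y, theta;
        s >> id >> x >> y >> theta;
        return x;
      };
      return parseAll(is, xOnly).size() == 1 ? 0 : 1;  // only the vertex line is kept
    }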
auto initial = std::make_shared(); - Parser insert = [maxIndex, &initial](istream &is, const string &tag) { + Parser insert = [maxIndex, &initial](std::istream &is, const std::string &tag) { if (auto indexedPose = parseVertexPose(is, tag)) { if (!maxIndex || indexedPose->first <= maxIndex) initial->insert(indexedPose->first, indexedPose->second); @@ -529,7 +537,7 @@ GraphAndValues load2D(const string &filename, SharedNoiseModel model, // Combine in a single parser that adds factors to `graph`, but also inserts // new variables into `initial` when needed. - Parser parse = [&](istream &is, const string &tag) { + Parser parse = [&](std::istream &is, const std::string &tag) { if (auto f = parseBetweenFactor(is, tag)) { graph->push_back(*f); @@ -560,19 +568,20 @@ GraphAndValues load2D(const string &filename, SharedNoiseModel model, parseLines(filename, parse); - return make_pair(graph, initial); + return {graph, initial}; } /* ************************************************************************* */ -GraphAndValues load2D(pair dataset, size_t maxIndex, - bool addNoise, bool smart, NoiseFormat noiseFormat, +GraphAndValues load2D(std::pair dataset, + size_t maxIndex, bool addNoise, bool smart, + NoiseFormat noiseFormat, KernelFunctionType kernelFunctionType) { return load2D(dataset.first, dataset.second, maxIndex, addNoise, smart, noiseFormat, kernelFunctionType); } /* ************************************************************************* */ -GraphAndValues load2D_robust(const string &filename, +GraphAndValues load2D_robust(const std::string &filename, const noiseModel::Base::shared_ptr &model, size_t maxIndex) { return load2D(filename, model, maxIndex); @@ -581,9 +590,9 @@ GraphAndValues load2D_robust(const string &filename, /* ************************************************************************* */ void save2D(const NonlinearFactorGraph &graph, const Values &config, const noiseModel::Diagonal::shared_ptr model, - const string &filename) { + const std::string &filename) { - fstream stream(filename.c_str(), fstream::out); + std::fstream stream(filename.c_str(), std::fstream::out); // save poses for (const auto &key_pose : config.extract()) { @@ -612,7 +621,7 @@ void save2D(const NonlinearFactorGraph &graph, const Values &config, } /* ************************************************************************* */ -GraphAndValues readG2o(const string &g2oFile, const bool is3D, +GraphAndValues readG2o(const std::string &g2oFile, const bool is3D, KernelFunctionType kernelFunctionType) { if (is3D) { return load3D(g2oFile); @@ -628,8 +637,8 @@ GraphAndValues readG2o(const string &g2oFile, const bool is3D, /* ************************************************************************* */ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, - const string &filename) { - fstream stream(filename.c_str(), fstream::out); + const std::string &filename) { + std::fstream stream(filename.c_str(), std::fstream::out); // Use a lambda here to more easily modify behavior in future. 
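Taken together with readG2o above, these signatures are what the updated tests below rely on: the returned pair of shared pointers binds directly into a structured binding. A hedged usage sketch of the round trip; the file names are placeholders and error handling is omitted:

    #include <gtsam/slam/dataset.h>

    using namespace gtsam;

    int main() {
      // Read a 2D pose graph; GraphAndValues is a pair of shared pointers.
      const auto [graph, values] = readG2o("pose2example.g2o", /*is3D=*/false);
      // ... optimize or edit the graph and estimate here ...
      // Write the graph and estimate back out in g2o format.
      writeG2o(*graph, *values, "pose2example_rewritten.g2o");
      return 0;
    }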
auto index = [](gtsam::Key key) { return Symbol(key).index(); }; @@ -674,7 +683,7 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, std::dynamic_pointer_cast(model); if (!gaussianModel) { model->print("model\n"); - throw invalid_argument("writeG2o: invalid noise model!"); + throw std::invalid_argument("writeG2o: invalid noise model!"); } Matrix3 Info = gaussianModel->R().transpose() * gaussianModel->R(); Pose2 pose = factor->measured(); //.inverse(); @@ -698,7 +707,7 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, std::dynamic_pointer_cast(model); if (!gaussianModel) { model->print("model\n"); - throw invalid_argument("writeG2o: invalid noise model!"); + throw std::invalid_argument("writeG2o: invalid noise model!"); } Matrix6 Info = gaussianModel->R().transpose() * gaussianModel->R(); const Pose3 pose3D = factor3D->measured(); @@ -729,7 +738,7 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, /* ************************************************************************* */ // parse quaternion in x,y,z,w order, and normalize to unit length -istream &operator>>(istream &is, Quaternion &q) { +std::istream &operator>>(std::istream &is, Quaternion &q) { double x, y, z, w; is >> x >> y >> z >> w; const double norm = sqrt(w * w + x * x + y * y + z * z), f = 1.0 / norm; @@ -739,7 +748,7 @@ istream &operator>>(istream &is, Quaternion &q) { /* ************************************************************************* */ // parse Rot3 from roll, pitch, yaw -istream &operator>>(istream &is, Rot3 &R) { +std::istream &operator>>(std::istream &is, Rot3 &R) { double yaw, pitch, roll; is >> roll >> pitch >> yaw; // notice order ! R = Rot3::Ypr(yaw, pitch, roll); @@ -747,20 +756,20 @@ istream &operator>>(istream &is, Rot3 &R) { } /* ************************************************************************* */ -std::optional> parseVertexPose3(istream &is, - const string &tag) { +std::optional> parseVertexPose3(std::istream &is, + const std::string &tag) { if (tag == "VERTEX3") { size_t id; double x, y, z; Rot3 R; is >> id >> x >> y >> z >> R; - return make_pair(id, Pose3(R, {x, y, z})); + return std::make_pair(id, Pose3(R, {x, y, z})); } else if (tag == "VERTEX_SE3:QUAT") { size_t id; double x, y, z; Quaternion q; is >> id >> x >> y >> z >> q; - return make_pair(id, Pose3(q, {x, y, z})); + return std::make_pair(id, Pose3(q, {x, y, z})); } else return std::nullopt; } @@ -772,13 +781,13 @@ GTSAM_EXPORT std::map parseVariables( } /* ************************************************************************* */ -std::optional> parseVertexPoint3(istream &is, - const string &tag) { +std::optional> parseVertexPoint3(std::istream &is, + const std::string &tag) { if (tag == "VERTEX_TRACKXYZ") { size_t id; double x, y, z; is >> id >> x >> y >> z; - return make_pair(id, Point3(x, y, z)); + return std::make_pair(id, Point3(x, y, z)); } else return std::nullopt; } @@ -791,7 +800,7 @@ GTSAM_EXPORT std::map parseVariables( /* ************************************************************************* */ // Parse a symmetric covariance matrix (onlyupper-triangular part is stored) -istream &operator>>(istream &is, Matrix6 &m) { +std::istream &operator>>(std::istream &is, Matrix6 &m) { for (size_t i = 0; i < 6; i++) for (size_t j = i; j < 6; j++) { is >> m(i, j); @@ -808,8 +817,8 @@ template <> struct ParseMeasurement { size_t maxIndex; // The actual parser - std::optional> operator()(istream &is, - const string &tag) { + std::optional> 
operator()(std::istream &is, + const std::string &tag) { if (tag != "EDGE3" && tag != "EDGE_SE3:QUAT") return std::nullopt; @@ -913,7 +922,7 @@ parseFactors(const std::string &filename, } /* ************************************************************************* */ -GraphAndValues load3D(const string &filename) { +GraphAndValues load3D(const std::string &filename) { auto graph = std::make_shared(); auto initial = std::make_shared(); @@ -922,7 +931,7 @@ GraphAndValues load3D(const string &filename) { // Single pass for variables and factors. Unlike 2D version, does *not* insert // variables into `initial` if referenced but not present. - Parser parse = [&](istream &is, const string &tag) { + Parser parse = [&](std::istream &is, const std::string &tag) { if (auto indexedPose = parseVertexPose3(is, tag)) { initial->insert(indexedPose->first, indexedPose->second); } else if (auto indexedLandmark = parseVertexPoint3(is, tag)) { @@ -934,7 +943,7 @@ GraphAndValues load3D(const string &filename) { }; parseLines(filename, parse); - return make_pair(graph, initial); + return {graph, initial}; } // Wrapper-friendly versions of parseFactors and parseFactors diff --git a/gtsam/slam/dataset.h b/gtsam/slam/dataset.h index ad1bb40ef..1ecaade54 100644 --- a/gtsam/slam/dataset.h +++ b/gtsam/slam/dataset.h @@ -30,7 +30,6 @@ #include #include #include -#include #include #include diff --git a/gtsam/slam/lago.cpp b/gtsam/slam/lago.cpp index cb9d98218..645c5ea91 100644 --- a/gtsam/slam/lago.cpp +++ b/gtsam/slam/lago.cpp @@ -19,19 +19,24 @@ #include #include -#include #include -#include +#include #include +#include #include +#include -#include +#include +#include +#include using namespace std; namespace gtsam { namespace lago { +using initialize::kAnchorKey; + static const Matrix I = I_1x1; static const Matrix I3 = I_3x3; @@ -49,7 +54,7 @@ static const noiseModel::Diagonal::shared_ptr priorPose2Noise = * The root is assumed to have orientation zero. 
*/ static double computeThetaToRoot(const Key nodeKey, - const PredecessorMap& tree, const key2doubleMap& deltaThetaMap, + const PredecessorMap& tree, const key2doubleMap& deltaThetaMap, const key2doubleMap& thetaFromRootMap) { double nodeTheta = 0; @@ -75,20 +80,19 @@ static double computeThetaToRoot(const Key nodeKey, /* ************************************************************************* */ key2doubleMap computeThetasToRoot(const key2doubleMap& deltaThetaMap, - const PredecessorMap& tree) { + const PredecessorMap& tree) { key2doubleMap thetaToRootMap; - // Orientation of the roo - thetaToRootMap.insert(pair(initialize::kAnchorKey, 0.0)); + // Orientation of the root + thetaToRootMap.emplace(kAnchorKey, 0.0); // for all nodes in the tree - for(const key2doubleMap::value_type& it: deltaThetaMap) { + for(const auto& [nodeKey, _]: deltaThetaMap) { // compute the orientation wrt root - Key nodeKey = it.first; - double nodeTheta = computeThetaToRoot(nodeKey, tree, deltaThetaMap, + const double nodeTheta = computeThetaToRoot(nodeKey, tree, deltaThetaMap, thetaToRootMap); - thetaToRootMap.insert(pair(nodeKey, nodeTheta)); + thetaToRootMap.emplace(nodeKey, nodeTheta); } return thetaToRootMap; } @@ -97,7 +101,7 @@ key2doubleMap computeThetasToRoot(const key2doubleMap& deltaThetaMap, void getSymbolicGraph( /*OUTPUTS*/vector& spanningTreeIds, vector& chordsIds, key2doubleMap& deltaThetaMap, - /*INPUTS*/const PredecessorMap& tree, const NonlinearFactorGraph& g) { + /*INPUTS*/const PredecessorMap& tree, const NonlinearFactorGraph& g) { // Get keys for which you want the orientation size_t id = 0; @@ -161,7 +165,7 @@ static void getDeltaThetaAndNoise(NonlinearFactor::shared_ptr factor, GaussianFactorGraph buildLinearOrientationGraph( const vector& spanningTreeIds, const vector& chordsIds, const NonlinearFactorGraph& g, const key2doubleMap& orientationsToRoot, - const PredecessorMap& tree) { + const PredecessorMap& tree) { GaussianFactorGraph lagoGraph; Vector deltaTheta; @@ -174,7 +178,7 @@ GaussianFactorGraph buildLinearOrientationGraph( getDeltaThetaAndNoise(g[factorId], deltaTheta, model_deltaTheta); lagoGraph.add(key1, -I, key2, I, deltaTheta, model_deltaTheta); } - // put regularized measurements in the chordsIds + // put regularized measurements in the chords for(const size_t& factorId: chordsIds) { const KeyVector& keys = g[factorId]->keys(); Key key1 = keys[0], key2 = keys[1]; @@ -183,23 +187,23 @@ GaussianFactorGraph buildLinearOrientationGraph( ///cout << "REG: key1= " << DefaultKeyFormatter(key1) << " key2= " << DefaultKeyFormatter(key2) << endl; double k2pi_noise = key1_DeltaTheta_key2 + orientationsToRoot.at(key1) - orientationsToRoot.at(key2); // this coincides to summing up measurements along the cycle induced by the chord - double k = boost::math::round(k2pi_noise / (2 * M_PI)); + double k = std::round(k2pi_noise / (2 * M_PI)); //if (k2pi_noise - 2*k*M_PI > 1e-5) cout << k2pi_noise - 2*k*M_PI << endl; // for debug Vector deltaThetaRegularized = (Vector(1) << key1_DeltaTheta_key2 - 2 * k * M_PI).finished(); lagoGraph.add(key1, -I, key2, I, deltaThetaRegularized, model_deltaTheta); } // prior on the anchor orientation - lagoGraph.add(initialize::kAnchorKey, I, (Vector(1) << 0.0).finished(), priorOrientationNoise); + lagoGraph.add(kAnchorKey, I, (Vector(1) << 0.0).finished(), priorOrientationNoise); return lagoGraph; } /* ************************************************************************* */ -static PredecessorMap findOdometricPath( +static PredecessorMap findOdometricPath( 
const NonlinearFactorGraph& pose2Graph) { - PredecessorMap tree; - Key minKey = initialize::kAnchorKey; // this initialization does not matter + PredecessorMap tree; + Key minKey = kAnchorKey; // this initialization does not matter bool minUnassigned = true; for(const std::shared_ptr& factor: pose2Graph) { @@ -211,42 +215,77 @@ static PredecessorMap findOdometricPath( minUnassigned = false; } if (key2 - key1 == 1) { // consecutive keys - tree.insert(key2, key1); + tree.emplace(key2, key1); if (key1 < minKey) minKey = key1; } } - tree.insert(minKey, initialize::kAnchorKey); - tree.insert(initialize::kAnchorKey, initialize::kAnchorKey); // root + tree.emplace(minKey, kAnchorKey); + tree.emplace(kAnchorKey, kAnchorKey); // root return tree; } +/*****************************************************************************/ +PredecessorMap findMinimumSpanningTree( + const NonlinearFactorGraph& pose2Graph) { + // Compute the minimum spanning tree + const auto edgeWeights = std::vector(pose2Graph.size(), 1.0); + const auto mstEdgeIndices = + utils::kruskal(pose2Graph, edgeWeights); + + // Create a PredecessorMap 'predecessorMap' such that: + // predecessorMap[key2] = key1, where key1 is the 'parent' node for key2 in + // the spanning tree + PredecessorMap predecessorMap; + std::map visitationMap; + std::stack> stack; + + stack.push({kAnchorKey, kAnchorKey}); + while (!stack.empty()) { + auto [u, parent] = stack.top(); + stack.pop(); + if (visitationMap[u]) continue; + visitationMap[u] = true; + predecessorMap[u] = parent; + for (const auto& edgeIdx : mstEdgeIndices) { + const auto v = pose2Graph[edgeIdx]->front(); + const auto w = pose2Graph[edgeIdx]->back(); + if ((v == u || w == u) && !visitationMap[v == u ? w : v]) { + stack.push({v == u ? w : v, u}); + } + } + } + + return predecessorMap; +} + /* ************************************************************************* */ // Return the orientations of a graph including only BetweenFactors static VectorValues computeOrientations(const NonlinearFactorGraph& pose2Graph, - bool useOdometricPath) { + bool useOdometricPath) { gttic(lago_computeOrientations); + PredecessorMap tree; // Find a minimum spanning tree - PredecessorMap tree; if (useOdometricPath) tree = findOdometricPath(pose2Graph); else - tree = findMinimumSpanningTree >(pose2Graph); + tree = findMinimumSpanningTree(pose2Graph); // Create a linear factor graph (LFG) of scalars key2doubleMap deltaThetaMap; - vector spanningTreeIds; // ids of between factors forming the spanning tree T - vector chordsIds; // ids of between factors corresponding to chordsIds wrt T + vector + spanningTreeIds; // ids of between factors forming the spanning tree T + vector + chordsIds; // ids of between factors corresponding to chordsIds wrt T getSymbolicGraph(spanningTreeIds, chordsIds, deltaThetaMap, tree, pose2Graph); // temporary structure to correct wraparounds along loops key2doubleMap orientationsToRoot = computeThetasToRoot(deltaThetaMap, tree); // regularize measurements and plug everything in a factor graph - GaussianFactorGraph lagoGraph = buildLinearOrientationGraph(spanningTreeIds, - chordsIds, pose2Graph, orientationsToRoot, tree); + GaussianFactorGraph lagoGraph = buildLinearOrientationGraph( + spanningTreeIds, chordsIds, pose2Graph, orientationsToRoot, tree); // Solve the LFG VectorValues orientationsLago = lagoGraph.optimize(); @@ -256,8 +295,7 @@ static VectorValues computeOrientations(const NonlinearFactorGraph& pose2Graph, /* 
************************************************************************* */ VectorValues initializeOrientations(const NonlinearFactorGraph& graph, - bool useOdometricPath) { - + bool useOdometricPath) { // We "extract" the Pose2 subgraph of the original graph: this // is done to properly model priors and avoiding operating on a larger graph NonlinearFactorGraph pose2Graph = initialize::buildPoseGraph(graph); @@ -314,8 +352,7 @@ Values computePoses(const NonlinearFactorGraph& pose2graph, } } // add prior - linearPose2graph.add(initialize::kAnchorKey, I3, Vector3(0.0, 0.0, 0.0), - priorPose2Noise); + linearPose2graph.add(kAnchorKey, I3, Vector3(0.0, 0.0, 0.0), priorPose2Noise); // optimize VectorValues posesLago = linearPose2graph.optimize(); @@ -324,7 +361,7 @@ Values computePoses(const NonlinearFactorGraph& pose2graph, Values initialGuessLago; for(const VectorValues::value_type& it: posesLago) { Key key = it.first; - if (key != initialize::kAnchorKey) { + if (key != kAnchorKey) { const Vector& poseVector = it.second; Pose2 poseLago = Pose2(poseVector(0), poseVector(1), orientationsLago.at(key)(0) + poseVector(2)); @@ -361,7 +398,7 @@ Values initialize(const NonlinearFactorGraph& graph, // for all nodes in the tree for(const VectorValues::value_type& it: orientations) { Key key = it.first; - if (key != initialize::kAnchorKey) { + if (key != kAnchorKey) { const Pose2& pose = initialGuess.at(key); const Vector& orientation = it.second; Pose2 poseLago = Pose2(pose.x(), pose.y(), orientation(0)); diff --git a/gtsam/slam/lago.h b/gtsam/slam/lago.h index 0df80ac42..5661a7cba 100644 --- a/gtsam/slam/lago.h +++ b/gtsam/slam/lago.h @@ -37,19 +37,19 @@ #include #include #include -#include namespace gtsam { namespace lago { typedef std::map key2doubleMap; +typedef std::map PredecessorMap; /** * Compute the cumulative orientations (without wrapping) * for all nodes wrt the root (root has zero orientation). */ GTSAM_EXPORT key2doubleMap computeThetasToRoot( - const key2doubleMap& deltaThetaMap, const PredecessorMap& tree); + const key2doubleMap& deltaThetaMap, const PredecessorMap& tree); /** * Given a factor graph "g", and a spanning tree "tree", select the nodes belonging @@ -62,13 +62,20 @@ GTSAM_EXPORT key2doubleMap computeThetasToRoot( GTSAM_EXPORT void getSymbolicGraph( /*OUTPUTS*/std::vector& spanningTreeIds, std::vector& chordsIds, key2doubleMap& deltaThetaMap, - /*INPUTS*/const PredecessorMap& tree, const NonlinearFactorGraph& g); + /*INPUTS*/const PredecessorMap& tree, const NonlinearFactorGraph& g); /** Linear factor graph with regularized orientation measurements */ GTSAM_EXPORT GaussianFactorGraph buildLinearOrientationGraph( const std::vector& spanningTreeIds, const std::vector& chordsIds, const NonlinearFactorGraph& g, - const key2doubleMap& orientationsToRoot, const PredecessorMap& tree); + const key2doubleMap& orientationsToRoot, const PredecessorMap& tree); + +/** Given a "pose2" factor graph, find its minimum spanning tree. + * Note: All 'Pose2' Between factors are given equal weightage. + * Note: Assumes all the edges (factors) are Between factors. 
+ */ +GTSAM_EXPORT PredecessorMap findMinimumSpanningTree( + const NonlinearFactorGraph& pose2Graph); /** LAGO: Return the orientations of the Pose2 in a generic factor graph */ GTSAM_EXPORT VectorValues initializeOrientations( diff --git a/gtsam/slam/tests/testDataset.cpp b/gtsam/slam/tests/testDataset.cpp index 9413ae4bd..8591a3932 100644 --- a/gtsam/slam/tests/testDataset.cpp +++ b/gtsam/slam/tests/testDataset.cpp @@ -22,8 +22,6 @@ #include #include -#include - #include #include @@ -38,7 +36,8 @@ TEST(dataSet, findExampleDataFile) { const string expected_end = "examples/Data/example.graph"; const string actual = findExampleDataFile("example"); string actual_end = actual.substr(actual.size() - expected_end.size(), expected_end.size()); - boost::replace_all(actual_end, "\\", "/"); // Convert directory separators to forward-slash + // replace all ocurrences of \\ with / use stl + std::replace(actual_end.begin(), actual_end.end(), '\\', '/'); EXPECT(assert_equal(expected_end, actual_end)); } @@ -92,9 +91,7 @@ TEST( dataSet, parseEdge) TEST(dataSet, load2D) { ///< The structure where we will save the SfM data const string filename = findExampleDataFile("w100.graph"); - NonlinearFactorGraph::shared_ptr graph; - Values::shared_ptr initial; - boost::tie(graph, initial) = load2D(filename); + const auto [graph, initial] = load2D(filename); EXPECT_LONGS_EQUAL(300, graph->size()); EXPECT_LONGS_EQUAL(100, initial->size()); auto model = noiseModel::Unit::Create(3); @@ -135,20 +132,17 @@ TEST(dataSet, load2D) { /* ************************************************************************* */ TEST(dataSet, load2DVictoriaPark) { const string filename = findExampleDataFile("victoria_park.txt"); - NonlinearFactorGraph::shared_ptr graph; - Values::shared_ptr initial; - // Load all - boost::tie(graph, initial) = load2D(filename); - EXPECT_LONGS_EQUAL(10608, graph->size()); - EXPECT_LONGS_EQUAL(7120, initial->size()); + const auto [graph1, initial1] = load2D(filename); + EXPECT_LONGS_EQUAL(10608, graph1->size()); + EXPECT_LONGS_EQUAL(7120, initial1->size()); // Restrict keys size_t maxIndex = 5; - boost::tie(graph, initial) = load2D(filename, nullptr, maxIndex); - EXPECT_LONGS_EQUAL(5, graph->size()); - EXPECT_LONGS_EQUAL(6, initial->size()); // file has 0 as well - EXPECT_LONGS_EQUAL(L(5), graph->at(4)->keys()[1]); + const auto [graph2, initial2] = load2D(filename, nullptr, maxIndex); + EXPECT_LONGS_EQUAL(5, graph2->size()); + EXPECT_LONGS_EQUAL(6, initial2->size()); // file has 0 as well + EXPECT_LONGS_EQUAL(L(5), graph2->at(4)->keys()[1]); } /* ************************************************************************* */ @@ -218,10 +212,8 @@ TEST(dataSet, readG2o3D) { } // Check graph version - NonlinearFactorGraph::shared_ptr actualGraph; - Values::shared_ptr actualValues; bool is3D = true; - boost::tie(actualGraph, actualValues) = readG2o(g2oFile, is3D); + const auto [actualGraph, actualValues] = readG2o(g2oFile, is3D); EXPECT(assert_equal(expectedGraph, *actualGraph, 1e-5)); for (size_t j : {0, 1, 2, 3, 4}) { EXPECT(assert_equal(poses[j], actualValues->at(j), 1e-5)); @@ -232,10 +224,8 @@ TEST(dataSet, readG2o3D) { TEST( dataSet, readG2o3DNonDiagonalNoise) { const string g2oFile = findExampleDataFile("pose3example-offdiagonal.txt"); - NonlinearFactorGraph::shared_ptr actualGraph; - Values::shared_ptr actualValues; bool is3D = true; - boost::tie(actualGraph, actualValues) = readG2o(g2oFile, is3D); + const auto [actualGraph, actualValues] = readG2o(g2oFile, is3D); Values expectedValues; Rot3 R0 = 
Rot3::Quaternion(1.000000, 0.000000, 0.000000, 0.000000 ); @@ -327,9 +317,7 @@ static NonlinearFactorGraph expectedGraph(const SharedNoiseModel& model) { /* ************************************************************************* */ TEST(dataSet, readG2o) { const string g2oFile = findExampleDataFile("pose2example"); - NonlinearFactorGraph::shared_ptr actualGraph; - Values::shared_ptr actualValues; - boost::tie(actualGraph, actualValues) = readG2o(g2oFile); + const auto [actualGraph, actualValues] = readG2o(g2oFile); auto model = noiseModel::Diagonal::Precisions( Vector3(44.721360, 44.721360, 30.901699)); @@ -353,10 +341,8 @@ TEST(dataSet, readG2o) { /* ************************************************************************* */ TEST(dataSet, readG2oHuber) { const string g2oFile = findExampleDataFile("pose2example"); - NonlinearFactorGraph::shared_ptr actualGraph; - Values::shared_ptr actualValues; bool is3D = false; - boost::tie(actualGraph, actualValues) = + const auto [actualGraph, actualValues] = readG2o(g2oFile, is3D, KernelFunctionTypeHUBER); auto baseModel = noiseModel::Diagonal::Precisions( @@ -370,10 +356,8 @@ TEST(dataSet, readG2oHuber) { /* ************************************************************************* */ TEST(dataSet, readG2oTukey) { const string g2oFile = findExampleDataFile("pose2example"); - NonlinearFactorGraph::shared_ptr actualGraph; - Values::shared_ptr actualValues; bool is3D = false; - boost::tie(actualGraph, actualValues) = + const auto [actualGraph, actualValues] = readG2o(g2oFile, is3D, KernelFunctionTypeTUKEY); auto baseModel = noiseModel::Diagonal::Precisions( @@ -388,16 +372,12 @@ TEST(dataSet, readG2oTukey) { TEST( dataSet, writeG2o) { const string g2oFile = findExampleDataFile("pose2example"); - NonlinearFactorGraph::shared_ptr expectedGraph; - Values::shared_ptr expectedValues; - boost::tie(expectedGraph, expectedValues) = readG2o(g2oFile); + const auto [expectedGraph, expectedValues] = readG2o(g2oFile); const string filenameToWrite = createRewrittenFileName(g2oFile); writeG2o(*expectedGraph, *expectedValues, filenameToWrite); - NonlinearFactorGraph::shared_ptr actualGraph; - Values::shared_ptr actualValues; - boost::tie(actualGraph, actualValues) = readG2o(filenameToWrite); + const auto [actualGraph, actualValues] = readG2o(filenameToWrite); EXPECT(assert_equal(*expectedValues,*actualValues,1e-5)); EXPECT(assert_equal(*expectedGraph,*actualGraph,1e-5)); } @@ -406,17 +386,13 @@ TEST( dataSet, writeG2o) TEST( dataSet, writeG2o3D) { const string g2oFile = findExampleDataFile("pose3example"); - NonlinearFactorGraph::shared_ptr expectedGraph; - Values::shared_ptr expectedValues; bool is3D = true; - boost::tie(expectedGraph, expectedValues) = readG2o(g2oFile, is3D); + const auto [expectedGraph, expectedValues] = readG2o(g2oFile, is3D); const string filenameToWrite = createRewrittenFileName(g2oFile); writeG2o(*expectedGraph, *expectedValues, filenameToWrite); - NonlinearFactorGraph::shared_ptr actualGraph; - Values::shared_ptr actualValues; - boost::tie(actualGraph, actualValues) = readG2o(filenameToWrite, is3D); + const auto [actualGraph, actualValues] = readG2o(filenameToWrite, is3D); EXPECT(assert_equal(*expectedValues,*actualValues,1e-4)); EXPECT(assert_equal(*expectedGraph,*actualGraph,1e-4)); } @@ -425,17 +401,13 @@ TEST( dataSet, writeG2o3D) TEST( dataSet, writeG2o3DNonDiagonalNoise) { const string g2oFile = findExampleDataFile("pose3example-offdiagonal"); - NonlinearFactorGraph::shared_ptr expectedGraph; - Values::shared_ptr expectedValues; bool 
is3D = true; - boost::tie(expectedGraph, expectedValues) = readG2o(g2oFile, is3D); + const auto [expectedGraph, expectedValues] = readG2o(g2oFile, is3D); const string filenameToWrite = createRewrittenFileName(g2oFile); writeG2o(*expectedGraph, *expectedValues, filenameToWrite); - NonlinearFactorGraph::shared_ptr actualGraph; - Values::shared_ptr actualValues; - boost::tie(actualGraph, actualValues) = readG2o(filenameToWrite, is3D); + const auto [actualGraph, actualValues] = readG2o(filenameToWrite, is3D); EXPECT(assert_equal(*expectedValues,*actualValues,1e-4)); EXPECT(assert_equal(*expectedGraph,*actualGraph,1e-4)); } diff --git a/gtsam/slam/tests/testGeneralSFMFactor.cpp b/gtsam/slam/tests/testGeneralSFMFactor.cpp index 553791385..ec37b4105 100644 --- a/gtsam/slam/tests/testGeneralSFMFactor.cpp +++ b/gtsam/slam/tests/testGeneralSFMFactor.cpp @@ -50,7 +50,7 @@ class Graph: public NonlinearFactorGraph { public: void addMeasurement(int i, int j, const Point2& z, const SharedNoiseModel& model) { - push_back(std::make_shared(z, model, X(i), L(j))); + emplace_shared(z, model, X(i), L(j)); } void addCameraConstraint(int j, const GeneralCamera& p) { @@ -483,10 +483,7 @@ TEST(GeneralSFMFactor, BinaryJacobianFactor) { actualJacobian.augmentedInformation(), 1e-9)); // Construct from GaussianFactorGraph - GaussianFactorGraph gfg1; - gfg1 += expected; - GaussianFactorGraph gfg2; - gfg2 += actual; + GaussianFactorGraph gfg1 {expected}, gfg2 {actual}; HessianFactor hessian1(gfg1), hessian2(gfg2); EXPECT(assert_equal(hessian1, hessian2, 1e-9)); } diff --git a/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp b/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp index f8385352f..3be35f793 100644 --- a/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp +++ b/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp @@ -51,7 +51,7 @@ class Graph: public NonlinearFactorGraph { public: void addMeasurement(const int& i, const int& j, const Point2& z, const SharedNoiseModel& model) { - push_back(std::make_shared(z, model, X(i), L(j))); + emplace_shared(z, model, X(i), L(j)); } void addCameraConstraint(int j, const GeneralCamera& p) { diff --git a/gtsam/slam/tests/testInitializePose.cpp b/gtsam/slam/tests/testInitializePose.cpp index 1373cb576..bb8b7949d 100644 --- a/gtsam/slam/tests/testInitializePose.cpp +++ b/gtsam/slam/tests/testInitializePose.cpp @@ -33,10 +33,8 @@ using namespace gtsam; /* ************************************************************************* */ TEST(InitializePose3, computePoses2D) { const string g2oFile = findExampleDataFile("noisyToyGraph.txt"); - NonlinearFactorGraph::shared_ptr inputGraph; - Values::shared_ptr posesInFile; bool is3D = false; - boost::tie(inputGraph, posesInFile) = readG2o(g2oFile, is3D); + const auto [inputGraph, posesInFile] = readG2o(g2oFile, is3D); auto priorModel = noiseModel::Unit::Create(3); inputGraph->addPrior(0, posesInFile->at(0), priorModel); @@ -56,10 +54,8 @@ TEST(InitializePose3, computePoses2D) { /* ************************************************************************* */ TEST(InitializePose3, computePoses3D) { const string g2oFile = findExampleDataFile("Klaus3"); - NonlinearFactorGraph::shared_ptr inputGraph; - Values::shared_ptr posesInFile; bool is3D = true; - boost::tie(inputGraph, posesInFile) = readG2o(g2oFile, is3D); + const auto [inputGraph, posesInFile] = readG2o(g2oFile, is3D); auto priorModel = noiseModel::Unit::Create(6); inputGraph->addPrior(0, posesInFile->at(0), priorModel); diff --git 
a/gtsam/slam/tests/testInitializePose3.cpp b/gtsam/slam/tests/testInitializePose3.cpp index 37d65320a..e45ee0461 100644 --- a/gtsam/slam/tests/testInitializePose3.cpp +++ b/gtsam/slam/tests/testInitializePose3.cpp @@ -231,10 +231,8 @@ TEST( InitializePose3, orientationsGradient ) { // writeG2o(pose3Graph, givenPoses, g2oFile); const string matlabResultsfile = findExampleDataFile("simpleGraph10gradIter"); - NonlinearFactorGraph::shared_ptr matlabGraph; - Values::shared_ptr matlabValues; bool is3D = true; - boost::tie(matlabGraph, matlabValues) = readG2o(matlabResultsfile, is3D); + const auto [matlabGraph, matlabValues] = readG2o(matlabResultsfile, is3D); Rot3 R0Expected = matlabValues->at(1).rotation(); EXPECT(assert_equal(R0Expected, orientations.at(x0), 1e-4)); @@ -266,10 +264,8 @@ TEST( InitializePose3, posesWithGivenGuess ) { /* ************************************************************************* */ TEST(InitializePose3, initializePoses) { const string g2oFile = findExampleDataFile("pose3example-grid"); - NonlinearFactorGraph::shared_ptr inputGraph; - Values::shared_ptr posesInFile; bool is3D = true; - boost::tie(inputGraph, posesInFile) = readG2o(g2oFile, is3D); + const auto [inputGraph, posesInFile] = readG2o(g2oFile, is3D); auto priorModel = noiseModel::Unit::Create(6); inputGraph->addPrior(0, Pose3(), priorModel); diff --git a/gtsam/slam/tests/testLago.cpp b/gtsam/slam/tests/testLago.cpp index fda39c169..777686284 100644 --- a/gtsam/slam/tests/testLago.cpp +++ b/gtsam/slam/tests/testLago.cpp @@ -20,6 +20,7 @@ */ #include +#include #include #include #include @@ -64,48 +65,79 @@ NonlinearFactorGraph graph() { } } +/*******************************************************************************/ +TEST(Lago, findMinimumSpanningTree) { + NonlinearFactorGraph g = simpleLago::graph(); + auto gPlus = initialize::buildPoseGraph(g); + lago::PredecessorMap tree = lago::findMinimumSpanningTree(gPlus); + + /* We should recover the following spanning tree: + x2 + / \ + / \ + x3 x1 + / + / + x0 + | + a + */ + using initialize::kAnchorKey; + EXPECT_LONGS_EQUAL(kAnchorKey, tree[kAnchorKey]); + EXPECT_LONGS_EQUAL(kAnchorKey, tree[x0]); + EXPECT_LONGS_EQUAL(x0, tree[x1]); + EXPECT_LONGS_EQUAL(x1, tree[x2]); + EXPECT_LONGS_EQUAL(x2, tree[x3]); +} + /* *************************************************************************** */ TEST( Lago, checkSTandChords ) { NonlinearFactorGraph g = simpleLago::graph(); - PredecessorMap tree = findMinimumSpanningTree >(g); + auto gPlus = initialize::buildPoseGraph(g); + lago::PredecessorMap tree = lago::findMinimumSpanningTree(gPlus); lago::key2doubleMap deltaThetaMap; vector spanningTreeIds; // ids of between factors forming the spanning tree T vector chordsIds; // ids of between factors corresponding to chordsIds wrt T lago::getSymbolicGraph(spanningTreeIds, chordsIds, deltaThetaMap, tree, g); - DOUBLES_EQUAL(spanningTreeIds[0], 0, 1e-6); // factor 0 is the first in the ST (0->1) - DOUBLES_EQUAL(spanningTreeIds[1], 3, 1e-6); // factor 3 is the second in the ST(2->0) - DOUBLES_EQUAL(spanningTreeIds[2], 4, 1e-6); // factor 4 is the third in the ST(0->3) + EXPECT_LONGS_EQUAL(0, spanningTreeIds[0]); // factor 0 is the first in the ST(0->1) + EXPECT_LONGS_EQUAL(1, spanningTreeIds[1]); // factor 1 is the second in the ST(1->2) + EXPECT_LONGS_EQUAL(2, spanningTreeIds[2]); // factor 2 is the third in the ST(2->3) } /* *************************************************************************** */ -TEST( Lago, orientationsOverSpanningTree ) { +TEST(Lago, 
orientationsOverSpanningTree) { NonlinearFactorGraph g = simpleLago::graph(); - PredecessorMap tree = findMinimumSpanningTree >(g); + auto gPlus = initialize::buildPoseGraph(g); + lago::PredecessorMap tree = lago::findMinimumSpanningTree(gPlus); // check the tree structure - EXPECT_LONGS_EQUAL(tree[x0], x0); - EXPECT_LONGS_EQUAL(tree[x1], x0); - EXPECT_LONGS_EQUAL(tree[x2], x0); - EXPECT_LONGS_EQUAL(tree[x3], x0); + using initialize::kAnchorKey; + + EXPECT_LONGS_EQUAL(kAnchorKey, tree[x0]); + EXPECT_LONGS_EQUAL(x0, tree[x1]); + EXPECT_LONGS_EQUAL(x1, tree[x2]); + EXPECT_LONGS_EQUAL(x2, tree[x3]); lago::key2doubleMap expected; - expected[x0]= 0; - expected[x1]= M_PI/2; // edge x0->x1 (consistent with edge (x0,x1)) - expected[x2]= -M_PI; // edge x0->x2 (traversed backwards wrt edge (x2,x0)) - expected[x3]= -M_PI/2; // edge x0->x3 (consistent with edge (x0,x3)) + expected[x0] = 0; + expected[x1] = M_PI / 2; // edges traversed: x0->x1 + expected[x2] = M_PI; // edges traversed: x0->x1->x2 + expected[x3] = 3 * M_PI / 2; // edges traversed: x0->x1->x2->x3 lago::key2doubleMap deltaThetaMap; - vector spanningTreeIds; // ids of between factors forming the spanning tree T - vector chordsIds; // ids of between factors corresponding to chordsIds wrt T - lago::getSymbolicGraph(spanningTreeIds, chordsIds, deltaThetaMap, tree, g); + vector + spanningTreeIds; // ids of between factors forming the spanning tree T + vector + chordsIds; // ids of between factors corresponding to chordsIds wrt T + lago::getSymbolicGraph(spanningTreeIds, chordsIds, deltaThetaMap, tree, + gPlus); lago::key2doubleMap actual; actual = lago::computeThetasToRoot(deltaThetaMap, tree); + DOUBLES_EQUAL(expected[x0], actual[x0], 1e-6); DOUBLES_EQUAL(expected[x1], actual[x1], 1e-6); DOUBLES_EQUAL(expected[x2], actual[x2], 1e-6); @@ -115,24 +147,24 @@ TEST( Lago, orientationsOverSpanningTree ) { /* *************************************************************************** */ TEST( Lago, regularizedMeasurements ) { NonlinearFactorGraph g = simpleLago::graph(); - PredecessorMap tree = findMinimumSpanningTree >(g); + auto gPlus = initialize::buildPoseGraph(g); + lago::PredecessorMap tree = lago::findMinimumSpanningTree(gPlus); lago::key2doubleMap deltaThetaMap; vector spanningTreeIds; // ids of between factors forming the spanning tree T vector chordsIds; // ids of between factors corresponding to chordsIds wrt T - lago::getSymbolicGraph(spanningTreeIds, chordsIds, deltaThetaMap, tree, g); + lago::getSymbolicGraph(spanningTreeIds, chordsIds, deltaThetaMap, tree, gPlus); lago::key2doubleMap orientationsToRoot = lago::computeThetasToRoot(deltaThetaMap, tree); - GaussianFactorGraph lagoGraph = lago::buildLinearOrientationGraph(spanningTreeIds, chordsIds, g, orientationsToRoot, tree); + GaussianFactorGraph lagoGraph = lago::buildLinearOrientationGraph(spanningTreeIds, chordsIds, gPlus, orientationsToRoot, tree); std::pair actualAb = lagoGraph.jacobian(); // jacobian corresponding to the orientation measurements (last entry is the prior on the anchor and is disregarded) Vector actual = (Vector(5) << actualAb.second(0),actualAb.second(1),actualAb.second(2),actualAb.second(3),actualAb.second(4)).finished(); // this is the whitened error, so we multiply by the std to unwhiten actual = 0.1 * actual; // Expected regularized measurements (same for the spanning tree, corrected for the chordsIds) - Vector expected = (Vector(5) << M_PI/2, M_PI, -M_PI/2, M_PI/2 - 2*M_PI , M_PI/2).finished(); + Vector expected = (Vector(5) << M_PI/2, M_PI/2, M_PI/2, 0 , 
-M_PI).finished(); EXPECT(assert_equal(expected, actual, 1e-6)); } @@ -145,8 +177,8 @@ TEST( Lago, smallGraphVectorValues ) { // comparison is up to M_PI, that's why we add some multiples of 2*M_PI EXPECT(assert_equal((Vector(1) << 0.0).finished(), initial.at(x0), 1e-6)); EXPECT(assert_equal((Vector(1) << 0.5 * M_PI).finished(), initial.at(x1), 1e-6)); - EXPECT(assert_equal((Vector(1) << M_PI - 2*M_PI).finished(), initial.at(x2), 1e-6)); - EXPECT(assert_equal((Vector(1) << 1.5 * M_PI - 2*M_PI).finished(), initial.at(x3), 1e-6)); + EXPECT(assert_equal((Vector(1) << M_PI).finished(), initial.at(x2), 1e-6)); + EXPECT(assert_equal((Vector(1) << 1.5 * M_PI).finished(), initial.at(x3), 1e-6)); } /* *************************************************************************** */ @@ -171,8 +203,8 @@ TEST( Lago, multiplePosePriors ) { // comparison is up to M_PI, that's why we add some multiples of 2*M_PI EXPECT(assert_equal((Vector(1) << 0.0).finished(), initial.at(x0), 1e-6)); EXPECT(assert_equal((Vector(1) << 0.5 * M_PI).finished(), initial.at(x1), 1e-6)); - EXPECT(assert_equal((Vector(1) << M_PI - 2*M_PI).finished(), initial.at(x2), 1e-6)); - EXPECT(assert_equal((Vector(1) << 1.5 * M_PI - 2*M_PI).finished(), initial.at(x3), 1e-6)); + EXPECT(assert_equal((Vector(1) << M_PI).finished(), initial.at(x2), 1e-6)); + EXPECT(assert_equal((Vector(1) << 1.5 * M_PI).finished(), initial.at(x3), 1e-6)); } /* *************************************************************************** */ @@ -194,12 +226,12 @@ TEST( Lago, multiplePoseAndRotPriors ) { NonlinearFactorGraph g = simpleLago::graph(); g.addPrior(x1, simpleLago::pose1.theta(), model); VectorValues initial = lago::initializeOrientations(g, useOdometricPath); - + // comparison is up to M_PI, that's why we add some multiples of 2*M_PI EXPECT(assert_equal((Vector(1) << 0.0).finished(), initial.at(x0), 1e-6)); EXPECT(assert_equal((Vector(1) << 0.5 * M_PI).finished(), initial.at(x1), 1e-6)); - EXPECT(assert_equal((Vector(1) << M_PI - 2*M_PI).finished(), initial.at(x2), 1e-6)); - EXPECT(assert_equal((Vector(1) << 1.5 * M_PI - 2*M_PI).finished(), initial.at(x3), 1e-6)); + EXPECT(assert_equal((Vector(1) << M_PI).finished(), initial.at(x2), 1e-6)); + EXPECT(assert_equal((Vector(1) << 1.5 * M_PI).finished(), initial.at(x3), 1e-6)); } /* *************************************************************************** */ @@ -258,9 +290,7 @@ TEST( Lago, smallGraph2 ) { TEST( Lago, largeGraphNoisy_orientations ) { string inputFile = findExampleDataFile("noisyToyGraph"); - NonlinearFactorGraph::shared_ptr g; - Values::shared_ptr initial; - boost::tie(g, initial) = readG2o(inputFile); + const auto [g, initial] = readG2o(inputFile); // Add prior on the pose having index (key) = 0 NonlinearFactorGraph graphWithPrior = *g; @@ -279,9 +309,7 @@ TEST( Lago, largeGraphNoisy_orientations ) { } } string matlabFile = findExampleDataFile("orientationsNoisyToyGraph"); - NonlinearFactorGraph::shared_ptr gmatlab; - Values::shared_ptr expected; - boost::tie(gmatlab, expected) = readG2o(matlabFile); + const auto [gmatlab, expected] = readG2o(matlabFile); for(const auto& key_pose: expected->extract()){ const Key& k = key_pose.first; @@ -294,9 +322,7 @@ TEST( Lago, largeGraphNoisy_orientations ) { TEST( Lago, largeGraphNoisy ) { string inputFile = findExampleDataFile("noisyToyGraph"); - NonlinearFactorGraph::shared_ptr g; - Values::shared_ptr initial; - boost::tie(g, initial) = readG2o(inputFile); + const auto [g, initial] = readG2o(inputFile); // Add prior on the pose having index (key) = 0 
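The regularized-measurement expectations updated in testLago above follow from the wraparound correction that buildLinearOrientationGraph applies to every chord: the loop residual is rounded to the nearest whole number of turns, and that many multiples of 2*pi are subtracted from the chord's relative angle. A small illustrative sketch of just that step, mirroring the lago.cpp variable names but not the library function itself (assumes M_PI is available, as elsewhere in these sources):

    #include <cmath>

    // deltaTheta: relative orientation measured along the chord (key1 -> key2)
    // thetaRoot1/2: accumulated spanning-tree orientations of the two endpoints
    double regularizeChord(double deltaTheta, double thetaRoot1, double thetaRoot2) {
      const double k2pi_noise = deltaTheta + thetaRoot1 - thetaRoot2;  // residual around the induced loop
      const double k = std::round(k2pi_noise / (2 * M_PI));            // whole turns to remove
      return deltaTheta - 2 * k * M_PI;
    }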
NonlinearFactorGraph graphWithPrior = *g; @@ -306,9 +332,7 @@ TEST( Lago, largeGraphNoisy ) { Values actual = lago::initialize(graphWithPrior); string matlabFile = findExampleDataFile("optimizedNoisyToyGraph"); - NonlinearFactorGraph::shared_ptr gmatlab; - Values::shared_ptr expected; - boost::tie(gmatlab, expected) = readG2o(matlabFile); + const auto [gmatlab, expected] = readG2o(matlabFile); for(const auto& key_pose: expected->extract()){ const Key& k = key_pose.first; diff --git a/gtsam/slam/tests/testRegularImplicitSchurFactor.cpp b/gtsam/slam/tests/testRegularImplicitSchurFactor.cpp index 9f39082a5..98809e959 100644 --- a/gtsam/slam/tests/testRegularImplicitSchurFactor.cpp +++ b/gtsam/slam/tests/testRegularImplicitSchurFactor.cpp @@ -27,8 +27,6 @@ #include #include -#include -#include #include using namespace std; diff --git a/gtsam/slam/tests/testTriangulationFactor.cpp b/gtsam/slam/tests/testTriangulationFactor.cpp index 4173a54f8..29b0330c6 100644 --- a/gtsam/slam/tests/testTriangulationFactor.cpp +++ b/gtsam/slam/tests/testTriangulationFactor.cpp @@ -25,8 +25,6 @@ #include #include -#include - using namespace std; using namespace gtsam; using namespace std::placeholders; diff --git a/gtsam/symbolic/SymbolicBayesNet.h b/gtsam/symbolic/SymbolicBayesNet.h index f2c142762..bbfc968e1 100644 --- a/gtsam/symbolic/SymbolicBayesNet.h +++ b/gtsam/symbolic/SymbolicBayesNet.h @@ -69,7 +69,7 @@ namespace gtsam { /// Construct from a single conditional SymbolicBayesNet(SymbolicConditional&& c) { - push_back(std::make_shared(c)); + emplace_shared(c); } /** @@ -79,7 +79,7 @@ namespace gtsam { * SymbolicBayesNet(SymbolicConditional(...))(SymbolicConditional(...)); */ SymbolicBayesNet& operator()(SymbolicConditional&& c) { - push_back(std::make_shared(c)); + emplace_shared(c); return *this; } diff --git a/gtsam/symbolic/SymbolicFactor-inst.h b/gtsam/symbolic/SymbolicFactor-inst.h index 85ddf9c79..db0eb05ca 100644 --- a/gtsam/symbolic/SymbolicFactor-inst.h +++ b/gtsam/symbolic/SymbolicFactor-inst.h @@ -61,9 +61,10 @@ namespace gtsam std::set_difference(allKeys.begin(), allKeys.end(), frontals.begin(), frontals.end(), orderedKeys.begin() + nFrontals); // Return resulting conditional and factor - return std::make_pair( + return { SymbolicConditional::FromKeysShared(orderedKeys, nFrontals), - SymbolicFactor::FromIteratorsShared(orderedKeys.begin() + nFrontals, orderedKeys.end())); + SymbolicFactor::FromIteratorsShared(orderedKeys.begin() + nFrontals, orderedKeys.end()) + }; } } } diff --git a/gtsam/symbolic/SymbolicFactor.cpp b/gtsam/symbolic/SymbolicFactor.cpp index e51b3cf29..1f26389e9 100644 --- a/gtsam/symbolic/SymbolicFactor.cpp +++ b/gtsam/symbolic/SymbolicFactor.cpp @@ -49,7 +49,7 @@ namespace gtsam { SymbolicFactor::eliminate(const Ordering& keys) const { SymbolicFactorGraph graph; - graph += *this; // TODO: Is there a way to avoid copying this factor? + graph.push_back(*this); // TODO: Is there a way to avoid copying this factor? 
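The symbolic-graph hunks that follow all apply one pattern: instead of building a std::shared_ptr with std::make_shared and pushing it, the factor is constructed in place through the graph's emplace_shared member, which forwards the keys straight to the factor constructor. A short usage sketch, assuming the GTSAM symbolic headers shown in this diff:

    #include <gtsam/symbolic/SymbolicFactorGraph.h>

    using namespace gtsam;

    int main() {
      SymbolicFactorGraph graph;
      // Previously: graph.push_back(std::make_shared<SymbolicFactor>(0, 1));
      graph.emplace_shared<SymbolicFactor>(0, 1);  // binary factor built in place
      graph.emplace_shared<SymbolicFactor>(1, 2);
      return graph.size() == 2 ? 0 : 1;
    }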
return EliminateSymbolic(graph, keys); } diff --git a/gtsam/symbolic/SymbolicFactorGraph.cpp b/gtsam/symbolic/SymbolicFactorGraph.cpp index ddcb3b2f3..1f17b8af2 100644 --- a/gtsam/symbolic/SymbolicFactorGraph.cpp +++ b/gtsam/symbolic/SymbolicFactorGraph.cpp @@ -40,22 +40,22 @@ namespace gtsam { /* ************************************************************************* */ void SymbolicFactorGraph::push_factor(Key key) { - push_back(std::make_shared(key)); + emplace_shared(key); } /* ************************************************************************* */ void SymbolicFactorGraph::push_factor(Key key1, Key key2) { - push_back(std::make_shared(key1,key2)); + emplace_shared(key1,key2); } /* ************************************************************************* */ void SymbolicFactorGraph::push_factor(Key key1, Key key2, Key key3) { - push_back(std::make_shared(key1,key2,key3)); + emplace_shared(key1,key2,key3); } /* ************************************************************************* */ void SymbolicFactorGraph::push_factor(Key key1, Key key2, Key key3, Key key4) { - push_back(std::make_shared(key1,key2,key3,key4)); + emplace_shared(key1,key2,key3,key4); } /* ************************************************************************* */ diff --git a/gtsam/symbolic/SymbolicFactorGraph.h b/gtsam/symbolic/SymbolicFactorGraph.h index 01b532a5c..fdd5bc27c 100644 --- a/gtsam/symbolic/SymbolicFactorGraph.h +++ b/gtsam/symbolic/SymbolicFactorGraph.h @@ -97,7 +97,7 @@ namespace gtsam { /// Construct from a single factor SymbolicFactorGraph(SymbolicFactor&& c) { - push_back(std::make_shared(c)); + emplace_shared(c); } /** @@ -107,7 +107,7 @@ namespace gtsam { * SymbolicFactorGraph(SymbolicFactor(...))(SymbolicFactor(...)); */ SymbolicFactorGraph& operator()(SymbolicFactor&& c) { - push_back(std::make_shared(c)); + emplace_shared(c); return *this; } diff --git a/gtsam/symbolic/tests/testSymbolicBayesNet.cpp b/gtsam/symbolic/tests/testSymbolicBayesNet.cpp index 64caf399d..bf2b6a214 100644 --- a/gtsam/symbolic/tests/testSymbolicBayesNet.cpp +++ b/gtsam/symbolic/tests/testSymbolicBayesNet.cpp @@ -83,9 +83,9 @@ TEST(SymbolicBayesNet, Dot) { using symbol_shorthand::A; using symbol_shorthand::X; SymbolicBayesNet bn; - bn += SymbolicConditional(X(3), X(2), A(2)); - bn += SymbolicConditional(X(2), X(1), A(1)); - bn += SymbolicConditional(X(1)); + bn.emplace_shared(X(3), X(2), A(2)); + bn.emplace_shared(X(2), X(1), A(1)); + bn.emplace_shared(X(1)); DotWriter writer; writer.positionHints.emplace('a', 2); diff --git a/gtsam/symbolic/tests/testSymbolicBayesTree.cpp b/gtsam/symbolic/tests/testSymbolicBayesTree.cpp index d14523719..e4a47bdfb 100644 --- a/gtsam/symbolic/tests/testSymbolicBayesTree.cpp +++ b/gtsam/symbolic/tests/testSymbolicBayesTree.cpp @@ -22,11 +22,10 @@ #include #include -#include -using boost::adaptors::indirected; - #include #include +#include +#include using namespace std; using namespace gtsam; @@ -34,6 +33,24 @@ using namespace gtsam::symbol_shorthand; static bool debug = false; +// Given a vector of shared pointers infer the type of the pointed-to objects +template +using PointedToType = std::decay_t().begin())>; + +// Given a vector of shared pointers infer the type of the pointed-to objects +template +using ValuesVector = std::vector>; + +// Return a vector of dereferenced values +template +ValuesVector deref(const T& v) { + ValuesVector result; + for (auto& t : v) + result.push_back(*t); + return result; +} + + /* 
************************************************************************* */ TEST(SymbolicBayesTree, clear) { SymbolicBayesTree bayesTree = asiaBayesTree; @@ -111,8 +128,7 @@ TEST(BayesTree, removePath) { bayesTree.removePath(bayesTree[_C_], &bn, &orphans); SymbolicFactorGraph factors(bn); CHECK(assert_equal(expected, factors)); - CHECK(assert_container_equal(expectedOrphans | indirected, - orphans | indirected)); + CHECK(assert_container_equal(deref(expectedOrphans), deref(orphans))); bayesTree = bayesTreeOrig; @@ -127,8 +143,7 @@ TEST(BayesTree, removePath) { bayesTree.removePath(bayesTree[_E_], &bn2, &orphans2); SymbolicFactorGraph factors2(bn2); CHECK(assert_equal(expected2, factors2)); - CHECK(assert_container_equal(expectedOrphans2 | indirected, - orphans2 | indirected)); + CHECK(assert_container_equal(deref(expectedOrphans2), deref(orphans2))); } /* ************************************************************************* */ @@ -147,8 +162,7 @@ TEST(BayesTree, removePath2) { CHECK(assert_equal(expected, factors)); SymbolicBayesTree::Cliques expectedOrphans{bayesTree[_S_], bayesTree[_T_], bayesTree[_X_]}; - CHECK(assert_container_equal(expectedOrphans | indirected, - orphans | indirected)); + CHECK(assert_container_equal(deref(expectedOrphans), deref(orphans))); } /* ************************************************************************* */ @@ -167,8 +181,7 @@ TEST(BayesTree, removePath3) { expected.emplace_shared(_T_, _E_, _L_); CHECK(assert_equal(expected, factors)); SymbolicBayesTree::Cliques expectedOrphans{bayesTree[_S_], bayesTree[_X_]}; - CHECK(assert_container_equal(expectedOrphans | indirected, - orphans | indirected)); + CHECK(assert_container_equal(deref(expectedOrphans), deref(orphans))); } void getAllCliques(const SymbolicBayesTree::sharedClique& subtree, @@ -244,13 +257,12 @@ TEST(BayesTree, removeTop) { // Check expected outcome SymbolicBayesNet expected; - expected += SymbolicConditional::FromKeys(Keys(_E_)(_L_)(_B_), 3); - expected += SymbolicConditional::FromKeys(Keys(_S_)(_B_)(_L_), 1); + expected.add(SymbolicConditional::FromKeys(Keys(_E_)(_L_)(_B_), 3)); + expected.add(SymbolicConditional::FromKeys(Keys(_S_)(_B_)(_L_), 1)); CHECK(assert_equal(expected, bn)); SymbolicBayesTree::Cliques expectedOrphans{bayesTree[_T_], bayesTree[_X_]}; - CHECK(assert_container_equal(expectedOrphans | indirected, - orphans | indirected)); + CHECK(assert_container_equal(deref(expectedOrphans), deref(orphans))); // Try removeTop again with a factor that should not change a thing // std::shared_ptr newFactor2(new IndexFactor(_B_)); @@ -261,8 +273,7 @@ TEST(BayesTree, removeTop) { SymbolicFactorGraph expected2; CHECK(assert_equal(expected2, factors2)); SymbolicBayesTree::Cliques expectedOrphans2; - CHECK(assert_container_equal(expectedOrphans2 | indirected, - orphans2 | indirected)); + CHECK(assert_container_equal(deref(expectedOrphans2), deref(orphans2))); } /* ************************************************************************* */ @@ -286,8 +297,7 @@ TEST(BayesTree, removeTop2) { CHECK(assert_equal(expected, bn)); SymbolicBayesTree::Cliques expectedOrphans{bayesTree[_S_], bayesTree[_X_]}; - CHECK(assert_container_equal(expectedOrphans | indirected, - orphans | indirected)); + CHECK(assert_container_equal(deref(expectedOrphans), deref(orphans))); } /* ************************************************************************* */ diff --git a/gtsam/symbolic/tests/testSymbolicEliminationTree.cpp b/gtsam/symbolic/tests/testSymbolicEliminationTree.cpp index c93eb8593..47ef754a6 100644 
--- a/gtsam/symbolic/tests/testSymbolicEliminationTree.cpp +++ b/gtsam/symbolic/tests/testSymbolicEliminationTree.cpp @@ -110,19 +110,20 @@ TEST(EliminationTree, Create2) { // \ | // l3 SymbolicFactorGraph graph; + graph.emplace_shared(X(1), L(1)); + graph.emplace_shared(X(1), X(2)); + graph.emplace_shared(X(2), L(1)); + graph.emplace_shared(X(2), X(3)); + graph.emplace_shared(X(3), X(4)); + graph.emplace_shared(X(4), L(2)); + graph.emplace_shared(X(4), X(5)); + graph.emplace_shared(L(2), X(5)); + graph.emplace_shared(X(4), L(3)); + graph.emplace_shared(X(5), L(3)); + auto binary = [](Key j1, Key j2) -> SymbolicFactor { return SymbolicFactor(j1, j2); }; - graph += binary(X(1), L(1)); - graph += binary(X(1), X(2)); - graph += binary(X(2), L(1)); - graph += binary(X(2), X(3)); - graph += binary(X(3), X(4)); - graph += binary(X(4), L(2)); - graph += binary(X(4), X(5)); - graph += binary(L(2), X(5)); - graph += binary(X(4), L(3)); - graph += binary(X(5), L(3)); SymbolicEliminationTree expected = EliminationTreeTester::MakeTree( // ChildNodes( // diff --git a/gtsam/symbolic/tests/testSymbolicFactor.cpp b/gtsam/symbolic/tests/testSymbolicFactor.cpp index 646b80ad6..7ba538d8e 100644 --- a/gtsam/symbolic/tests/testSymbolicFactor.cpp +++ b/gtsam/symbolic/tests/testSymbolicFactor.cpp @@ -21,7 +21,6 @@ #include #include -#include using namespace std; using namespace gtsam; @@ -75,9 +74,7 @@ TEST(SymbolicFactor, EliminateSymbolic) const SymbolicConditional expectedConditional = SymbolicConditional::FromKeys(KeyVector{0,1,2,3,4,5,6}, 4); - SymbolicFactor::shared_ptr actualFactor; - SymbolicConditional::shared_ptr actualConditional; - boost::tie(actualConditional, actualFactor) = + const auto [actualConditional, actualFactor] = EliminateSymbolic(factors, Ordering{0, 1, 2, 3}); CHECK(assert_equal(expectedConditional, *actualConditional)); diff --git a/gtsam/symbolic/tests/testSymbolicFactorGraph.cpp b/gtsam/symbolic/tests/testSymbolicFactorGraph.cpp index 6c8924be4..8afb506ab 100644 --- a/gtsam/symbolic/tests/testSymbolicFactorGraph.cpp +++ b/gtsam/symbolic/tests/testSymbolicFactorGraph.cpp @@ -65,17 +65,13 @@ TEST(SymbolicFactorGraph, eliminatePartialSequential) { const auto expectedSfg = SymbolicFactorGraph(SymbolicFactor(2, 3))( SymbolicFactor(4, 5))(SymbolicFactor(2, 3, 4)); - SymbolicBayesNet::shared_ptr actualBayesNet; - SymbolicFactorGraph::shared_ptr actualSfg; - boost::tie(actualBayesNet, actualSfg) = + const auto [actualBayesNet, actualSfg] = simpleTestGraph2.eliminatePartialSequential(Ordering{0, 1}); EXPECT(assert_equal(expectedSfg, *actualSfg)); EXPECT(assert_equal(expectedBayesNet, *actualBayesNet)); - SymbolicBayesNet::shared_ptr actualBayesNet2; - SymbolicFactorGraph::shared_ptr actualSfg2; - boost::tie(actualBayesNet2, actualSfg2) = + const auto [actualBayesNet2, actualSfg2] = simpleTestGraph2.eliminatePartialSequential(Ordering{0, 1}); EXPECT(assert_equal(expectedSfg, *actualSfg2)); @@ -84,8 +80,7 @@ TEST(SymbolicFactorGraph, eliminatePartialSequential) { /* ************************************************************************* */ TEST(SymbolicFactorGraph, eliminateFullMultifrontal) { - Ordering ordering; - ordering += 0, 1, 2, 3; + Ordering ordering{0, 1, 2, 3}; SymbolicBayesTree actual1 = *simpleChain.eliminateMultifrontal(ordering); EXPECT(assert_equal(simpleChainBayesTree, actual1)); @@ -106,9 +101,7 @@ TEST(SymbolicFactorGraph, eliminatePartialMultifrontal) { SymbolicFactorGraph(SymbolicFactor(0, 1))(SymbolicFactor(0, 2))( SymbolicFactor(1, 3))(SymbolicFactor(2, 
3))(SymbolicFactor(1)); - SymbolicBayesTree::shared_ptr actualBayesTree; - SymbolicFactorGraph::shared_ptr actualFactorGraph; - boost::tie(actualBayesTree, actualFactorGraph) = + const auto [actualBayesTree, actualFactorGraph] = simpleTestGraph2.eliminatePartialMultifrontal(Ordering{4, 5}); EXPECT(assert_equal(expectedFactorGraph, *actualFactorGraph)); @@ -122,9 +115,7 @@ TEST(SymbolicFactorGraph, eliminatePartialMultifrontal) { std::make_shared(5, 4))); expectedBayesTree2.insertRoot(root2); - SymbolicBayesTree::shared_ptr actualBayesTree2; - SymbolicFactorGraph::shared_ptr actualFactorGraph2; - boost::tie(actualBayesTree2, actualFactorGraph2) = + const auto [actualBayesTree2, actualFactorGraph2] = simpleTestGraph2.eliminatePartialMultifrontal(KeyVector{4, 5}); EXPECT(assert_equal(expectedFactorGraph, *actualFactorGraph2)); @@ -132,14 +123,87 @@ TEST(SymbolicFactorGraph, eliminatePartialMultifrontal) { } /* ************************************************************************* */ -TEST(SymbolicFactorGraph, marginalMultifrontalBayesNet) { - auto expectedBayesNet = - SymbolicBayesNet(SymbolicConditional(0, 1, 2))(SymbolicConditional( - 1, 2, 3))(SymbolicConditional(2, 3))(SymbolicConditional(3)); - - SymbolicBayesNet actual1 = +TEST(SymbolicFactorGraph, MarginalMultifrontalBayesNetOrdering) { + SymbolicBayesNet actual = *simpleTestGraph2.marginalMultifrontalBayesNet(Ordering{0, 1, 2, 3}); - EXPECT(assert_equal(expectedBayesNet, actual1)); + auto expectedBayesNet = SymbolicBayesNet({0, 1, 2})({1, 2, 3})({2, 3})({3}); + EXPECT(assert_equal(expectedBayesNet, actual)); +} + +TEST(SymbolicFactorGraph, MarginalMultifrontalBayesNetKeyVector) { + SymbolicBayesNet actual = + *simpleTestGraph2.marginalMultifrontalBayesNet(KeyVector{0, 1, 2, 3}); + // Since we use KeyVector, the variable ordering will be determined by COLAMD: + auto expectedBayesNet = SymbolicBayesNet({0, 1, 2})({2, 1, 3})({1, 3})({3}); + EXPECT(assert_equal(expectedBayesNet, actual)); +} + +TEST(SymbolicFactorGraph, MarginalMultifrontalBayesNetOrderingPlus) { + const Ordering orderedVariables{0, 3}, + marginalizedVariableOrdering{1, 2, 4, 5}; + SymbolicBayesNet actual = *simpleTestGraph2.marginalMultifrontalBayesNet( + orderedVariables, marginalizedVariableOrdering); + auto expectedBayesNet = SymbolicBayesNet(SymbolicConditional{0, 3})({3}); + EXPECT(assert_equal(expectedBayesNet, actual)); +} + +TEST(SymbolicFactorGraph, MarginalMultifrontalBayesNetKeyVectorPlus) { + const KeyVector variables{0, 1, 3}; + const Ordering marginalizedVariableOrdering{2, 4, 5}; + SymbolicBayesNet actual = *simpleTestGraph2.marginalMultifrontalBayesNet( + variables, marginalizedVariableOrdering); + // Since we use KeyVector, the variable ordering will be determined by COLAMD: + auto expectedBayesNet = SymbolicBayesNet({0, 1, 3})({3, 1})({1}); + EXPECT(assert_equal(expectedBayesNet, actual)); +} + +/* ************************************************************************* */ +TEST(SymbolicFactorGraph, MarginalMultifrontalBayesTreeOrdering) { + auto expectedBayesTree = + *simpleTestGraph2.eliminatePartialMultifrontal(Ordering{4, 5}) + .second->eliminateMultifrontal(Ordering{0, 1, 2, 3}); + + SymbolicBayesTree actual = + *simpleTestGraph2.marginalMultifrontalBayesTree(Ordering{0, 1, 2, 3}); + EXPECT(assert_equal(expectedBayesTree, actual)); +} + +TEST(SymbolicFactorGraph, MarginalMultifrontalBayesTreeKeyVector) { + // Same: KeyVector variant will use COLAMD: + auto expectedBayesTree = + *simpleTestGraph2.eliminatePartialMultifrontal(Ordering{4, 5}) + 
.second->eliminateMultifrontal(Ordering::OrderingType::COLAMD); + + SymbolicBayesTree actual = + *simpleTestGraph2.marginalMultifrontalBayesTree(KeyVector{0, 1, 2, 3}); + EXPECT(assert_equal(expectedBayesTree, actual)); +} + +TEST(SymbolicFactorGraph, MarginalMultifrontalBayesTreeOrderingPlus) { + const Ordering orderedVariables{0, 3}, + marginalizedVariableOrdering{1, 2, 4, 5}; + auto expectedBayesTree = + *simpleTestGraph2 + .eliminatePartialMultifrontal(marginalizedVariableOrdering) + .second->eliminateMultifrontal(orderedVariables); + + SymbolicBayesTree actual = *simpleTestGraph2.marginalMultifrontalBayesTree( + orderedVariables, marginalizedVariableOrdering); + EXPECT(assert_equal(expectedBayesTree, actual)); +} + +TEST(SymbolicFactorGraph, MarginalMultifrontalBayesTreeKeyVectorPlus) { + // Again: KeyVector variant will use COLAMD: + const Ordering marginalizedVariableOrdering{2, 4, 5}; + auto expectedBayesTree = + *simpleTestGraph2 + .eliminatePartialMultifrontal(marginalizedVariableOrdering) + .second->eliminateMultifrontal(Ordering::OrderingType::COLAMD); + + const KeyVector variables{0, 1, 3}; + SymbolicBayesTree actual = *simpleTestGraph2.marginalMultifrontalBayesTree( + variables, marginalizedVariableOrdering); + EXPECT(assert_equal(expectedBayesTree, actual)); } /* ************************************************************************* */ @@ -152,14 +216,13 @@ TEST(SymbolicFactorGraph, eliminate_disconnected_graph) { // create expected Chordal bayes Net SymbolicBayesNet expected; - expected.push_back(std::make_shared(0, 1, 2)); - expected.push_back(std::make_shared(1, 2)); - expected.push_back(std::make_shared(2)); - expected.push_back(std::make_shared(3, 4)); - expected.push_back(std::make_shared(4)); + expected.emplace_shared(0, 1, 2); + expected.emplace_shared(1, 2); + expected.emplace_shared(2); + expected.emplace_shared(3, 4); + expected.emplace_shared(4); - Ordering order; - order += 0, 1, 2, 3, 4; + const Ordering order{0, 1, 2, 3, 4}; SymbolicBayesNet actual = *fg.eliminateSequential(order); EXPECT(assert_equal(expected, actual)); @@ -229,9 +292,9 @@ TEST(SymbolicFactorGraph, constructFromBayesNet) { // create Bayes Net SymbolicBayesNet bayesNet; - bayesNet += SymbolicConditional(0, 1, 2); - bayesNet += SymbolicConditional(1, 2); - bayesNet += SymbolicConditional(1); + bayesNet.emplace_shared(0, 1, 2); + bayesNet.emplace_shared(1, 2); + bayesNet.emplace_shared(1); // create actual factor graph from a Bayes Net SymbolicFactorGraph actual(bayesNet); @@ -286,7 +349,7 @@ TEST(SymbolicFactorGraph, push_back) { TEST(SymbolicFactorGraph, add_factors) { SymbolicFactorGraph fg1; fg1.push_factor(10); - fg1 += SymbolicFactor::shared_ptr(); // empty slot! + fg1.push_back(SymbolicFactor::shared_ptr()); // empty slot! 
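The symbolic-test hunks above all apply the same three modernizations; a small self-contained sketch of the new idioms (standard GTSAM symbolic API, arbitrary keys chosen for illustration):

#include <gtsam/inference/Ordering.h>
#include <gtsam/symbolic/SymbolicFactorGraph.h>

using namespace gtsam;

void modernTestIdioms() {
  // 1. Brace-initialized Ordering instead of "ordering += 0, 1, 2, 3;"
  const Ordering ordering{0, 1, 2, 3};

  // 2. emplace_shared instead of "graph += SymbolicFactor(...)"
  SymbolicFactorGraph graph;
  graph.emplace_shared<SymbolicFactor>(0, 1);
  graph.emplace_shared<SymbolicFactor>(1, 2);

  // 3. Structured bindings instead of boost::tie on elimination results
  const auto [bayesNet, remainingGraph] =
      graph.eliminatePartialSequential(Ordering{0});
  (void)bayesNet;
  (void)remainingGraph;
}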
fg1.push_factor(11); SymbolicFactorGraph fg2; diff --git a/gtsam/symbolic/tests/testSymbolicISAM.cpp b/gtsam/symbolic/tests/testSymbolicISAM.cpp index e84af28a3..c09f6f67e 100644 --- a/gtsam/symbolic/tests/testSymbolicISAM.cpp +++ b/gtsam/symbolic/tests/testSymbolicISAM.cpp @@ -79,8 +79,8 @@ TEST( SymbolicISAM, iSAM ) // Now we modify the Bayes tree by inserting a new factor over B and S SymbolicFactorGraph fullGraph; - fullGraph += asiaGraph; - fullGraph += SymbolicFactor(_B_, _S_); + fullGraph.push_back(asiaGraph); + fullGraph.emplace_shared(_B_, _S_); // This ordering is chosen to match the one chosen by COLAMD during the ISAM update Ordering ordering {_X_, _B_, _S_, _E_, _L_, _T_}; diff --git a/gtsam/symbolic/tests/testSymbolicJunctionTree.cpp b/gtsam/symbolic/tests/testSymbolicJunctionTree.cpp index 796bdc49e..100814fe6 100644 --- a/gtsam/symbolic/tests/testSymbolicJunctionTree.cpp +++ b/gtsam/symbolic/tests/testSymbolicJunctionTree.cpp @@ -35,7 +35,7 @@ using namespace std; ****************************************************************************/ TEST( JunctionTree, constructor ) { - Ordering order; order += 0, 1, 2, 3; + const Ordering order{0, 1, 2, 3}; SymbolicJunctionTree actual(SymbolicEliminationTree(simpleChain, order)); diff --git a/gtsam_unstable/CMakeLists.txt b/gtsam_unstable/CMakeLists.txt index 98a1b4ef9..de59f547e 100644 --- a/gtsam_unstable/CMakeLists.txt +++ b/gtsam_unstable/CMakeLists.txt @@ -29,6 +29,22 @@ set (excluded_headers # "") "${CMAKE_CURRENT_SOURCE_DIR}/slam/serialization.h" ) +# if GTSAM_USE_BOOST_FEATURES is not set, then we need to exclude the following: +if(NOT GTSAM_USE_BOOST_FEATURES) + list (APPEND excluded_sources + "${CMAKE_CURRENT_SOURCE_DIR}/linear/QPSParser.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/linear/QPSSolver.cpp" + "${CMAKE_CURRENT_SOURCE_DIR}/discrete/Scheduler.cpp" + ) + list (APPEND excluded_headers + "${CMAKE_CURRENT_SOURCE_DIR}/linear/QPSParser.h" + "${CMAKE_CURRENT_SOURCE_DIR}/linear/QPSSolver.h" + "${CMAKE_CURRENT_SOURCE_DIR}/discrete/Scheduler.h" + "${CMAKE_CURRENT_SOURCE_DIR}/parition/FindSeparator.h" + "${CMAKE_CURRENT_SOURCE_DIR}/parition/FindSeparator-inl.h" + ) +endif() + # assemble core libraries foreach(subdir ${gtsam_unstable_subdirs}) # Build convenience libraries diff --git a/gtsam_unstable/discrete/Constraint.h b/gtsam_unstable/discrete/Constraint.h index 0622c833c..3526a282d 100644 --- a/gtsam_unstable/discrete/Constraint.h +++ b/gtsam_unstable/discrete/Constraint.h @@ -21,7 +21,6 @@ #include #include -#include #include namespace gtsam { @@ -86,13 +85,13 @@ class GTSAM_UNSTABLE_EXPORT Constraint : public DiscreteFactor { /// Render as markdown table. std::string markdown(const KeyFormatter& keyFormatter = DefaultKeyFormatter, const Names& names = {}) const override { - return (boost::format("`Constraint` on %1% variables\n") % (size())).str(); + return "`Constraint` on " + std::to_string(size()) + " variables\n"; } /// Render as html table. std::string html(const KeyFormatter& keyFormatter = DefaultKeyFormatter, const Names& names = {}) const override { - return (boost::format("

Constraint on %1% variables") % (size())).str(); + return "Constraint on " + std::to_string(size()) + " variables
"; } /// @} diff --git a/gtsam_unstable/discrete/Scheduler.cpp b/gtsam_unstable/discrete/Scheduler.cpp index 9543ab7cf..0de4d32c4 100644 --- a/gtsam_unstable/discrete/Scheduler.cpp +++ b/gtsam_unstable/discrete/Scheduler.cpp @@ -248,7 +248,7 @@ DiscreteBayesNet::shared_ptr Scheduler::eliminate() const { // TODO: fix this!! size_t maxKey = keys().size(); Ordering defaultKeyOrdering; - for (size_t i = 0; i < maxKey; ++i) defaultKeyOrdering += Key(i); + for (size_t i = 0; i < maxKey; ++i) defaultKeyOrdering.push_back(i); DiscreteBayesNet::shared_ptr chordal = this->eliminateSequential(defaultKeyOrdering); gttoc(my_eliminate); diff --git a/gtsam_unstable/discrete/examples/CMakeLists.txt b/gtsam_unstable/discrete/examples/CMakeLists.txt index da06b7dfc..ba4e278c9 100644 --- a/gtsam_unstable/discrete/examples/CMakeLists.txt +++ b/gtsam_unstable/discrete/examples/CMakeLists.txt @@ -1,5 +1,15 @@ -set (excluded_examples - # fileToExclude.cpp -) +# disable tests if GTSAM_USE_BOOST_FEATURES is OFF +if (NOT GTSAM_USE_BOOST_FEATURES) + set (excluded_examples + # fileToExclude.cpp + "schedulingExample.cpp" + "schedulingQuals12.cpp" + "schedulingQuals13.cpp" + ) +else() + set (excluded_examples "") +endif() + + gtsamAddExamplesGlob("*.cpp" "${excluded_examples}" "gtsam_unstable") diff --git a/gtsam_unstable/discrete/examples/schedulingExample.cpp b/gtsam_unstable/discrete/examples/schedulingExample.cpp index dd92b295d..2b52a3e3a 100644 --- a/gtsam_unstable/discrete/examples/schedulingExample.cpp +++ b/gtsam_unstable/discrete/examples/schedulingExample.cpp @@ -12,8 +12,6 @@ #include #include -#include - #include using namespace std; @@ -171,8 +169,8 @@ void solveStaged(size_t addMutex = 2) { // remove this slot from consideration slotsAvailable[bestSlot] = 0.0; - cout << boost::format("%s = %d (%d), count = %d") % scheduler.studentName(6-s) - % scheduler.slotName(bestSlot) % bestSlot % count << endl; + cout << scheduler.studentName(6 - s) << " = " << scheduler.slotName(bestSlot) << " (" << bestSlot + << "), count = " << count << endl; } tictoc_print_(); @@ -229,9 +227,8 @@ void sampleSolutions() { size_t min = *min_element(stats.begin(), stats.end()); size_t nz = count_if(stats.begin(), stats.end(), NonZero); if (nz >= 15 && max <= 2) { - cout << boost::format( - "Sampled schedule %d, min = %d, nz = %d, max = %d\n") % (n + 1) % min - % nz % max; + cout << "Sampled schedule " << (n + 1) << ", min = " << min << ", nz = " << nz + << ", max = " << max << endl; for (size_t i = 0; i < 7; i++) { cout << schedulers[i].studentName(0) << " : " << schedulers[i].slotName( slots[i]) << endl; @@ -320,9 +317,8 @@ void accomodateStudent() { DiscreteValues values; values[dkey.first] = bestSlot; size_t count = (*root)(values); - cout << boost::format("%s = %d (%d), count = %d") % scheduler.studentName(0) - % scheduler.slotName(bestSlot) % bestSlot % count << endl; - + cout << scheduler.studentName(0) << " = " << scheduler.slotName(bestSlot) << " (" << bestSlot + << "), count = " << count << endl; // sample schedules for (size_t n = 0; n < 10; n++) { auto sample0 = chordal->sample(); diff --git a/gtsam_unstable/discrete/examples/schedulingQuals12.cpp b/gtsam_unstable/discrete/examples/schedulingQuals12.cpp index 8c4c0d728..913bfb21a 100644 --- a/gtsam_unstable/discrete/examples/schedulingQuals12.cpp +++ b/gtsam_unstable/discrete/examples/schedulingQuals12.cpp @@ -12,8 +12,6 @@ #include #include -#include - #include using namespace std; @@ -178,9 +176,6 @@ void solveStaged(size_t addMutex = 2) { gttoc_(eliminate); // find 
root node -// chordal->back()->print("back: "); -// chordal->front()->print("front: "); -// exit(0); DiscreteConditional::shared_ptr root = chordal->back(); if (debug) root->print(""/*scheduler.studentName(s)*/); @@ -196,8 +191,9 @@ void solveStaged(size_t addMutex = 2) { // remove this slot from consideration slotsAvailable[bestSlot] = 0.0; - cout << boost::format("%s = %d (%d), count = %d") % scheduler.studentName(NRSTUDENTS-1-s) - % scheduler.slotName(bestSlot) % bestSlot % count << endl; + cout << scheduler.studentName(NRSTUDENTS - 1 - s) << " = " << + scheduler.slotName(bestSlot) << " (" << bestSlot + << "), count = " << count << endl; } tictoc_print_(); } @@ -211,7 +207,6 @@ DiscreteBayesNet::shared_ptr createSampler(size_t i, SETDEBUG("Scheduler::buildGraph", false); scheduler.addStudentSpecificConstraints(0, slot); DiscreteBayesNet::shared_ptr chordal = scheduler.eliminate(); - // chordal->print(scheduler[i].studentKey(0).name()); // large ! schedulers.push_back(scheduler); return chordal; } @@ -238,9 +233,8 @@ void sampleSolutions() { size_t min = *min_element(stats.begin(), stats.end()); size_t nz = count_if(stats.begin(), stats.end(), NonZero); if (nz >= 15 && max <= 2) { - cout << boost::format( - "Sampled schedule %d, min = %d, nz = %d, max = %d\n") % (n + 1) % min - % nz % max; + cout << "Sampled schedule " << (n + 1) << ", min = " << min + << ", nz = " << nz << ", max = " << max << endl; for (size_t i = 0; i < NRSTUDENTS; i++) { cout << schedulers[i].studentName(0) << " : " << schedulers[i].slotName( slots[i]) << endl; diff --git a/gtsam_unstable/discrete/examples/schedulingQuals13.cpp b/gtsam_unstable/discrete/examples/schedulingQuals13.cpp index 06276c516..e52e3d0c6 100644 --- a/gtsam_unstable/discrete/examples/schedulingQuals13.cpp +++ b/gtsam_unstable/discrete/examples/schedulingQuals13.cpp @@ -12,8 +12,6 @@ #include #include -#include - #include using namespace std; @@ -218,8 +216,8 @@ void solveStaged(size_t addMutex = 2) { // remove this slot from consideration slotsAvailable[bestSlot] = 0.0; - cout << boost::format("%s = %d (%d), count = %d") % scheduler.studentName(NRSTUDENTS-1-s) - % scheduler.slotName(bestSlot) % bestSlot % count << endl; + cout << scheduler.studentName(NRSTUDENTS - 1 - s) << " = " << scheduler.slotName(bestSlot) << " (" << bestSlot + << "), count = " << count << endl; } tictoc_print_(); } @@ -263,9 +261,7 @@ void sampleSolutions() { size_t min = *min_element(stats.begin(), stats.end()); size_t nz = count_if(stats.begin(), stats.end(), NonZero); if (nz >= 16 && max <= 3) { - cout << boost::format( - "Sampled schedule %d, min = %d, nz = %d, max = %d\n") % (n + 1) % min - % nz % max; + cout << "Sampled schedule " << n + 1 << ", min = " << min << ", nz = " << nz << ", max = " << max << endl; for (size_t i = 0; i < NRSTUDENTS; i++) { cout << schedulers[i].studentName(0) << " : " << schedulers[i].slotName( slots[i]) << endl; diff --git a/gtsam_unstable/discrete/tests/CMakeLists.txt b/gtsam_unstable/discrete/tests/CMakeLists.txt index 2687a760c..1b66e5c85 100644 --- a/gtsam_unstable/discrete/tests/CMakeLists.txt +++ b/gtsam_unstable/discrete/tests/CMakeLists.txt @@ -1 +1,6 @@ -gtsamAddTestsGlob(discrete_unstable "test*.cpp" "" "gtsam_unstable") +set(excluded_sources "") +if (NOT GTSAM_USE_BOOST_FEATURES) + list(APPEND excluded_sources "testScheduler.cpp") +endif() + +gtsamAddTestsGlob(discrete_unstable "test*.cpp" "${excluded_sources}" "gtsam_unstable") diff --git a/gtsam_unstable/discrete/tests/testCSP.cpp b/gtsam_unstable/discrete/tests/testCSP.cpp 
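The scheduling examples above replace boost::format with plain iostream output; the converted print statement, reduced to a standalone sketch (function and parameter names are illustrative):

#include <iostream>
#include <string>

// Before: cout << boost::format("%s = %d (%d), count = %d")
//                 % student % slot % slotIndex % count << endl;
void printAssignment(const std::string& student, const std::string& slot,
                     size_t slotIndex, size_t count) {
  std::cout << student << " = " << slot << " (" << slotIndex
            << "), count = " << count << std::endl;
}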
index 5756cb99c..2b9a20ca6 100644 --- a/gtsam_unstable/discrete/tests/testCSP.cpp +++ b/gtsam_unstable/discrete/tests/testCSP.cpp @@ -173,9 +173,7 @@ TEST(CSP, WesternUS) { {6, 3}, {7, 2}, {8, 0}, {9, 1}, {10, 0}}; // Create ordering according to example in ND-CSP.lyx - Ordering ordering; - ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7), - Key(8), Key(9), Key(10); + const Ordering ordering{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; // Solve using that ordering: auto actualMPE = csp.optimize(ordering); diff --git a/gtsam_unstable/discrete/tests/testLoopyBelief.cpp b/gtsam_unstable/discrete/tests/testLoopyBelief.cpp index 63df7d7c4..7c769fd78 100644 --- a/gtsam_unstable/discrete/tests/testLoopyBelief.cpp +++ b/gtsam_unstable/discrete/tests/testLoopyBelief.cpp @@ -11,7 +11,6 @@ #include #include -#include #include #include @@ -47,7 +46,7 @@ class LoopyBelief { void print(const std::string& s = "") const { cout << s << ":" << endl; star->print("Star graph: "); - for (Key key : correctedBeliefIndices | boost::adaptors::map_keys) { + for (const auto& [key, _] : correctedBeliefIndices) { cout << "Belief factor index for " << key << ": " << correctedBeliefIndices.at(key) << endl; } @@ -71,8 +70,8 @@ class LoopyBelief { /// print void print(const std::string& s = "") const { cout << s << ":" << endl; - for (Key key : starGraphs_ | boost::adaptors::map_keys) { - starGraphs_.at(key).print((boost::format("Node %d:") % key).str()); + for (const auto& [key, _] : starGraphs_) { + starGraphs_.at(key).print("Node " + std::to_string(key) + ":"); } } @@ -80,12 +79,10 @@ class LoopyBelief { DiscreteFactorGraph::shared_ptr iterate( const std::map& allDiscreteKeys) { static const bool debug = false; - static DiscreteConditional::shared_ptr - dummyCond; // unused by-product of elimination DiscreteFactorGraph::shared_ptr beliefs(new DiscreteFactorGraph()); std::map > allMessages; // Eliminate each star graph - for (Key key : starGraphs_ | boost::adaptors::map_keys) { + for (const auto& [key, _] : starGraphs_) { // cout << "***** Node " << key << "*****" << endl; // initialize belief to the unary factor from the original graph DecisionTreeFactor::shared_ptr beliefAtKey; @@ -94,15 +91,13 @@ class LoopyBelief { std::map messages; // eliminate each neighbor in this star graph one by one - for (Key neighbor : starGraphs_.at(key).correctedBeliefIndices | - boost::adaptors::map_keys) { + for (const auto& [neighbor, _] : starGraphs_.at(key).correctedBeliefIndices) { DiscreteFactorGraph subGraph; for (size_t factor : starGraphs_.at(key).varIndex_[neighbor]) { subGraph.push_back(starGraphs_.at(key).star->at(factor)); } if (debug) subGraph.print("------- Subgraph:"); - DiscreteFactor::shared_ptr message; - boost::tie(dummyCond, message) = + const auto [dummyCond, message] = EliminateDiscrete(subGraph, Ordering{neighbor}); // store the new factor into messages messages.insert(make_pair(neighbor, message)); @@ -129,9 +124,11 @@ class LoopyBelief { val[key] = v; sum += (*beliefAtKey)(val); } + // TODO(kartikarcot): Check if this makes sense string sumFactorTable; - for (size_t v = 0; v < allDiscreteKeys.at(key).second; ++v) - sumFactorTable = (boost::format("%s %f") % sumFactorTable % sum).str(); + for (size_t v = 0; v < allDiscreteKeys.at(key).second; ++v) { + sumFactorTable = sumFactorTable + " " + std::to_string(sum); + } DecisionTreeFactor sumFactor(allDiscreteKeys.at(key), sumFactorTable); if (debug) sumFactor.print("denomFactor: "); beliefAtKey = @@ -143,11 +140,10 @@ class LoopyBelief { // Update 
corrected beliefs VariableIndex beliefFactors(*beliefs); - for (Key key : starGraphs_ | boost::adaptors::map_keys) { + for (const auto& [key, _] : starGraphs_) { std::map messages = allMessages[key]; - for (Key neighbor : starGraphs_.at(key).correctedBeliefIndices | - boost::adaptors::map_keys) { - DecisionTreeFactor correctedBelief = + for (const auto& [neighbor, _] : starGraphs_.at(key).correctedBeliefIndices) { + DecisionTreeFactor correctedBelief = (*std::dynamic_pointer_cast( beliefs->at(beliefFactors[key].front()))) / (*std::dynamic_pointer_cast( @@ -175,7 +171,7 @@ class LoopyBelief { const std::map& allDiscreteKeys) const { StarGraphs starGraphs; VariableIndex varIndex(graph); ///< access to all factors of each node - for (Key key : varIndex | boost::adaptors::map_keys) { + for (const auto& [key, _] : varIndex) { // initialize to multiply with other unary factors later DecisionTreeFactor::shared_ptr prodOfUnaries; diff --git a/gtsam_unstable/dynamics/FullIMUFactor.h b/gtsam_unstable/dynamics/FullIMUFactor.h index 325321d13..02da899b7 100644 --- a/gtsam_unstable/dynamics/FullIMUFactor.h +++ b/gtsam_unstable/dynamics/FullIMUFactor.h @@ -10,8 +10,6 @@ #include #include -#include - namespace gtsam { /** diff --git a/gtsam_unstable/dynamics/IMUFactor.h b/gtsam_unstable/dynamics/IMUFactor.h index 102c9311e..ee09600e5 100644 --- a/gtsam_unstable/dynamics/IMUFactor.h +++ b/gtsam_unstable/dynamics/IMUFactor.h @@ -10,8 +10,6 @@ #include #include -#include - namespace gtsam { /** diff --git a/gtsam_unstable/dynamics/VelocityConstraint.h b/gtsam_unstable/dynamics/VelocityConstraint.h index bd6a40008..2bb70e1b5 100644 --- a/gtsam_unstable/dynamics/VelocityConstraint.h +++ b/gtsam_unstable/dynamics/VelocityConstraint.h @@ -10,8 +10,6 @@ #include #include -#include - namespace gtsam { namespace dynamics { diff --git a/gtsam_unstable/dynamics/tests/CMakeLists.txt b/gtsam_unstable/dynamics/tests/CMakeLists.txt index 493cef4fa..882d5f4e3 100644 --- a/gtsam_unstable/dynamics/tests/CMakeLists.txt +++ b/gtsam_unstable/dynamics/tests/CMakeLists.txt @@ -1 +1,7 @@ -gtsamAddTestsGlob(dynamics_unstable "test*.cpp" "" "gtsam_unstable") +set (excluded_tests "") + +# TODO(dellaert): these segfault, and are rather obsolete, so we stop compiling them: +list(APPEND excluded_tests "testIMUSystem.cpp" "testPoseRTV.cpp") + +# Build tests +gtsamAddTestsGlob(dynamics_unstable "test*.cpp" "${excluded_tests}" "gtsam_unstable") diff --git a/gtsam_unstable/dynamics/tests/testIMUSystem.cpp b/gtsam_unstable/dynamics/tests/testIMUSystem.cpp index c7e46e400..b7206264a 100644 --- a/gtsam_unstable/dynamics/tests/testIMUSystem.cpp +++ b/gtsam_unstable/dynamics/tests/testIMUSystem.cpp @@ -33,20 +33,20 @@ TEST(testIMUSystem, instantiations) { // just checking for compilation PoseRTV x1_v; - gtsam::SharedNoiseModel model1 = gtsam::noiseModel::Unit::Create(1); - gtsam::SharedNoiseModel model3 = gtsam::noiseModel::Unit::Create(3); - gtsam::SharedNoiseModel model6 = gtsam::noiseModel::Unit::Create(6); - gtsam::SharedNoiseModel model9 = gtsam::noiseModel::Unit::Create(9); + SharedNoiseModel model1 = noiseModel::Unit::Create(1); + SharedNoiseModel model3 = noiseModel::Unit::Create(3); + SharedNoiseModel model6 = noiseModel::Unit::Create(6); + SharedNoiseModel model9 = noiseModel::Unit::Create(9); Vector accel = Vector::Ones(3), gyro = Vector::Ones(3); IMUFactor imu(accel, gyro, 0.01, x1, x2, model6); FullIMUFactor full_imu(accel, gyro, 0.01, x1, x2, model9); - NonlinearEquality poseHardPrior(x1, x1_v); - BetweenFactor odom(x1, x2, x1_v, 
model9); - RangeFactor range(x1, x2, 1.0, model1); + NonlinearEquality poseHardPrior(x1, x1_v); + BetweenFactor odom(x1, x2, x1_v, model9); + RangeFactor range(x1, x2, 1.0, model1); VelocityConstraint constraint(x1, x2, 0.1, 10000); - PriorFactor posePrior(x1, x1_v, model9); + PriorFactor posePrior(x1, x1_v, model9); DHeightPrior heightPrior(x1, 0.1, model1); VelocityPrior velPrior(x1, Vector::Ones(3), model3); } @@ -68,13 +68,13 @@ TEST( testIMUSystem, optimize_chain ) { // assemble simple graph with IMU measurements and velocity constraints NonlinearFactorGraph graph; - graph += NonlinearEquality(x1, pose1); - graph += IMUFactor(imu12, dt, x1, x2, model); - graph += IMUFactor(imu23, dt, x2, x3, model); - graph += IMUFactor(imu34, dt, x3, x4, model); - graph += VelocityConstraint(x1, x2, dt); - graph += VelocityConstraint(x2, x3, dt); - graph += VelocityConstraint(x3, x4, dt); + graph.emplace_shared>(x1, pose1); + graph.emplace_shared>(imu12, dt, x1, x2, model); + graph.emplace_shared>(imu23, dt, x2, x3, model); + graph.emplace_shared>(imu34, dt, x3, x4, model); + graph.emplace_shared(x1, x2, dt); + graph.emplace_shared(x2, x3, dt); + graph.emplace_shared(x3, x4, dt); // ground truth values Values true_values; @@ -114,10 +114,10 @@ TEST( testIMUSystem, optimize_chain_fullfactor ) { // assemble simple graph with IMU measurements and velocity constraints NonlinearFactorGraph graph; - graph += NonlinearEquality(x1, pose1); - graph += FullIMUFactor(imu12, dt, x1, x2, model); - graph += FullIMUFactor(imu23, dt, x2, x3, model); - graph += FullIMUFactor(imu34, dt, x3, x4, model); + graph.emplace_shared>(x1, pose1); + graph.emplace_shared>(imu12, dt, x1, x2, model); + graph.emplace_shared>(imu23, dt, x2, x3, model); + graph.emplace_shared>(imu34, dt, x3, x4, model); // ground truth values Values true_values; @@ -156,7 +156,7 @@ TEST( testIMUSystem, linear_trajectory) { Values true_traj, init_traj; NonlinearFactorGraph graph; - graph += NonlinearEquality(x0, start); + graph.emplace_shared>(x0, start); true_traj.insert(x0, start); init_traj.insert(x0, start); @@ -165,7 +165,7 @@ TEST( testIMUSystem, linear_trajectory) { for (size_t i=1; i(accel - g, gyro, dt, xA, xB, model); + graph.emplace_shared>(accel - g, gyro, dt, xA, xB, model); true_traj.insert(xB, cur_pose); init_traj.insert(xB, PoseRTV()); } diff --git a/gtsam_unstable/examples/CMakeLists.txt b/gtsam_unstable/examples/CMakeLists.txt index da06b7dfc..967937b22 100644 --- a/gtsam_unstable/examples/CMakeLists.txt +++ b/gtsam_unstable/examples/CMakeLists.txt @@ -1,5 +1,12 @@ -set (excluded_examples - # fileToExclude.cpp -) +set (excluded_examples "") + +# Add examples to exclude if GTSAM_USE_BOOST_FEATURES is not set +if (NOT GTSAM_USE_BOOST_FEATURES) + # add to excluded examples + list (APPEND excluded_examples + "GncPoseAveragingExample.cpp" + ) +endif() + gtsamAddExamplesGlob("*.cpp" "${excluded_examples}" "gtsam_unstable") diff --git a/gtsam_unstable/examples/ConcurrentCalibration.cpp b/gtsam_unstable/examples/ConcurrentCalibration.cpp index 75dc49eee..cccde076a 100644 --- a/gtsam_unstable/examples/ConcurrentCalibration.cpp +++ b/gtsam_unstable/examples/ConcurrentCalibration.cpp @@ -32,7 +32,6 @@ #include #include #include -#include using namespace std; using namespace gtsam; diff --git a/gtsam_unstable/examples/GncPoseAveragingExample.cpp b/gtsam_unstable/examples/GncPoseAveragingExample.cpp index 730d342f0..183de81fc 100644 --- a/gtsam_unstable/examples/GncPoseAveragingExample.cpp +++ b/gtsam_unstable/examples/GncPoseAveragingExample.cpp 
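The testIMUSystem hunks above switch from the boost-assign style "graph += Factor(...)" to "graph.emplace_shared<Factor>(...)"; the same idiom with stock GTSAM factors, shown as a standalone sketch (Pose2 between-factors with made-up keys and noise, not the IMU types used in the test):

#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/slam/BetweenFactor.h>

using namespace gtsam;

NonlinearFactorGraph smallChain() {
  auto model = noiseModel::Isotropic::Sigma(3, 0.1);
  NonlinearFactorGraph graph;
  // emplace_shared constructs the factor in place and appends a shared_ptr,
  // replacing the old "graph += BetweenFactor<Pose2>(...)" syntax.
  graph.emplace_shared<BetweenFactor<Pose2>>(0, 1, Pose2(1.0, 0.0, 0.0), model);
  graph.emplace_shared<BetweenFactor<Pose2>>(1, 2, Pose2(1.0, 0.0, 0.0), model);
  return graph;
}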
@@ -27,7 +27,6 @@ #include #include #include -#include using namespace std; using namespace gtsam; diff --git a/gtsam_unstable/examples/SmartRangeExample_plaza1.cpp b/gtsam_unstable/examples/SmartRangeExample_plaza1.cpp index 37becfef1..0e2822e60 100644 --- a/gtsam_unstable/examples/SmartRangeExample_plaza1.cpp +++ b/gtsam_unstable/examples/SmartRangeExample_plaza1.cpp @@ -77,7 +77,7 @@ list readOdometry() { // load the ranges from TD // Time (sec) Sender / Antenna ID Receiver Node ID Range (m) -typedef boost::tuple RangeTriple; +typedef std::tuple RangeTriple; vector readTriples() { vector triples; string tdFile = findExampleDataFile("Plaza1_TD.txt"); @@ -165,7 +165,7 @@ int main(int argc, char** argv) { //--------------------------------- odometry loop ----------------------------------------- double t; Pose2 odometry; - boost::tie(t, odometry) = timedOdometry; + std::tie(t, odometry) = timedOdometry; printf("step %d, time = %g\n",(int)i,t); // add odometry factor @@ -179,9 +179,9 @@ int main(int argc, char** argv) { landmarkEstimates.insert(i, predictedPose); // Check if there are range factors to be added - while (k < K && t >= boost::get<0>(triples[k])) { - size_t j = boost::get<1>(triples[k]); - double range = boost::get<2>(triples[k]); + while (k < K && t >= std::get<0>(triples[k])) { + size_t j = std::get<1>(triples[k]); + double range = std::get<2>(triples[k]); if (i > start) { if (smart && totalCount < minK) { try { diff --git a/gtsam_unstable/examples/SmartRangeExample_plaza2.cpp b/gtsam_unstable/examples/SmartRangeExample_plaza2.cpp index 1e6f77b31..9a2863d68 100644 --- a/gtsam_unstable/examples/SmartRangeExample_plaza2.cpp +++ b/gtsam_unstable/examples/SmartRangeExample_plaza2.cpp @@ -76,7 +76,7 @@ list readOdometry() { // load the ranges from TD // Time (sec) Sender / Antenna ID Receiver Node ID Range (m) -typedef boost::tuple RangeTriple; +typedef std::tuple RangeTriple; vector readTriples() { vector triples; string tdFile = findExampleDataFile("Plaza2_TD.txt"); @@ -146,7 +146,7 @@ int main(int argc, char** argv) { //--------------------------------- odometry loop ----------------------------------------- double t; Pose2 odometry; - boost::tie(t, odometry) = timedOdometry; + std::tie(t, odometry) = timedOdometry; // add odometry factor newFactors.push_back( @@ -160,9 +160,9 @@ int main(int argc, char** argv) { landmarkEstimates.insert(i, predictedPose); // Check if there are range factors to be added - while (k < K && t >= boost::get<0>(triples[k])) { - size_t j = boost::get<1>(triples[k]); - double range = boost::get<2>(triples[k]); + while (k < K && t >= std::get<0>(triples[k])) { + size_t j = std::get<1>(triples[k]); + double range = std::get<2>(triples[k]); RangeFactor factor(i, symbol('L', j), range, rangeNoise); // Throw out obvious outliers based on current landmark estimates Vector error = factor.unwhitenedError(landmarkEstimates); diff --git a/gtsam_unstable/examples/TimeOfArrivalExample.cpp b/gtsam_unstable/examples/TimeOfArrivalExample.cpp index 8d496a30e..a97aa27be 100644 --- a/gtsam_unstable/examples/TimeOfArrivalExample.cpp +++ b/gtsam_unstable/examples/TimeOfArrivalExample.cpp @@ -24,8 +24,6 @@ #include #include -#include - #include using namespace std; diff --git a/gtsam_unstable/geometry/InvDepthCamera3.h b/gtsam_unstable/geometry/InvDepthCamera3.h index eb9045dff..45b27efe3 100644 --- a/gtsam_unstable/geometry/InvDepthCamera3.h +++ b/gtsam_unstable/geometry/InvDepthCamera3.h @@ -12,7 +12,6 @@ #pragma once #include -#include #include #include #include @@ 
-20,6 +19,10 @@ #include #include +#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION +#include +#endif + namespace gtsam { /** diff --git a/gtsam_unstable/geometry/Pose3Upright.cpp b/gtsam_unstable/geometry/Pose3Upright.cpp index 9bc8efd54..9cd9f78f5 100644 --- a/gtsam_unstable/geometry/Pose3Upright.cpp +++ b/gtsam_unstable/geometry/Pose3Upright.cpp @@ -5,7 +5,6 @@ * @author Alex Cunningham */ -#include #include #include "gtsam/base/OptionalJacobian.h" #include "gtsam/base/Vector.h" diff --git a/gtsam_unstable/geometry/tests/testInvDepthCamera3.cpp b/gtsam_unstable/geometry/tests/testInvDepthCamera3.cpp index 11a24286a..6da048d7d 100644 --- a/gtsam_unstable/geometry/tests/testInvDepthCamera3.cpp +++ b/gtsam_unstable/geometry/tests/testInvDepthCamera3.cpp @@ -130,9 +130,7 @@ TEST(InvDepthFactor, backproject) InvDepthCamera3 inv_camera(level_pose,K); Point2 z = inv_camera.project(expected, inv_depth); - Vector5 actual_vec; - double actual_inv; - boost::tie(actual_vec, actual_inv) = inv_camera.backproject(z, 4); + const auto [actual_vec, actual_inv] = inv_camera.backproject(z, 4); EXPECT(assert_equal(expected,actual_vec,1e-7)); EXPECT_DOUBLES_EQUAL(inv_depth,actual_inv,1e-7); } @@ -146,9 +144,7 @@ TEST(InvDepthFactor, backproject2) InvDepthCamera3 inv_camera(Pose3(Rot3::Ypr(1.5,0.1, -1.5), Point3(-5, -5, 2)),K); Point2 z = inv_camera.project(expected, inv_depth); - Vector5 actual_vec; - double actual_inv; - boost::tie(actual_vec, actual_inv) = inv_camera.backproject(z, 10); + const auto [actual_vec, actual_inv] = inv_camera.backproject(z, 10); EXPECT(assert_equal(expected,actual_vec,1e-7)); EXPECT_DOUBLES_EQUAL(inv_depth,actual_inv,1e-7); } diff --git a/gtsam_unstable/linear/ActiveSetSolver-inl.h b/gtsam_unstable/linear/ActiveSetSolver-inl.h index cba639e5d..8e09d524f 100644 --- a/gtsam_unstable/linear/ActiveSetSolver-inl.h +++ b/gtsam_unstable/linear/ActiveSetSolver-inl.h @@ -45,7 +45,7 @@ namespace gtsam { * * We want the minimum of all those alphas among all inactive inequality. */ -Template boost::tuple This::computeStepSize( +Template std::tuple This::computeStepSize( const InequalityFactorGraph& workingSet, const VectorValues& xk, const VectorValues& p, const double& maxAlpha) const { double minAlpha = maxAlpha; @@ -74,7 +74,7 @@ Template boost::tuple This::computeStepSize( } } } - return boost::make_tuple(minAlpha, closestFactorIx); + return std::make_tuple(minAlpha, closestFactorIx); } /******************************************************************************/ @@ -219,10 +219,8 @@ Template typename This::State This::iterate( } else { // If we CAN make some progress, i.e. p_k != 0 // Adapt stepsize if some inactive constraints complain about this move - double alpha; - int factorIx; VectorValues p = newValues - state.values; - boost::tie(alpha, factorIx) = // using 16.41 + const auto [alpha, factorIx] = // using 16.41 computeStepSize(state.workingSet, state.values, p, POLICY::maxAlpha); // also add to the working set the one that complains the most InequalityFactorGraph newWorkingSet = state.workingSet; diff --git a/gtsam_unstable/linear/ActiveSetSolver.h b/gtsam_unstable/linear/ActiveSetSolver.h index ad8367c06..52a6b1265 100644 --- a/gtsam_unstable/linear/ActiveSetSolver.h +++ b/gtsam_unstable/linear/ActiveSetSolver.h @@ -20,7 +20,6 @@ #include #include -#include namespace gtsam { @@ -114,7 +113,7 @@ protected: * If there is a blocking constraint, the closest one will be added to the * working set and become active in the next iteration. 
*/ - boost::tuple computeStepSize( + std::tuple computeStepSize( const InequalityFactorGraph& workingSet, const VectorValues& xk, const VectorValues& p, const double& maxAlpha) const; @@ -201,4 +200,4 @@ Key maxKey(const PROBLEM& problem) { } // namespace gtsam -#include \ No newline at end of file +#include diff --git a/gtsam_unstable/linear/LPInitSolver.cpp b/gtsam_unstable/linear/LPInitSolver.cpp index 8c3df3132..9f12d670e 100644 --- a/gtsam_unstable/linear/LPInitSolver.cpp +++ b/gtsam_unstable/linear/LPInitSolver.cpp @@ -66,7 +66,7 @@ GaussianFactorGraph::shared_ptr LPInitSolver::buildInitOfInitGraph() const { // create factor ||x||^2 and add to the graph const KeyDimMap& constrainedKeyDim = lp_.constrainedKeyDimMap(); - for (Key key : constrainedKeyDim | boost::adaptors::map_keys) { + for (const auto& [key, _] : constrainedKeyDim) { size_t dim = constrainedKeyDim.at(key); initGraph->push_back( JacobianFactor(key, Matrix::Identity(dim, dim), Vector::Zero(dim))); @@ -107,4 +107,4 @@ InequalityFactorGraph LPInitSolver::addSlackVariableToInequalities( return slackInequalities; } -} \ No newline at end of file +} diff --git a/gtsam_unstable/linear/tests/CMakeLists.txt b/gtsam_unstable/linear/tests/CMakeLists.txt index 43df23daa..b2cda1811 100644 --- a/gtsam_unstable/linear/tests/CMakeLists.txt +++ b/gtsam_unstable/linear/tests/CMakeLists.txt @@ -1 +1,10 @@ -gtsamAddTestsGlob(linear_unstable "test*.cpp" "" "gtsam_unstable") +# if GTSAM_USE_BOOST_FEATURES is OFF then exclude some tests +if (NOT GTSAM_USE_BOOST_FEATURES) + # create a semicolon seperated list of files to exclude + set(EXCLUDE_TESTS "testQPSolver.cpp") + message(STATUS "Excluding ${EXCLUDE_TESTS}") +else() + set(EXCLUDE_TESTS "${EXCLUDE_TESTS}") +endif() + +gtsamAddTestsGlob(linear_unstable "test*.cpp" "${EXCLUDE_TESTS}" "gtsam_unstable") diff --git a/gtsam_unstable/linear/tests/testLPSolver.cpp b/gtsam_unstable/linear/tests/testLPSolver.cpp index 53c8c7618..25e2d6a28 100644 --- a/gtsam_unstable/linear/tests/testLPSolver.cpp +++ b/gtsam_unstable/linear/tests/testLPSolver.cpp @@ -30,9 +30,6 @@ #include -#include -#include - using namespace std; using namespace gtsam; using namespace gtsam::symbol_shorthand; @@ -72,8 +69,7 @@ TEST(LPInitSolver, InfiniteLoopSingleVar) { LPSolver solver(lp); VectorValues starter; starter.insert(1, Vector3(0, 0, 2)); - VectorValues results, duals; - boost::tie(results, duals) = solver.optimize(starter); + const auto [results, duals] = solver.optimize(starter); VectorValues expected; expected.insert(1, Vector3(13.5, 6.5, -6.5)); CHECK(assert_equal(results, expected, 1e-7)); @@ -100,8 +96,7 @@ TEST(LPInitSolver, InfiniteLoopMultiVar) { starter.insert(X, kZero); starter.insert(Y, kZero); starter.insert(Z, Vector::Constant(1, 2.0)); - VectorValues results, duals; - boost::tie(results, duals) = solver.optimize(starter); + const auto [results, duals] = solver.optimize(starter); VectorValues expected; expected.insert(X, Vector::Constant(1, 13.5)); expected.insert(Y, Vector::Constant(1, 6.5)); @@ -200,8 +195,7 @@ TEST(LPSolver, SimpleTest1) { expected_x1.insert(1, Vector::Ones(2)); CHECK(assert_equal(expected_x1, x1, 1e-10)); - VectorValues result, duals; - boost::tie(result, duals) = lpSolver.optimize(init); + const auto [result, duals] = lpSolver.optimize(init); VectorValues expectedResult; expectedResult.insert(1, Vector2(8. / 3., 2. 
/ 3.)); CHECK(assert_equal(expectedResult, result, 1e-10)); @@ -211,9 +205,9 @@ TEST(LPSolver, SimpleTest1) { TEST(LPSolver, TestWithoutInitialValues) { LP lp = simpleLP1(); LPSolver lpSolver(lp); - VectorValues result, duals, expectedResult; + VectorValues expectedResult; expectedResult.insert(1, Vector2(8. / 3., 2. / 3.)); - boost::tie(result, duals) = lpSolver.optimize(); + const auto [result, duals] = lpSolver.optimize(); CHECK(assert_equal(expectedResult, result)); } diff --git a/gtsam_unstable/linear/tests/testLinearEquality.cpp b/gtsam_unstable/linear/tests/testLinearEquality.cpp index 58af66a09..667ed4dea 100644 --- a/gtsam_unstable/linear/tests/testLinearEquality.cpp +++ b/gtsam_unstable/linear/tests/testLinearEquality.cpp @@ -29,8 +29,9 @@ GTSAM_CONCEPT_TESTABLE_INST(LinearEquality) namespace { namespace simple { // Terms we'll use -const vector > terms{ - make_pair(5, I_3x3), make_pair(10, 2 * I_3x3), make_pair(15, 3 * I_3x3)}; +using Terms = vector >; +const Terms terms{make_pair(5, I_3x3), make_pair(10, 2 * I_3x3), + make_pair(15, 3 * I_3x3)}; // RHS and sigmas const Vector b = (Vector(3) << 1., 2., 3.).finished(); @@ -45,8 +46,7 @@ TEST(LinearEquality, constructors_and_accessors) { // Test for using different numbers of terms { // One term constructor - LinearEquality expected( - boost::make_iterator_range(terms.begin(), terms.begin() + 1), b, 0); + LinearEquality expected(Terms(terms.begin(), terms.begin() + 1), b, 0); LinearEquality actual(terms[0].first, terms[0].second, b, 0); EXPECT(assert_equal(expected, actual)); LONGS_EQUAL((long)terms[0].first, (long)actual.keys().back()); @@ -57,8 +57,7 @@ TEST(LinearEquality, constructors_and_accessors) { } { // Two term constructor - LinearEquality expected( - boost::make_iterator_range(terms.begin(), terms.begin() + 2), b, 0); + LinearEquality expected(Terms(terms.begin(), terms.begin() + 2), b, 0); LinearEquality actual(terms[0].first, terms[0].second, terms[1].first, terms[1].second, b, 0); EXPECT(assert_equal(expected, actual)); @@ -70,8 +69,7 @@ TEST(LinearEquality, constructors_and_accessors) { } { // Three term constructor - LinearEquality expected( - boost::make_iterator_range(terms.begin(), terms.begin() + 3), b, 0); + LinearEquality expected(Terms(terms.begin(), terms.begin() + 3), b, 0); LinearEquality actual(terms[0].first, terms[0].second, terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, 0); @@ -202,9 +200,7 @@ TEST(LinearEquality, operators) { EXPECT(assert_equal(expectedX, actualX)); // test gradient at zero - Matrix A; - Vector b2; - boost::tie(A, b2) = lf.jacobian(); + const auto [A, b2] = lf.jacobian(); VectorValues expectedG; expectedG.insert(1, (Vector(2) << 0.2, -0.1).finished()); expectedG.insert(2, (Vector(2) << -0.2, 0.1).finished()); diff --git a/gtsam_unstable/nonlinear/ConcurrentFilteringAndSmoothing.cpp b/gtsam_unstable/nonlinear/ConcurrentFilteringAndSmoothing.cpp index 9dc5b3fad..7bbcdd897 100644 --- a/gtsam_unstable/nonlinear/ConcurrentFilteringAndSmoothing.cpp +++ b/gtsam_unstable/nonlinear/ConcurrentFilteringAndSmoothing.cpp @@ -78,7 +78,7 @@ NonlinearFactorGraph calculateMarginalFactors(const NonlinearFactorGraph& graph, NonlinearFactorGraph marginalFactors; marginalFactors.reserve(marginalLinearFactors.size()); for(const GaussianFactor::shared_ptr& gaussianFactor: marginalLinearFactors) { - marginalFactors += std::make_shared(gaussianFactor, theta); + marginalFactors.emplace_shared(gaussianFactor, theta); } return marginalFactors; diff --git 
a/gtsam_unstable/nonlinear/LinearizedFactor.cpp b/gtsam_unstable/nonlinear/LinearizedFactor.cpp index a3f2132f8..2d404005d 100644 --- a/gtsam_unstable/nonlinear/LinearizedFactor.cpp +++ b/gtsam_unstable/nonlinear/LinearizedFactor.cpp @@ -16,7 +16,6 @@ */ #include -#include #include namespace gtsam { @@ -69,8 +68,9 @@ void LinearizedJacobianFactor::print(const std::string& s, const KeyFormatter& k std::cout << keyFormatter(key) << " "; std::cout << std::endl; - for(const_iterator key=begin(); key!=end(); ++key) - std::cout << boost::format("A[%1%]=\n")%keyFormatter(*key) << A(*key) << std::endl; + for(const_iterator key=begin(); key!=end(); ++key) { + std::cout << "A[" << keyFormatter(*key) << "]=\n" << A(*key) << std::endl; + } std::cout << "b=\n" << b() << std::endl; lin_points_.print("Linearization Point: "); diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp index 7c6a08278..401dee762 100644 --- a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp +++ b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp @@ -468,7 +468,7 @@ TEST( ConcurrentIncrementalFilter, update_and_marginalize_2 ) { // Create a set of optimizer parameters ISAM2Params parameters; - parameters.relinearizeThreshold = 0; + parameters.relinearizeThreshold = 0.; // ISAM2 checks whether to relinearize or not a variable only every relinearizeSkip steps and the // default value for that is 10 (if you set that to zero the code will crash) parameters.relinearizeSkip = 1; @@ -594,7 +594,7 @@ TEST( ConcurrentIncrementalFilter, synchronize_1 ) { // Create a set of optimizer parameters ISAM2Params parameters; - parameters.relinearizeThreshold = 0; + parameters.relinearizeThreshold = 0.; // ISAM2 checks whether to relinearize or not a variable only every relinearizeSkip steps and the // default value for that is 10 (if you set that to zero the code will crash) parameters.relinearizeSkip = 1; @@ -641,7 +641,7 @@ TEST( ConcurrentIncrementalFilter, synchronize_2 ) { // Create a set of optimizer parameters ISAM2Params parameters; - parameters.relinearizeThreshold = 0; + parameters.relinearizeThreshold = 0.; // ISAM2 checks whether to relinearize or not a variable only every relinearizeSkip steps and the // default value for that is 10 (if you set that to zero the code will crash) parameters.relinearizeSkip = 1; @@ -711,7 +711,7 @@ TEST( ConcurrentIncrementalFilter, synchronize_3 ) { // Create a set of optimizer parameters ISAM2Params parameters; - parameters.relinearizeThreshold = 0; + parameters.relinearizeThreshold = 0.; // ISAM2 checks whether to relinearize or not a variable only every relinearizeSkip steps and the // default value for that is 10 (if you set that to zero the code will crash) parameters.relinearizeSkip = 1; @@ -798,7 +798,7 @@ TEST( ConcurrentIncrementalFilter, synchronize_4 ) { // Create a set of optimizer parameters ISAM2Params parameters; - parameters.relinearizeThreshold = 0; + parameters.relinearizeThreshold = 0.; // ISAM2 checks whether to relinearize or not a variable only every relinearizeSkip steps and the // default value for that is 10 (if you set that to zero the code will crash) parameters.relinearizeSkip = 1; @@ -893,7 +893,7 @@ TEST( ConcurrentIncrementalFilter, synchronize_5 ) { // Create a set of optimizer parameters ISAM2Params parameters; - parameters.relinearizeThreshold = 0; + parameters.relinearizeThreshold = 0.; // ISAM2 checks whether to relinearize or not a 
variable only every relinearizeSkip steps and the // default value for that is 10 (if you set that to zero the code will crash) parameters.relinearizeSkip = 1; @@ -1182,7 +1182,7 @@ TEST( ConcurrentIncrementalFilter, removeFactors_topology_1 ) { // Create a set of optimizer parameters ISAM2Params parameters; - parameters.relinearizeThreshold = 0; + parameters.relinearizeThreshold = 0.; // ISAM2 checks whether to relinearize or not a variable only every relinearizeSkip steps and the // default value for that is 10 (if you set that to zero the code will crash) parameters.relinearizeSkip = 1; @@ -1241,7 +1241,7 @@ TEST( ConcurrentIncrementalFilter, removeFactors_topology_2 ) // we try removing the last factor ISAM2Params parameters; - parameters.relinearizeThreshold = 0; + parameters.relinearizeThreshold = 0.; // ISAM2 checks whether to relinearize or not a variable only every relinearizeSkip steps and the // default value for that is 10 (if you set that to zero the code will crash) parameters.relinearizeSkip = 1; @@ -1300,7 +1300,7 @@ TEST( ConcurrentIncrementalFilter, removeFactors_topology_3 ) // we try removing the first factor ISAM2Params parameters; - parameters.relinearizeThreshold = 0; + parameters.relinearizeThreshold = 0.; // ISAM2 checks whether to relinearize or not a variable only every relinearizeSkip steps and the // default value for that is 10 (if you set that to zero the code will crash) parameters.relinearizeSkip = 1; @@ -1357,7 +1357,7 @@ TEST( ConcurrentIncrementalFilter, removeFactors_values ) // we try removing the last factor ISAM2Params parameters; - parameters.relinearizeThreshold = 0; + parameters.relinearizeThreshold = 0.; // ISAM2 checks whether to relinearize or not a variable only every relinearizeSkip steps and the // default value for that is 10 (if you set that to zero the code will crash) parameters.relinearizeSkip = 1; diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp index 0e01112eb..b9dca7a7c 100644 --- a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp +++ b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp @@ -584,10 +584,10 @@ TEST( ConcurrentIncrementalSmootherDL, synchronize_3 ) allkeys.erase(key); } KeyVector variables(allkeys.begin(), allkeys.end()); - std::pair result = LinFactorGraph->eliminatePartialSequential(variables, EliminateCholesky); + const auto [bn, fg] = LinFactorGraph->eliminatePartialSequential(variables, EliminateCholesky); expectedSmootherSummarization.resize(0); - for(const GaussianFactor::shared_ptr& factor: *result.second) { + for(const GaussianFactor::shared_ptr& factor: *fg) { expectedSmootherSummarization.push_back(LinearContainerFactor(factor, allValues)); } diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp index a33fcc481..4e7baa6ca 100644 --- a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp +++ b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp @@ -584,10 +584,10 @@ TEST( ConcurrentIncrementalSmootherGN, synchronize_3 ) KeySet allkeys = LinFactorGraph->keys(); for (const auto key : filterSeparatorValues.keys()) allkeys.erase(key); KeyVector variables(allkeys.begin(), allkeys.end()); - std::pair result = LinFactorGraph->eliminatePartialSequential(variables, EliminateCholesky); + const auto [bn, fg] = 
LinFactorGraph->eliminatePartialSequential(variables, EliminateCholesky); expectedSmootherSummarization.resize(0); - for(const GaussianFactor::shared_ptr& factor: *result.second) { + for(const GaussianFactor::shared_ptr& factor: *fg) { expectedSmootherSummarization.push_back(LinearContainerFactor(factor, allValues)); } diff --git a/gtsam_unstable/nonlinear/tests/testCustomChartExpression.cpp b/gtsam_unstable/nonlinear/tests/testCustomChartExpression.cpp index a7e266cb5..5b98540e2 100644 --- a/gtsam_unstable/nonlinear/tests/testCustomChartExpression.cpp +++ b/gtsam_unstable/nonlinear/tests/testCustomChartExpression.cpp @@ -19,6 +19,8 @@ #include #include +#include + using namespace gtsam; @@ -46,8 +48,8 @@ struct ProjectionChart { namespace gtsam { namespace traits { -template<> struct is_chart : public boost::true_type {}; -template<> struct dimension : public boost::integral_constant {}; +template<> struct is_chart : public std::true_type {}; +template<> struct dimension : public std::integral_constant {}; } // namespace traits } // namespace gtsam diff --git a/gtsam_unstable/nonlinear/tests/testNonlinearClusterTree.cpp b/gtsam_unstable/nonlinear/tests/testNonlinearClusterTree.cpp index 9083ab172..38cfd7348 100644 --- a/gtsam_unstable/nonlinear/tests/testNonlinearClusterTree.cpp +++ b/gtsam_unstable/nonlinear/tests/testNonlinearClusterTree.cpp @@ -86,8 +86,8 @@ TEST(NonlinearClusterTree, Clusters) { // Calculate expected result of only evaluating the marginalCluster Ordering ordering; ordering.push_back(x1); - auto expected = gfg->eliminatePartialSequential(ordering); - auto expectedFactor = std::dynamic_pointer_cast(expected.second->at(0)); + const auto [bn, fg] = gfg->eliminatePartialSequential(ordering); + auto expectedFactor = std::dynamic_pointer_cast(fg->at(0)); if (!expectedFactor) throw std::runtime_error("Expected HessianFactor"); @@ -95,7 +95,7 @@ TEST(NonlinearClusterTree, Clusters) { auto actual = marginalCluster->linearizeAndEliminate(initial); const GaussianBayesNet& bayesNet = actual.first; const HessianFactor& factor = *actual.second; - EXPECT(assert_equal(*expected.first->at(0), *bayesNet.at(0), 1e-6)); + EXPECT(assert_equal(*bn->at(0), *bayesNet.at(0), 1e-6)); EXPECT(assert_equal(*expectedFactor, factor, 1e-6)); } diff --git a/gtsam_unstable/partition/FindSeparator-inl.h b/gtsam_unstable/partition/FindSeparator-inl.h index 45868e78e..e991c5af2 100644 --- a/gtsam_unstable/partition/FindSeparator-inl.h +++ b/gtsam_unstable/partition/FindSeparator-inl.h @@ -14,7 +14,6 @@ #include #include #include -#include #include #include @@ -250,9 +249,7 @@ namespace gtsam { namespace partition { prepareMetisGraph(graph, keys, workspace, &xadj, &adjncy, &adjwgt); // run ND on the graph - size_t sepsize; - sharedInts part; - boost::tie(sepsize, part) = separatorMetis(numKeys, xadj, adjncy, adjwgt, verbose); + const auto [sepsize, part] = separatorMetis(numKeys, xadj, adjncy, adjwgt, verbose); if (!sepsize) return std::optional(); // convert the 0-1-2 from Metis to 1-2-0, so that the separator is 0, as later @@ -310,9 +307,7 @@ namespace gtsam { namespace partition { prepareMetisGraph(graph, keys, workspace, &xadj, &adjncy, &adjwgt); // run metis on the graph - int edgecut; - sharedInts part; - boost::tie(edgecut, part) = edgeMetis(numKeys, xadj, adjncy, adjwgt, verbose); + const auto [edgecut, part] = edgeMetis(numKeys, xadj, adjncy, adjwgt, verbose); // convert the 0-1-2 from Metis to 1-2-0, so that the separator is 0, as later we will have more submaps MetisResult result; diff --git 
a/gtsam_unstable/partition/GenericGraph.cpp b/gtsam_unstable/partition/GenericGraph.cpp index 5f32ab557..4d9bf4f63 100644 --- a/gtsam_unstable/partition/GenericGraph.cpp +++ b/gtsam_unstable/partition/GenericGraph.cpp @@ -6,7 +6,7 @@ * Description: generic graph types used in partitioning */ #include -#include +#include #include @@ -464,9 +464,9 @@ namespace gtsam { namespace partition { } if (minFoundConstraintsPerCamera < minNrConstraintsPerCamera) - throw runtime_error("checkSingularity:minConstraintsPerCamera < " + boost::lexical_cast(minFoundConstraintsPerCamera)); + throw runtime_error("checkSingularity:minConstraintsPerCamera < " + std::to_string(minFoundConstraintsPerCamera)); if (minFoundConstraintsPerLandmark < minNrConstraintsPerLandmark) - throw runtime_error("checkSingularity:minConstraintsPerLandmark < " + boost::lexical_cast(minFoundConstraintsPerLandmark)); + throw runtime_error("checkSingularity:minConstraintsPerLandmark < " + std::to_string(minFoundConstraintsPerLandmark)); } }} // namespace diff --git a/gtsam_unstable/partition/NestedDissection-inl.h b/gtsam_unstable/partition/NestedDissection-inl.h index c377998dd..c5ba55ff2 100644 --- a/gtsam_unstable/partition/NestedDissection-inl.h +++ b/gtsam_unstable/partition/NestedDissection-inl.h @@ -20,9 +20,7 @@ namespace gtsam { namespace partition { NestedDissection::NestedDissection( const NLG& fg, const Ordering& ordering, const int numNodeStopPartition, const int minNodesPerMap, const bool verbose) : fg_(fg), ordering_(ordering){ - GenericUnaryGraph unaryFactors; - GenericGraph gfg; - boost::tie(unaryFactors, gfg) = fg.createGenericGraph(ordering); + const auto [unaryFactors, gfg] = fg.createGenericGraph(ordering); // build reverse mapping from integer to symbol int numNodes = ordering.size(); @@ -44,9 +42,7 @@ namespace gtsam { namespace partition { template NestedDissection::NestedDissection( const NLG& fg, const Ordering& ordering, const std::shared_ptr& cuts, const bool verbose) : fg_(fg), ordering_(ordering){ - GenericUnaryGraph unaryFactors; - GenericGraph gfg; - boost::tie(unaryFactors, gfg) = fg.createGenericGraph(ordering); + const auto [unaryFactors, gfg] = fg.createGenericGraph(ordering); // build reverse mapping from integer to symbol int numNodes = ordering.size(); diff --git a/gtsam_unstable/partition/tests/CMakeLists.txt b/gtsam_unstable/partition/tests/CMakeLists.txt index d89a1fe98..0b918e497 100644 --- a/gtsam_unstable/partition/tests/CMakeLists.txt +++ b/gtsam_unstable/partition/tests/CMakeLists.txt @@ -1,2 +1,7 @@ set(ignore_test "testNestedDissection.cpp") + +if (NOT GTSAM_USE_BOOST_FEATURES) + list(APPEND ignore_test "testFindSeparator.cpp") +endif() + gtsamAddTestsGlob(partition "test*.cpp" "${ignore_test}" "gtsam_unstable;gtsam;metis-gtsam-if") diff --git a/gtsam_unstable/partition/tests/testNestedDissection.cpp b/gtsam_unstable/partition/tests/testNestedDissection.cpp index da896e33d..be2d7af7f 100644 --- a/gtsam_unstable/partition/tests/testNestedDissection.cpp +++ b/gtsam_unstable/partition/tests/testNestedDissection.cpp @@ -34,7 +34,7 @@ TEST ( NestedDissection, oneIsland ) fg.addBearingRange(2, 1, Rot2(), 0., bearingRangeNoise); fg.addPoseConstraint(1, Pose2()); - Ordering ordering; ordering += x1, x2, l1; + const Ordering ordering{x1, x2, l1}; int numNodeStopPartition = 1e3; int minNodesPerMap = 1e3; @@ -61,7 +61,7 @@ TEST ( NestedDissection, TwoIslands ) fg.addOdometry(3, 5, Pose2(), odoNoise); fg.addPoseConstraint(1, Pose2()); fg.addPoseConstraint(4, Pose2()); - Ordering ordering; ordering 
+= x1, x2, x3, x4, x5; + const Ordering ordering{x1, x2, x3, x4, x5}; int numNodeStopPartition = 2; int minNodesPerMap = 1; @@ -96,7 +96,7 @@ TEST ( NestedDissection, FourIslands ) fg.addOdometry(3, 5, Pose2(), odoNoise); fg.addPoseConstraint(1, Pose2()); fg.addPoseConstraint(4, Pose2()); - Ordering ordering; ordering += x1, x2, x3, x4, x5; + const Ordering ordering{x1, x2, x3, x4, x5}; int numNodeStopPartition = 2; int minNodesPerMap = 1; @@ -144,7 +144,7 @@ TEST ( NestedDissection, weekLinks ) fg.addPoseConstraint(1, Pose2()); fg.addPoseConstraint(4, Pose2()); fg.addPoseConstraint(5, Pose2()); - Ordering ordering; ordering += x1, x2, x3, x4, x5, l6; + const Ordering ordering{x1, x2, x3, x4, x5, l6}; int numNodeStopPartition = 2; int minNodesPerMap = 1; @@ -206,7 +206,7 @@ TEST ( NestedDissection, manual_cuts ) fg.addPrior(x0, Pose2(0.1, 0, 0), priorNoise); // generate ordering - Ordering ordering; ordering += x0, x1, x2, l1, l2, l3, l4, l5, l6; + const Ordering ordering{x0, x1, x2, l1, l2, l3, l4, l5, l6}; // define cuts std::shared_ptr cuts(new Cuts()); @@ -301,9 +301,9 @@ TEST( NestedDissection, Graph3D) { graph.addMeasurement(3, j, cameras[3].project(points[j-1]).expmap(measurementZeroNoise->sample()), measurementNoise); // make an easy ordering - Ordering ordering; ordering += x0, x1, x2, x3; + const Ordering ordering{x0, x1, x2, x3}; for (int j=1; j<=24; j++) - ordering += Symbol('l', j); + ordering.push_back(Symbol('l', j)); // nested dissection const int numNodeStopPartition = 10; diff --git a/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel.h b/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel.h index f62157845..0accb8f42 100644 --- a/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel.h +++ b/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel.h @@ -27,8 +27,6 @@ // Using numerical derivative to calculate d(Pose3::Expmap)/dw #include -#include - #include namespace gtsam { diff --git a/gtsam_unstable/slam/InertialNavFactor_GlobalVelocity.h b/gtsam_unstable/slam/InertialNavFactor_GlobalVelocity.h index 91dba26e6..8fda5456b 100644 --- a/gtsam_unstable/slam/InertialNavFactor_GlobalVelocity.h +++ b/gtsam_unstable/slam/InertialNavFactor_GlobalVelocity.h @@ -26,8 +26,6 @@ // Using numerical derivative to calculate d(Pose3::Expmap)/dw #include -#include - #include namespace gtsam { diff --git a/gtsam_unstable/slam/InvDepthFactorVariant1.h b/gtsam_unstable/slam/InvDepthFactorVariant1.h index 23f8bd3e0..a49b2090c 100644 --- a/gtsam_unstable/slam/InvDepthFactorVariant1.h +++ b/gtsam_unstable/slam/InvDepthFactorVariant1.h @@ -17,8 +17,6 @@ #include #include -#include - namespace gtsam { /** diff --git a/gtsam_unstable/slam/InvDepthFactorVariant2.h b/gtsam_unstable/slam/InvDepthFactorVariant2.h index 63fcb4d8c..7abbbe89a 100644 --- a/gtsam_unstable/slam/InvDepthFactorVariant2.h +++ b/gtsam_unstable/slam/InvDepthFactorVariant2.h @@ -18,8 +18,6 @@ #include #include -#include - namespace gtsam { /** diff --git a/gtsam_unstable/slam/InvDepthFactorVariant3.h b/gtsam_unstable/slam/InvDepthFactorVariant3.h index 22a725d47..8664002e8 100644 --- a/gtsam_unstable/slam/InvDepthFactorVariant3.h +++ b/gtsam_unstable/slam/InvDepthFactorVariant3.h @@ -17,8 +17,6 @@ #include #include -#include - namespace gtsam { /** diff --git a/gtsam_unstable/slam/SmartStereoProjectionFactor.h b/gtsam_unstable/slam/SmartStereoProjectionFactor.h index 0893af75e..52d6eda05 100644 --- a/gtsam_unstable/slam/SmartStereoProjectionFactor.h +++ b/gtsam_unstable/slam/SmartStereoProjectionFactor.h @@ -30,7 +30,10 @@ 
#include #include +#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION #include +#endif + #include #include @@ -501,7 +504,7 @@ public: private: -#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION /// +#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION /// Serialization function friend class boost::serialization::access; template diff --git a/gtsam_unstable/slam/tests/CMakeLists.txt b/gtsam_unstable/slam/tests/CMakeLists.txt index bb5259ef2..6872dd575 100644 --- a/gtsam_unstable/slam/tests/CMakeLists.txt +++ b/gtsam_unstable/slam/tests/CMakeLists.txt @@ -2,6 +2,7 @@ # Exclude tests that don't work set (slam_excluded_tests testSerialization.cpp + testSmartStereoProjectionFactorPP.cpp # unstable after PR #1442 ) gtsamAddTestsGlob(slam_unstable "test*.cpp" "${slam_excluded_tests}" "gtsam_unstable") diff --git a/gtsam_unstable/slam/tests/testAHRS.cpp b/gtsam_unstable/slam/tests/testAHRS.cpp index d0980f452..0a82d67ff 100644 --- a/gtsam_unstable/slam/tests/testAHRS.cpp +++ b/gtsam_unstable/slam/tests/testAHRS.cpp @@ -70,17 +70,15 @@ TEST (AHRS, constructor) { /* ************************************************************************* */ // TODO make a testMechanization_bRn2 -TEST (AHRS, Mechanization_integrate) { - AHRS ahrs = AHRS(stationaryU,stationaryF,g_e); - Mechanization_bRn2 mech; - KalmanFilter::State state; -// boost::tie(mech,state) = ahrs.initialize(g_e); -// Vector u = Vector3(0.05,0.0,0.0); -// double dt = 2; -// Rot3 expected; -// Mechanization_bRn2 mech2 = mech.integrate(u,dt); -// Rot3 actual = mech2.bRn(); -// EXPECT(assert_equal(expected, actual)); +TEST(AHRS, Mechanization_integrate) { + AHRS ahrs = AHRS(stationaryU, stationaryF, g_e); + // const auto [mech, state] = ahrs.initialize(g_e); + // Vector u = Vector3(0.05, 0.0, 0.0); + // double dt = 2; + // Rot3 expected; + // Mechanization_bRn2 mech2 = mech.integrate(u, dt); + // Rot3 actual = mech2.bRn(); + // EXPECT(assert_equal(expected, actual)); } /* ************************************************************************* */ diff --git a/gtsam_unstable/slam/tests/testEquivInertialNavFactor_GlobalVel.cpp b/gtsam_unstable/slam/tests/testEquivInertialNavFactor_GlobalVel.cpp index 644283512..33a48cf7a 100644 --- a/gtsam_unstable/slam/tests/testEquivInertialNavFactor_GlobalVel.cpp +++ b/gtsam_unstable/slam/tests/testEquivInertialNavFactor_GlobalVel.cpp @@ -22,7 +22,6 @@ #include #include -#include #include #include diff --git a/gtsam_unstable/slam/tests/testInvDepthFactor3.cpp b/gtsam_unstable/slam/tests/testInvDepthFactor3.cpp index 8a81c1f24..64af687d4 100644 --- a/gtsam_unstable/slam/tests/testInvDepthFactor3.cpp +++ b/gtsam_unstable/slam/tests/testInvDepthFactor3.cpp @@ -62,10 +62,9 @@ TEST( InvDepthFactor, optimize) { gtsam::NonlinearFactorGraph graph; Values initial; - InverseDepthFactor::shared_ptr factor(new InverseDepthFactor(expected_uv, sigma, - Symbol('x',1), Symbol('l',1), Symbol('d',1), K)); - graph.push_back(factor); - graph += PoseConstraint(Symbol('x',1),level_pose); + graph.emplace_shared(expected_uv, sigma, + Symbol('x',1), Symbol('l',1), Symbol('d',1), K); + graph.emplace_shared(Symbol('x', 1), level_pose); initial.insert(Symbol('x',1), level_pose); initial.insert(Symbol('l',1), inv_landmark); initial.insert(Symbol('d',1), inv_depth); @@ -91,7 +90,7 @@ TEST( InvDepthFactor, optimize) { Symbol('x',2), Symbol('l',1),Symbol('d',1),K)); graph.push_back(factor1); - graph += PoseConstraint(Symbol('x',2),right_pose); + graph.emplace_shared(Symbol('x',2),right_pose); initial.insert(Symbol('x',2), right_pose); @@ -118,10 +117,8 @@ TEST( 
InvDepthFactor, Jacobian3D ) { Point2 expected_uv = level_camera.project(landmark); // get expected landmark representation using backprojection - double inv_depth; - Vector5 inv_landmark; InvDepthCamera3 inv_camera(level_pose, K); - std::tie(inv_landmark, inv_depth) = inv_camera.backproject(expected_uv, 5); + const auto [inv_landmark, inv_depth] = inv_camera.backproject(expected_uv, 5); Vector5 expected_inv_landmark((Vector(5) << 0., 0., 1., 0., 0.).finished()); CHECK(assert_equal(expected_inv_landmark, inv_landmark, 1e-6)); diff --git a/gtsam_unstable/slam/tests/testMultiProjectionFactor.cpp b/gtsam_unstable/slam/tests/testMultiProjectionFactor.cpp index 141a50465..1d438b457 100644 --- a/gtsam_unstable/slam/tests/testMultiProjectionFactor.cpp +++ b/gtsam_unstable/slam/tests/testMultiProjectionFactor.cpp @@ -73,10 +73,8 @@ TEST( MultiProjectionFactor, create ){ views.insert(x2); views.insert(x3); - MultiProjectionFactor mpFactor(n_measPixel, noiseProjection, views, l1, K); - graph += mpFactor; - - + graph.emplace_shared>( + n_measPixel, noiseProjection, views, l1, K); } diff --git a/gtsam_unstable/slam/tests/testTOAFactor.cpp b/gtsam_unstable/slam/tests/testTOAFactor.cpp index bc5c553e0..b6628c138 100644 --- a/gtsam_unstable/slam/tests/testTOAFactor.cpp +++ b/gtsam_unstable/slam/tests/testTOAFactor.cpp @@ -25,7 +25,6 @@ #include #include -#include using namespace std; using namespace gtsam; diff --git a/gtsam_unstable/timing/timeDSFvariants.cpp b/gtsam_unstable/timing/timeDSFvariants.cpp index 35ae7d4d5..7cc66b5b3 100644 --- a/gtsam_unstable/timing/timeDSFvariants.cpp +++ b/gtsam_unstable/timing/timeDSFvariants.cpp @@ -21,8 +21,6 @@ #include #include -#include - #include #include #include @@ -31,7 +29,6 @@ using namespace std; using namespace gtsam; -using boost::format; int main(int argc, char* argv[]) { @@ -52,8 +49,7 @@ int main(int argc, char* argv[]) { volatile double fpm = 0.5; // fraction of points matched volatile size_t nm = fpm * n * np; // number of matches - cout << format("\nTesting with %1% images, %2% points, %3% matches\n") - % (int)m % (int)N % (int)nm; + cout << "\nTesting with " << (int)m << " images, " << (int)N << " points, " << (int)nm << " matches\n"; cout << "Generating " << nm << " matches" << endl; std::mt19937 rng; std::uniform_int_distribution<> rn(0, N - 1); @@ -64,7 +60,7 @@ int main(int argc, char* argv[]) { for (size_t k = 0; k < nm; k++) matches.push_back(Match(rn(rng), rn(rng))); - os << format("%1%,%2%,%3%,") % (int)m % (int)N % (int)nm; + os << (int)m << "," << (int)N << "," << (int)nm << ","; { // DSFBase version @@ -77,7 +73,7 @@ int main(int argc, char* argv[]) { tictoc_getNode(dsftimeNode, dsftime); dsftime = dsftimeNode->secs(); os << dsftime << ","; - cout << format("DSFBase: %1% s") % dsftime << endl; + cout << "DSFBase: " << dsftime << " s" << endl; tictoc_reset_(); } @@ -92,7 +88,7 @@ int main(int argc, char* argv[]) { tictoc_getNode(dsftimeNode, dsftime); dsftime = dsftimeNode->secs(); os << dsftime << endl; - cout << format("DSFMap: %1% s") % dsftime << endl; + cout << "DSFMap: " << dsftime << " s" << endl; tictoc_reset_(); } @@ -109,7 +105,7 @@ int main(int argc, char* argv[]) { tictoc_getNode(dsftimeNode, dsftime); dsftime = dsftimeNode->secs(); os << dsftime << endl; - cout << format("DSF functional: %1% s") % dsftime << endl; + cout << "DSF functional: " << dsftime << " s" << endl; tictoc_reset_(); } @@ -126,7 +122,7 @@ int main(int argc, char* argv[]) { tictoc_getNode(dsftimeNode, dsftime); dsftime = dsftimeNode->secs(); os << dsftime << 
","; - cout << format("DSF in-place: %1% s") % dsftime << endl; + cout << "DSF in-place: " << dsftime << " s" << endl; tictoc_reset_(); } diff --git a/matlab/CMakeLists.txt b/matlab/CMakeLists.txt index a77db06a4..81132fbcf 100644 --- a/matlab/CMakeLists.txt +++ b/matlab/CMakeLists.txt @@ -73,6 +73,7 @@ set(interface_files ${GTSAM_SOURCE_DIR}/gtsam/geometry/geometry.i ${GTSAM_SOURCE_DIR}/gtsam/linear/linear.i ${GTSAM_SOURCE_DIR}/gtsam/nonlinear/nonlinear.i + ${GTSAM_SOURCE_DIR}/gtsam/nonlinear/values.i ${GTSAM_SOURCE_DIR}/gtsam/nonlinear/custom.i ${GTSAM_SOURCE_DIR}/gtsam/symbolic/symbolic.i ${GTSAM_SOURCE_DIR}/gtsam/sam/sam.i diff --git a/package.xml b/package.xml index 341c78ba3..6b2c95f3c 100644 --- a/package.xml +++ b/package.xml @@ -1,11 +1,25 @@ - + + gtsam - 4.1.0 + 4.3.0 gtsam Frank Dellaert - BSD + + + BSD-3-Clause + + BSD-3-Clause + BSD-3-Clause + MPL-2.0 + MIT + Apache-2.0 + MPL-2.0 + + + Fan Jiang + José Luis Blanco-Claraco cmake diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index b79c5f9ea..28bad4ad2 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -64,6 +64,7 @@ set(interface_headers ${PROJECT_SOURCE_DIR}/gtsam/geometry/geometry.i ${PROJECT_SOURCE_DIR}/gtsam/linear/linear.i ${PROJECT_SOURCE_DIR}/gtsam/nonlinear/nonlinear.i + ${PROJECT_SOURCE_DIR}/gtsam/nonlinear/values.i ${PROJECT_SOURCE_DIR}/gtsam/nonlinear/custom.i ${PROJECT_SOURCE_DIR}/gtsam/symbolic/symbolic.i ${PROJECT_SOURCE_DIR}/gtsam/sam/sam.i diff --git a/python/gtsam/preamble/values.h b/python/gtsam/preamble/values.h new file mode 100644 index 000000000..d07a75f6f --- /dev/null +++ b/python/gtsam/preamble/values.h @@ -0,0 +1,12 @@ +/* Please refer to: + * https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html + * These are required to save one copy operation on Python calls. + * + * NOTES + * ================= + * + * `PYBIND11_MAKE_OPAQUE` will mark the type as "opaque" for the pybind11 + * automatic STL binding, such that the raw objects can be accessed in Python. + * Without this they will be automatically converted to a Python object, and all + * mutations on Python side will not be reflected on C++. + */ diff --git a/python/gtsam/specializations/values.h b/python/gtsam/specializations/values.h new file mode 100644 index 000000000..da8842eaf --- /dev/null +++ b/python/gtsam/specializations/values.h @@ -0,0 +1,12 @@ +/* Please refer to: + * https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html + * These are required to save one copy operation on Python calls. + * + * NOTES + * ================= + * + * `py::bind_vector` and similar machinery gives the std container a Python-like + * interface, but without the `` copying mechanism. Combined + * with `PYBIND11_MAKE_OPAQUE` this allows the types to be modified with Python, + * and saves one copy operation. 
+ */ diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 5eaad45dc..d7b68c4ec 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,14 +1,23 @@ # exclude certain files # note the source dir on each -set (tests_exclude "") +set (excluded_tests "") if (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang") # might not be best test - Richard & Jason & Frank # clang linker segfaults on large testSerializationSlam - list (APPEND tests_exclude "testSerializationSlam.cpp") + list (APPEND excluded_tests "testSerializationSlam.cpp") +endif() + +if (NOT GTSAM_USE_BOOST_FEATURES) + list(APPEND excluded_tests "testGncOptimizer.cpp") + list(APPEND excluded_tests "testGraph.cpp") +endif() + +if (NOT GTSAM_ENABLE_BOOST_SERIALIZATION) + list(APPEND excluded_tests "testSerializationSLAM.cpp") endif() # Build tests -gtsamAddTestsGlob(tests "test*.cpp" "${tests_exclude}" "gtsam") +gtsamAddTestsGlob(tests "test*.cpp" "${excluded_tests}" "gtsam") if(MSVC) set_property(SOURCE "${CMAKE_CURRENT_SOURCE_DIR}/testSerializationSlam.cpp" diff --git a/tests/smallExample.h b/tests/smallExample.h index 08341245c..c5ab10594 100644 --- a/tests/smallExample.h +++ b/tests/smallExample.h @@ -274,16 +274,16 @@ inline GaussianFactorGraph createGaussianFactorGraph() { GaussianFactorGraph fg; // linearized prior on x1: c[_x1_]+x1=0 i.e. x1=-c[_x1_] - fg += JacobianFactor(X(1), 10*I_2x2, -1.0*Vector::Ones(2)); + fg.emplace_shared(X(1), 10*I_2x2, -1.0*Vector::Ones(2)); // odometry between x1 and x2: x2-x1=[0.2;-0.1] - fg += JacobianFactor(X(1), -10*I_2x2, X(2), 10*I_2x2, Vector2(2.0, -1.0)); + fg.emplace_shared(X(1), -10*I_2x2, X(2), 10*I_2x2, Vector2(2.0, -1.0)); // measurement between x1 and l1: l1-x1=[0.0;0.2] - fg += JacobianFactor(X(1), -5*I_2x2, L(1), 5*I_2x2, Vector2(0.0, 1.0)); + fg.emplace_shared(X(1), -5*I_2x2, L(1), 5*I_2x2, Vector2(0.0, 1.0)); // measurement between x2 and l1: l1-x2=[-0.2;0.3] - fg += JacobianFactor(X(2), -5*I_2x2, L(1), 5*I_2x2, Vector2(-1.0, 1.5)); + fg.emplace_shared(X(2), -5*I_2x2, L(1), 5*I_2x2, Vector2(-1.0, 1.5)); return fg; } @@ -463,10 +463,7 @@ inline std::pair createNonlinearSmoother(int T) { /* ************************************************************************* */ inline GaussianFactorGraph createSmoother(int T) { - NonlinearFactorGraph nlfg; - Values poses; - std::tie(nlfg, poses) = createNonlinearSmoother(T); - + const auto [nlfg, poses] = createNonlinearSmoother(T); return *nlfg.linearize(poses); } diff --git a/tests/testBoundingConstraint.cpp b/tests/testBoundingConstraint.cpp index c67914c8b..4aa357ba2 100644 --- a/tests/testBoundingConstraint.cpp +++ b/tests/testBoundingConstraint.cpp @@ -144,9 +144,9 @@ TEST( testBoundingConstraint, unary_simple_optimization1) { NonlinearFactorGraph graph; Symbol x1('x',1); - graph += iq2D::PoseXInequality(x1, 1.0, true); - graph += iq2D::PoseYInequality(x1, 2.0, true); - graph += simulated2D::Prior(start_pt, soft_model2, x1); + graph.emplace_shared(x1, 1.0, true); + graph.emplace_shared(x1, 2.0, true); + graph.emplace_shared(start_pt, soft_model2, x1); Values initValues; initValues.insert(x1, start_pt); @@ -165,9 +165,9 @@ TEST( testBoundingConstraint, unary_simple_optimization2) { Point2 start_pt(2.0, 3.0); NonlinearFactorGraph graph; - graph += iq2D::PoseXInequality(key, 1.0, false); - graph += iq2D::PoseYInequality(key, 2.0, false); - graph += simulated2D::Prior(start_pt, soft_model2, key); + graph.emplace_shared(key, 1.0, false); + graph.emplace_shared(key, 2.0, false); + graph.emplace_shared(start_pt, soft_model2, key); Values 
initValues; initValues.insert(key, start_pt); @@ -224,9 +224,9 @@ TEST( testBoundingConstraint, MaxDistance_simple_optimization) { Symbol x1('x',1), x2('x',2); NonlinearFactorGraph graph; - graph += simulated2D::equality_constraints::UnaryEqualityConstraint(pt1, x1); - graph += simulated2D::Prior(pt2_init, soft_model2_alt, x2); - graph += iq2D::PoseMaxDistConstraint(x1, x2, 2.0); + graph.emplace_shared(pt1, x1); + graph.emplace_shared(pt2_init, soft_model2_alt, x2); + graph.emplace_shared(x1, x2, 2.0); Values initial_state; initial_state.insert(x1, pt1); @@ -250,12 +250,12 @@ TEST( testBoundingConstraint, avoid_demo) { Point2 odo(2.0, 0.0); NonlinearFactorGraph graph; - graph += simulated2D::equality_constraints::UnaryEqualityConstraint(x1_pt, x1); - graph += simulated2D::Odometry(odo, soft_model2_alt, x1, x2); - graph += iq2D::LandmarkAvoid(x2, l1, radius); - graph += simulated2D::equality_constraints::UnaryEqualityPointConstraint(l1_pt, l1); - graph += simulated2D::Odometry(odo, soft_model2_alt, x2, x3); - graph += simulated2D::equality_constraints::UnaryEqualityConstraint(x3_pt, x3); + graph.emplace_shared(x1_pt, x1); + graph.emplace_shared(odo, soft_model2_alt, x1, x2); + graph.emplace_shared(x2, l1, radius); + graph.emplace_shared(l1_pt, l1); + graph.emplace_shared(odo, soft_model2_alt, x2, x3); + graph.emplace_shared(x3_pt, x3); Values init, expected; init.insert(x1, x1_pt); diff --git a/tests/testDoglegOptimizer.cpp b/tests/testDoglegOptimizer.cpp index 61276e89b..e64e5ab7a 100644 --- a/tests/testDoglegOptimizer.cpp +++ b/tests/testDoglegOptimizer.cpp @@ -30,7 +30,6 @@ #include #include -#include using namespace std; using namespace gtsam; @@ -43,22 +42,22 @@ using symbol_shorthand::L; TEST(DoglegOptimizer, ComputeBlend) { // Create an arbitrary Bayes Net GaussianBayesNet gbn; - gbn += GaussianConditional::shared_ptr(new GaussianConditional( + gbn.emplace_shared( 0, Vector2(1.0,2.0), (Matrix(2, 2) << 3.0,4.0,0.0,6.0).finished(), 3, (Matrix(2, 2) << 7.0,8.0,9.0,10.0).finished(), - 4, (Matrix(2, 2) << 11.0,12.0,13.0,14.0).finished())); - gbn += GaussianConditional::shared_ptr(new GaussianConditional( + 4, (Matrix(2, 2) << 11.0,12.0,13.0,14.0).finished()); + gbn.emplace_shared( 1, Vector2(15.0,16.0), (Matrix(2, 2) << 17.0,18.0,0.0,20.0).finished(), 2, (Matrix(2, 2) << 21.0,22.0,23.0,24.0).finished(), - 4, (Matrix(2, 2) << 25.0,26.0,27.0,28.0).finished())); - gbn += GaussianConditional::shared_ptr(new GaussianConditional( + 4, (Matrix(2, 2) << 25.0,26.0,27.0,28.0).finished()); + gbn.emplace_shared( 2, Vector2(29.0,30.0), (Matrix(2, 2) << 31.0,32.0,0.0,34.0).finished(), - 3, (Matrix(2, 2) << 35.0,36.0,37.0,38.0).finished())); - gbn += GaussianConditional::shared_ptr(new GaussianConditional( + 3, (Matrix(2, 2) << 35.0,36.0,37.0,38.0).finished()); + gbn.emplace_shared( 3, Vector2(39.0,40.0), (Matrix(2, 2) << 41.0,42.0,0.0,44.0).finished(), - 4, (Matrix(2, 2) << 45.0,46.0,47.0,48.0).finished())); - gbn += GaussianConditional::shared_ptr(new GaussianConditional( - 4, Vector2(49.0,50.0), (Matrix(2, 2) << 51.0,52.0,0.0,54.0).finished())); + 4, (Matrix(2, 2) << 45.0,46.0,47.0,48.0).finished()); + gbn.emplace_shared( + 4, Vector2(49.0,50.0), (Matrix(2, 2) << 51.0,52.0,0.0,54.0).finished()); // Compute steepest descent point VectorValues xu = gbn.optimizeGradientSearch(); @@ -79,22 +78,22 @@ TEST(DoglegOptimizer, ComputeBlend) { TEST(DoglegOptimizer, ComputeDoglegPoint) { // Create an arbitrary Bayes Net GaussianBayesNet gbn; - gbn += GaussianConditional::shared_ptr(new GaussianConditional( + 
gbn.emplace_shared( 0, Vector2(1.0,2.0), (Matrix(2, 2) << 3.0,4.0,0.0,6.0).finished(), 3, (Matrix(2, 2) << 7.0,8.0,9.0,10.0).finished(), - 4, (Matrix(2, 2) << 11.0,12.0,13.0,14.0).finished())); - gbn += GaussianConditional::shared_ptr(new GaussianConditional( + 4, (Matrix(2, 2) << 11.0,12.0,13.0,14.0).finished()); + gbn.emplace_shared( 1, Vector2(15.0,16.0), (Matrix(2, 2) << 17.0,18.0,0.0,20.0).finished(), 2, (Matrix(2, 2) << 21.0,22.0,23.0,24.0).finished(), - 4, (Matrix(2, 2) << 25.0,26.0,27.0,28.0).finished())); - gbn += GaussianConditional::shared_ptr(new GaussianConditional( + 4, (Matrix(2, 2) << 25.0,26.0,27.0,28.0).finished()); + gbn.emplace_shared( 2, Vector2(29.0,30.0), (Matrix(2, 2) << 31.0,32.0,0.0,34.0).finished(), - 3, (Matrix(2, 2) << 35.0,36.0,37.0,38.0).finished())); - gbn += GaussianConditional::shared_ptr(new GaussianConditional( + 3, (Matrix(2, 2) << 35.0,36.0,37.0,38.0).finished()); + gbn.emplace_shared( 3, Vector2(39.0,40.0), (Matrix(2, 2) << 41.0,42.0,0.0,44.0).finished(), - 4, (Matrix(2, 2) << 45.0,46.0,47.0,48.0).finished())); - gbn += GaussianConditional::shared_ptr(new GaussianConditional( - 4, Vector2(49.0,50.0), (Matrix(2, 2) << 51.0,52.0,0.0,54.0).finished())); + 4, (Matrix(2, 2) << 45.0,46.0,47.0,48.0).finished()); + gbn.emplace_shared( + 4, Vector2(49.0,50.0), (Matrix(2, 2) << 51.0,52.0,0.0,54.0).finished()); // Compute dogleg point for different deltas diff --git a/tests/testExpressionFactor.cpp b/tests/testExpressionFactor.cpp index 567e9df5d..fe2b543b0 100644 --- a/tests/testExpressionFactor.cpp +++ b/tests/testExpressionFactor.cpp @@ -233,9 +233,7 @@ TEST(ExpressionFactor, Shallow) { Point2_ expression = project(transformTo(x_, p_)); // Get and check keys and dims - KeyVector keys; - FastVector dims; - boost::tie(keys, dims) = expression.keysAndDims(); + const auto [keys, dims] = expression.keysAndDims(); LONGS_EQUAL(2,keys.size()); LONGS_EQUAL(2,dims.size()); LONGS_EQUAL(1,keys[0]); diff --git a/tests/testGaussianBayesTreeB.cpp b/tests/testGaussianBayesTreeB.cpp index fedc75945..bb1180903 100644 --- a/tests/testGaussianBayesTreeB.cpp +++ b/tests/testGaussianBayesTreeB.cpp @@ -75,7 +75,7 @@ TEST( GaussianBayesTree, linear_smoother_shortcuts ) double sigma3 = 0.61808; Matrix A56 = (Matrix(2,2) << -0.382022,0.,0.,-0.382022).finished(); GaussianBayesNet expected3; - expected3 += GaussianConditional(X(5), Z_2x1, I_2x2/sigma3, X(6), A56/sigma3); + expected3.emplace_shared(X(5), Z_2x1, I_2x2/sigma3, X(6), A56/sigma3); GaussianBayesTree::sharedClique C3 = bayesTree[X(4)]; GaussianBayesNet actual3 = C3->shortcut(R); EXPECT(assert_equal(expected3,actual3,tol)); @@ -84,7 +84,7 @@ TEST( GaussianBayesTree, linear_smoother_shortcuts ) double sigma4 = 0.661968; Matrix A46 = (Matrix(2,2) << -0.146067,0.,0.,-0.146067).finished(); GaussianBayesNet expected4; - expected4 += GaussianConditional(X(4), Z_2x1, I_2x2/sigma4, X(6), A46/sigma4); + expected4.emplace_shared(X(4), Z_2x1, I_2x2/sigma4, X(6), A46/sigma4); GaussianBayesTree::sharedClique C4 = bayesTree[X(3)]; GaussianBayesNet actual4 = C4->shortcut(R); EXPECT(assert_equal(expected4,actual4,tol)); @@ -114,8 +114,7 @@ TEST(GaussianBayesTree, balanced_smoother_marginals) { GaussianFactorGraph smoother = createSmoother(7); // Create the Bayes tree - Ordering ordering; - ordering += X(1), X(3), X(5), X(7), X(2), X(6), X(4); + const Ordering ordering{X(1), X(3), X(5), X(7), X(2), X(6), X(4)}; GaussianBayesTree bayesTree = *smoother.eliminateMultifrontal(ordering); VectorValues actualSolution = bayesTree.optimize(); @@ -162,8 
+161,7 @@ TEST( GaussianBayesTree, balanced_smoother_shortcuts ) GaussianFactorGraph smoother = createSmoother(7); // Create the Bayes tree - Ordering ordering; - ordering += X(1),X(3),X(5),X(7),X(2),X(6),X(4); + const Ordering ordering{X(1), X(3), X(5), X(7), X(2), X(6), X(4)}; GaussianBayesTree bayesTree = *smoother.eliminateMultifrontal(ordering); // Check the conditional P(Root|Root) @@ -194,8 +192,7 @@ TEST( GaussianBayesTree, balanced_smoother_shortcuts ) //TEST( BayesTree, balanced_smoother_clique_marginals ) //{ // // Create smoother with 7 nodes -// Ordering ordering; -// ordering += X(1),X(3),X(5),X(7),X(2),X(6),X(4); +// const Ordering ordering{X(1),X(3),X(5),X(7),X(2),X(6),X(4)}; // GaussianFactorGraph smoother = createSmoother(7, ordering).first; // // // Create the Bayes tree @@ -223,8 +220,7 @@ TEST( GaussianBayesTree, balanced_smoother_shortcuts ) TEST( GaussianBayesTree, balanced_smoother_joint ) { // Create smoother with 7 nodes - Ordering ordering; - ordering += X(1),X(3),X(5),X(7),X(2),X(6),X(4); + const Ordering ordering{X(1), X(3), X(5), X(7), X(2), X(6), X(4)}; GaussianFactorGraph smoother = createSmoother(7); // Create the Bayes tree, expected to look like: diff --git a/tests/testGaussianFactorGraphB.cpp b/tests/testGaussianFactorGraphB.cpp index a6aa7bfc5..36f618c59 100644 --- a/tests/testGaussianFactorGraphB.cpp +++ b/tests/testGaussianFactorGraphB.cpp @@ -26,10 +26,6 @@ #include -#include -#include -namespace br { using namespace boost::range; using namespace boost::adaptors; } - #include #include @@ -82,8 +78,7 @@ TEST(GaussianFactorGraph, eliminateOne_x1) { /* ************************************************************************* */ TEST(GaussianFactorGraph, eliminateOne_x2) { - Ordering ordering; - ordering += X(2), L(1), X(1); + const Ordering ordering{X(2), L(1), X(1)}; GaussianFactorGraph fg = createGaussianFactorGraph(); auto actual = EliminateQR(fg, Ordering{X(2)}).first; @@ -98,8 +93,7 @@ TEST(GaussianFactorGraph, eliminateOne_x2) { /* ************************************************************************* */ TEST(GaussianFactorGraph, eliminateOne_l1) { - Ordering ordering; - ordering += L(1), X(1), X(2); + const Ordering ordering{L(1), X(1), X(2)}; GaussianFactorGraph fg = createGaussianFactorGraph(); auto actual = EliminateQR(fg, Ordering{L(1)}).first; @@ -115,9 +109,7 @@ TEST(GaussianFactorGraph, eliminateOne_l1) { /* ************************************************************************* */ TEST(GaussianFactorGraph, eliminateOne_x1_fast) { GaussianFactorGraph fg = createGaussianFactorGraph(); - GaussianConditional::shared_ptr conditional; - JacobianFactor::shared_ptr remaining; - boost::tie(conditional, remaining) = EliminateQR(fg, Ordering{X(1)}); + const auto [conditional, remaining] = EliminateQR(fg, Ordering{X(1)}); // create expected Conditional Gaussian Matrix I = 15 * I_2x2, R11 = I, S12 = -0.111111 * I, S13 = -0.444444 * I; @@ -288,14 +280,11 @@ TEST(GaussianFactorGraph, elimination) { fg.emplace_shared(X(2), Ap, b, sigma); // Eliminate - Ordering ordering; - ordering += X(1), X(2); + const Ordering ordering{X(1), X(2)}; GaussianBayesNet bayesNet = *fg.eliminateSequential(); // Check matrix - Matrix R; - Vector d; - boost::tie(R, d) = bayesNet.matrix(); + const auto [R, d] = bayesNet.matrix(); Matrix expected = (Matrix(2, 2) << 0.707107, -0.353553, 0.0, 0.612372).finished(); Matrix expected2 = @@ -356,7 +345,6 @@ static SharedDiagonal model = noiseModel::Isotropic::Sigma(2,1); /* 
************************************************************************* */ TEST(GaussianFactorGraph, replace) { - Ordering ord; ord += X(1),X(2),X(3),X(4),X(5),X(6); SharedDiagonal noise(noiseModel::Isotropic::Sigma(3, 1.0)); GaussianFactorGraph::sharedFactor f1(new JacobianFactor( @@ -439,10 +427,10 @@ TEST( GaussianFactorGraph, conditional_sigma_failure) { 0.0, 1., 0.0, -1.2246468e-16, 0.0, -1), Point3(0.511832102, 8.42819594, 5.76841725)), priorModel); - factors += ProjectionFactor(Point2(333.648615, 98.61535), measModel, xC1, l32, K); - factors += ProjectionFactor(Point2(218.508, 83.8022039), measModel, xC1, l41, K); - factors += RangeFactor(xC1, l32, relElevation, elevationModel); - factors += RangeFactor(xC1, l41, relElevation, elevationModel); + factors.emplace_shared(Point2(333.648615, 98.61535), measModel, xC1, l32, K); + factors.emplace_shared(Point2(218.508, 83.8022039), measModel, xC1, l41, K); + factors.emplace_shared>(xC1, l32, relElevation, elevationModel); + factors.emplace_shared>(xC1, l41, relElevation, elevationModel); // Check that sigmas are correct (i.e., unit) GaussianFactorGraph lfg = *factors.linearize(initValues); @@ -450,7 +438,7 @@ TEST( GaussianFactorGraph, conditional_sigma_failure) { GaussianBayesTree actBT = *lfg.eliminateMultifrontal(); // Check that all sigmas in an unconstrained bayes tree are set to one - for(const GaussianBayesTree::sharedClique& clique: actBT.nodes() | br::map_values) { + for (const auto& [key, clique]: actBT.nodes()) { GaussianConditional::shared_ptr conditional = clique->conditional(); //size_t dim = conditional->rows(); //EXPECT(assert_equal(gtsam::Vector::Ones(dim), conditional->get_model()->sigmas(), tol)); diff --git a/tests/testGaussianISAM.cpp b/tests/testGaussianISAM.cpp index 63cf654e5..ee333f4c4 100644 --- a/tests/testGaussianISAM.cpp +++ b/tests/testGaussianISAM.cpp @@ -22,9 +22,6 @@ #include #include -#include -namespace br { using namespace boost::adaptors; using namespace boost::range; } - using namespace std; using namespace gtsam; using namespace example; @@ -36,7 +33,7 @@ using symbol_shorthand::L; TEST( ISAM, iSAM_smoother ) { Ordering ordering; - for (int t = 1; t <= 7; t++) ordering += X(t); + for (int t = 1; t <= 7; t++) ordering.push_back(X(t)); // Create smoother with 7 nodes GaussianFactorGraph smoother = createSmoother(7); @@ -53,7 +50,7 @@ TEST( ISAM, iSAM_smoother ) GaussianBayesTree expected = *smoother.eliminateMultifrontal(ordering); // Verify sigmas in the bayes tree - for(const GaussianBayesTree::sharedClique& clique: expected.nodes() | br::map_values) { + for (const auto& [key, clique] : expected.nodes()) { GaussianConditional::shared_ptr conditional = clique->conditional(); EXPECT(!conditional->get_model()); } diff --git a/tests/testGaussianISAM2.cpp b/tests/testGaussianISAM2.cpp index 8c4238e2f..5331b7dc4 100644 --- a/tests/testGaussianISAM2.cpp +++ b/tests/testGaussianISAM2.cpp @@ -24,8 +24,6 @@ #include -#include -namespace br { using namespace boost::adaptors; using namespace boost::range; } using namespace std; using namespace gtsam; @@ -76,7 +74,7 @@ ISAM2 createSlamlikeISAM2( // Add odometry from time 0 to time 5 for( ; i<5; ++i) { NonlinearFactorGraph newfactors; - newfactors += BetweenFactor(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise); + newfactors.emplace_shared>(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise); fullgraph.push_back(newfactors); Values init; @@ -95,9 +93,9 @@ ISAM2 createSlamlikeISAM2( // Add odometry from time 5 to 6 and landmark measurement at time 5 { NonlinearFactorGraph newfactors; 
- newfactors += BetweenFactor(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise); - newfactors += BearingRangeFactor(i, 100, Rot2::fromAngle(M_PI/4.0), 5.0, brNoise); - newfactors += BearingRangeFactor(i, 101, Rot2::fromAngle(-M_PI/4.0), 5.0, brNoise); + newfactors.emplace_shared>(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise); + newfactors.emplace_shared>(i, 100, Rot2::fromAngle(M_PI/4.0), 5.0, brNoise); + newfactors.emplace_shared>(i, 101, Rot2::fromAngle(-M_PI/4.0), 5.0, brNoise); fullgraph.push_back(newfactors); Values init; @@ -118,7 +116,7 @@ ISAM2 createSlamlikeISAM2( // Add odometry from time 6 to time 10 for( ; i<10; ++i) { NonlinearFactorGraph newfactors; - newfactors += BetweenFactor(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise); + newfactors.emplace_shared>(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise); fullgraph.push_back(newfactors); Values init; @@ -137,9 +135,9 @@ ISAM2 createSlamlikeISAM2( // Add odometry from time 10 to 11 and landmark measurement at time 10 { NonlinearFactorGraph newfactors; - newfactors += BetweenFactor(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise); - newfactors += BearingRangeFactor(i, 100, Rot2::fromAngle(M_PI/4.0 + M_PI/16.0), 4.5, brNoise); - newfactors += BearingRangeFactor(i, 101, Rot2::fromAngle(-M_PI/4.0 + M_PI/16.0), 4.5, brNoise); + newfactors.emplace_shared>(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise); + newfactors.emplace_shared>(i, 100, Rot2::fromAngle(M_PI/4.0 + M_PI/16.0), 4.5, brNoise); + newfactors.emplace_shared>(i, 101, Rot2::fromAngle(-M_PI/4.0 + M_PI/16.0), 4.5, brNoise); fullgraph.push_back(newfactors); Values init; @@ -232,7 +230,7 @@ bool isam_check(const NonlinearFactorGraph& fullgraph, const Values& fullinit, c // Check information GaussianFactorGraph isamGraph(isam); - isamGraph += isam.roots().front()->cachedFactor_; + isamGraph.push_back(isam.roots().front()->cachedFactor_); Matrix expectedHessian = fullgraph.linearize(isam.getLinearizationPoint())->augmentedHessian(); Matrix actualHessian = isamGraph.augmentedHessian(); expectedHessian.bottomRightCorner(1,1) = actualHessian.bottomRightCorner(1,1); @@ -248,11 +246,10 @@ bool isam_check(const NonlinearFactorGraph& fullgraph, const Values& fullinit, c // Check gradient at each node bool nodeGradientsOk = true; - typedef ISAM2::sharedClique sharedClique; - for(const sharedClique& clique: isam.nodes() | br::map_values) { + for (const auto& [key, clique] : isam.nodes()) { // Compute expected gradient GaussianFactorGraph jfg; - jfg += clique->conditional(); + jfg.push_back(clique->conditional()); VectorValues expectedGradient = jfg.gradientAtZero(); // Compare with actual gradients DenseIndex variablePosition = 0; @@ -356,7 +353,7 @@ TEST(ISAM2, clone) { // Modify original isam NonlinearFactorGraph factors; - factors += BetweenFactor(0, 10, + factors.emplace_shared>(0, 10, isam.calculateEstimate(0).between(isam.calculateEstimate(10)), noiseModel::Unit::Create(3)); isam.update(factors); @@ -442,7 +439,7 @@ TEST(ISAM2, swapFactors) NonlinearFactorGraph swapfactors; // swapfactors += BearingRange(10, 100, Rot2::fromAngle(M_PI/4.0 + M_PI/16.0), 4.5, brNoise; // original factor - swapfactors += BearingRangeFactor(10, 100, Rot2::fromAngle(M_PI/4.0 + M_PI/16.0), 5.0, brNoise); + swapfactors.emplace_shared>(10, 100, Rot2::fromAngle(M_PI/4.0 + M_PI/16.0), 5.0, brNoise); fullgraph.push_back(swapfactors); isam.update(swapfactors, Values(), toRemove); } @@ -452,11 +449,10 @@ TEST(ISAM2, swapFactors) EXPECT(isam_check(fullgraph, fullinit, isam, *this, result_)); // Check gradient at each node - typedef ISAM2::sharedClique 
sharedClique; - for(const sharedClique& clique: isam.nodes() | br::map_values) { + for (const auto& [key, clique]: isam.nodes()) { // Compute expected gradient GaussianFactorGraph jfg; - jfg += clique->conditional(); + jfg.push_back(clique->conditional()); VectorValues expectedGradient = jfg.gradientAtZero(); // Compare with actual gradients DenseIndex variablePosition = 0; @@ -511,7 +507,7 @@ TEST(ISAM2, constrained_ordering) // Add odometry from time 0 to time 5 for( ; i<5; ++i) { NonlinearFactorGraph newfactors; - newfactors += BetweenFactor(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise); + newfactors.emplace_shared>(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise); fullgraph.push_back(newfactors); Values init; @@ -527,9 +523,9 @@ TEST(ISAM2, constrained_ordering) // Add odometry from time 5 to 6 and landmark measurement at time 5 { NonlinearFactorGraph newfactors; - newfactors += BetweenFactor(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise); - newfactors += BearingRangeFactor(i, 100, Rot2::fromAngle(M_PI/4.0), 5.0, brNoise); - newfactors += BearingRangeFactor(i, 101, Rot2::fromAngle(-M_PI/4.0), 5.0, brNoise); + newfactors.emplace_shared>(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise); + newfactors.emplace_shared>(i, 100, Rot2::fromAngle(M_PI/4.0), 5.0, brNoise); + newfactors.emplace_shared>(i, 101, Rot2::fromAngle(-M_PI/4.0), 5.0, brNoise); fullgraph.push_back(newfactors); Values init; @@ -547,7 +543,7 @@ TEST(ISAM2, constrained_ordering) // Add odometry from time 6 to time 10 for( ; i<10; ++i) { NonlinearFactorGraph newfactors; - newfactors += BetweenFactor(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise); + newfactors.emplace_shared>(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise); fullgraph.push_back(newfactors); Values init; @@ -560,9 +556,9 @@ TEST(ISAM2, constrained_ordering) // Add odometry from time 10 to 11 and landmark measurement at time 10 { NonlinearFactorGraph newfactors; - newfactors += BetweenFactor(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise); - newfactors += BearingRangeFactor(i, 100, Rot2::fromAngle(M_PI/4.0 + M_PI/16.0), 4.5, brNoise); - newfactors += BearingRangeFactor(i, 101, Rot2::fromAngle(-M_PI/4.0 + M_PI/16.0), 4.5, brNoise); + newfactors.emplace_shared>(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise); + newfactors.emplace_shared>(i, 100, Rot2::fromAngle(M_PI/4.0 + M_PI/16.0), 4.5, brNoise); + newfactors.emplace_shared>(i, 101, Rot2::fromAngle(-M_PI/4.0 + M_PI/16.0), 4.5, brNoise); fullgraph.push_back(newfactors); Values init; @@ -577,11 +573,10 @@ TEST(ISAM2, constrained_ordering) EXPECT(isam_check(fullgraph, fullinit, isam, *this, result_)); // Check gradient at each node - typedef ISAM2::sharedClique sharedClique; - for(const sharedClique& clique: isam.nodes() | br::map_values) { + for (const auto& [key, clique]: isam.nodes()) { // Compute expected gradient GaussianFactorGraph jfg; - jfg += clique->conditional(); + jfg.push_back(clique->conditional()); VectorValues expectedGradient = jfg.gradientAtZero(); // Compare with actual gradients DenseIndex variablePosition = 0; @@ -620,9 +615,11 @@ namespace { bool checkMarginalizeLeaves(ISAM2& isam, const FastList& leafKeys) { Matrix expectedAugmentedHessian, expected3AugmentedHessian; KeyVector toKeep; - for(Key j: isam.getDelta() | br::map_keys) - if(find(leafKeys.begin(), leafKeys.end(), j) == leafKeys.end()) - toKeep.push_back(j); + for (const auto& [key, clique]: isam.getDelta()) { + if(find(leafKeys.begin(), leafKeys.end(), key) == leafKeys.end()) { + toKeep.push_back(key); + } + } // Calculate expected marginal from iSAM2 tree expectedAugmentedHessian = 
GaussianFactorGraph(isam).marginal(toKeep, EliminateQR)->augmentedHessian(); @@ -668,9 +665,9 @@ TEST(ISAM2, marginalizeLeaves1) { NonlinearFactorGraph factors; factors.addPrior(0, 0.0, model); - factors += BetweenFactor(0, 1, 0.0, model); - factors += BetweenFactor(1, 2, 0.0, model); - factors += BetweenFactor(0, 2, 0.0, model); + factors.emplace_shared>(0, 1, 0.0, model); + factors.emplace_shared>(1, 2, 0.0, model); + factors.emplace_shared>(0, 2, 0.0, model); Values values; values.insert(0, 0.0); @@ -695,10 +692,10 @@ TEST(ISAM2, marginalizeLeaves2) { NonlinearFactorGraph factors; factors.addPrior(0, 0.0, model); - factors += BetweenFactor(0, 1, 0.0, model); - factors += BetweenFactor(1, 2, 0.0, model); - factors += BetweenFactor(0, 2, 0.0, model); - factors += BetweenFactor(2, 3, 0.0, model); + factors.emplace_shared>(0, 1, 0.0, model); + factors.emplace_shared>(1, 2, 0.0, model); + factors.emplace_shared>(0, 2, 0.0, model); + factors.emplace_shared>(2, 3, 0.0, model); Values values; values.insert(0, 0.0); @@ -725,15 +722,15 @@ TEST(ISAM2, marginalizeLeaves3) { NonlinearFactorGraph factors; factors.addPrior(0, 0.0, model); - factors += BetweenFactor(0, 1, 0.0, model); - factors += BetweenFactor(1, 2, 0.0, model); - factors += BetweenFactor(0, 2, 0.0, model); + factors.emplace_shared>(0, 1, 0.0, model); + factors.emplace_shared>(1, 2, 0.0, model); + factors.emplace_shared>(0, 2, 0.0, model); - factors += BetweenFactor(2, 3, 0.0, model); + factors.emplace_shared>(2, 3, 0.0, model); - factors += BetweenFactor(3, 4, 0.0, model); - factors += BetweenFactor(4, 5, 0.0, model); - factors += BetweenFactor(3, 5, 0.0, model); + factors.emplace_shared>(3, 4, 0.0, model); + factors.emplace_shared>(4, 5, 0.0, model); + factors.emplace_shared>(3, 5, 0.0, model); Values values; values.insert(0, 0.0); @@ -763,8 +760,8 @@ TEST(ISAM2, marginalizeLeaves4) { NonlinearFactorGraph factors; factors.addPrior(0, 0.0, model); - factors += BetweenFactor(0, 2, 0.0, model); - factors += BetweenFactor(1, 2, 0.0, model); + factors.emplace_shared>(0, 2, 0.0, model); + factors.emplace_shared>(1, 2, 0.0, model); Values values; values.insert(0, 0.0); diff --git a/tests/testGaussianJunctionTreeB.cpp b/tests/testGaussianJunctionTreeB.cpp index 2740babd0..2c5dc493b 100644 --- a/tests/testGaussianJunctionTreeB.cpp +++ b/tests/testGaussianJunctionTreeB.cpp @@ -65,9 +65,7 @@ using symbol_shorthand::L; */ TEST( GaussianJunctionTreeB, constructor2 ) { // create a graph - NonlinearFactorGraph nlfg; - Values values; - boost::tie(nlfg, values) = createNonlinearSmoother(7); + const auto [nlfg, values] = createNonlinearSmoother(7); SymbolicFactorGraph::shared_ptr symbolic = nlfg.symbolic(); // linearize @@ -80,20 +78,7 @@ TEST( GaussianJunctionTreeB, constructor2 ) { SymbolicEliminationTree stree(*symbolic, ordering); GaussianJunctionTree actual(etree); - Ordering o324; - o324 += X(3), X(2), X(4); - Ordering o56; - o56 += X(5), X(6); - Ordering o7; - o7 += X(7); - Ordering o1; - o1 += X(1); - - // Can no longer test these: -// Ordering sep1; -// Ordering sep2; sep2 += X(4); -// Ordering sep3; sep3 += X(6); -// Ordering sep4; sep4 += X(2); + const Ordering o324{X(3), X(2), X(4)}, o56{X(5), X(6)}, o7{X(7)}, o1{X(1)}; GaussianJunctionTree::sharedNode x324 = actual.roots().front(); LONGS_EQUAL(2, x324->children.size()); @@ -125,43 +110,35 @@ TEST( GaussianJunctionTreeB, constructor2 ) { } ///* ************************************************************************* */ -//TEST( GaussianJunctionTreeB, optimizeMultiFrontal ) -//{ -// // 
create a graph -// GaussianFactorGraph fg; -// Ordering ordering; -// boost::tie(fg,ordering) = createSmoother(7); -// -// // optimize the graph -// GaussianJunctionTree tree(fg); -// VectorValues actual = tree.optimize(&EliminateQR); -// -// // verify -// VectorValues expected(vector(7,2)); // expected solution -// Vector v = Vector2(0., 0.); -// for (int i=1; i<=7; i++) -// expected[ordering[X(i)]] = v; -// EXPECT(assert_equal(expected,actual)); -//} -// -///* ************************************************************************* */ -//TEST( GaussianJunctionTreeB, optimizeMultiFrontal2) -//{ -// // create a graph -// example::Graph nlfg = createNonlinearFactorGraph(); -// Values noisy = createNoisyValues(); -// Ordering ordering; ordering += X(1),X(2),L(1); -// GaussianFactorGraph fg = *nlfg.linearize(noisy, ordering); -// -// // optimize the graph -// GaussianJunctionTree tree(fg); -// VectorValues actual = tree.optimize(&EliminateQR); -// -// // verify -// VectorValues expected = createCorrectDelta(ordering); // expected solution -// EXPECT(assert_equal(expected,actual)); -//} -// +TEST(GaussianJunctionTreeB, OptimizeMultiFrontal) { + // create a graph + const auto fg = createSmoother(7); + + // optimize the graph + const VectorValues actual = fg.optimize(&EliminateQR); + + // verify + VectorValues expected; + const Vector v = Vector2(0., 0.); + for (int i = 1; i <= 7; i++) expected.emplace(X(i), v); + EXPECT(assert_equal(expected, actual)); +} + +/* ************************************************************************* */ +TEST(GaussianJunctionTreeB, optimizeMultiFrontal2) { + // create a graph + const auto nlfg = createNonlinearFactorGraph(); + const auto noisy = createNoisyValues(); + const auto fg = *nlfg.linearize(noisy); + + // optimize the graph + VectorValues actual = fg.optimize(&EliminateQR); + + // verify + VectorValues expected = createCorrectDelta(); // expected solution + EXPECT(assert_equal(expected, actual)); +} + ///* ************************************************************************* */ //TEST(GaussianJunctionTreeB, slamlike) { // Values init; @@ -238,8 +215,7 @@ TEST( GaussianJunctionTreeB, constructor2 ) { // init.insert(X(0), Pose2()); // init.insert(X(1), Pose2(1.0, 0.0, 0.0)); // -// Ordering ordering; -// ordering += X(1), X(0); +// const Ordering ordering{X(1), X(0)}; // // GaussianFactorGraph gfg = *fg.linearize(init, ordering); // diff --git a/tests/testGncOptimizer.cpp b/tests/testGncOptimizer.cpp index 15d660114..5424a5744 100644 --- a/tests/testGncOptimizer.cpp +++ b/tests/testGncOptimizer.cpp @@ -737,9 +737,7 @@ TEST(GncOptimizer, setInlierCostThresholds) { TEST(GncOptimizer, optimizeSmallPoseGraph) { /// load small pose graph const string filename = findExampleDataFile("w100.graph"); - NonlinearFactorGraph::shared_ptr graph; - Values::shared_ptr initial; - boost::tie(graph, initial) = load2D(filename); + const auto [graph, initial] = load2D(filename); // Add a Gaussian prior on first poses Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin SharedDiagonal priorNoise = noiseModel::Diagonal::Sigmas( diff --git a/tests/testGradientDescentOptimizer.cpp b/tests/testGradientDescentOptimizer.cpp index 83b0f9e8a..d7ca70459 100644 --- a/tests/testGradientDescentOptimizer.cpp +++ b/tests/testGradientDescentOptimizer.cpp @@ -20,13 +20,12 @@ #include -#include using namespace std; using namespace gtsam; // Generate a small PoseSLAM problem -boost::tuple generateProblem() { +std::tuple generateProblem() { // 1. 
Create graph container and add factors to it NonlinearFactorGraph graph; @@ -40,15 +39,15 @@ boost::tuple generateProblem() { // 2b. Add odometry factors SharedDiagonal odometryNoise = noiseModel::Diagonal::Sigmas( Vector3(0.2, 0.2, 0.1)); - graph += BetweenFactor(1, 2, Pose2(2.0, 0.0, 0.0), odometryNoise); - graph += BetweenFactor(2, 3, Pose2(2.0, 0.0, M_PI_2), odometryNoise); - graph += BetweenFactor(3, 4, Pose2(2.0, 0.0, M_PI_2), odometryNoise); - graph += BetweenFactor(4, 5, Pose2(2.0, 0.0, M_PI_2), odometryNoise); + graph.emplace_shared>(1, 2, Pose2(2.0, 0.0, 0.0), odometryNoise); + graph.emplace_shared>(2, 3, Pose2(2.0, 0.0, M_PI_2), odometryNoise); + graph.emplace_shared>(3, 4, Pose2(2.0, 0.0, M_PI_2), odometryNoise); + graph.emplace_shared>(4, 5, Pose2(2.0, 0.0, M_PI_2), odometryNoise); // 2c. Add pose constraint SharedDiagonal constraintUncertainty = noiseModel::Diagonal::Sigmas( Vector3(0.2, 0.2, 0.1)); - graph += BetweenFactor(5, 2, Pose2(2.0, 0.0, M_PI_2), + graph.emplace_shared>(5, 2, Pose2(2.0, 0.0, M_PI_2), constraintUncertainty); // 3. Create the data structure to hold the initialEstimate estimate to the solution @@ -64,16 +63,12 @@ boost::tuple generateProblem() { Pose2 x5(2.1, 2.1, -M_PI_2); initialEstimate.insert(5, x5); - return boost::tie(graph, initialEstimate); + return {graph, initialEstimate}; } /* ************************************************************************* */ TEST(NonlinearConjugateGradientOptimizer, Optimize) { - - NonlinearFactorGraph graph; - Values initialEstimate; - - boost::tie(graph, initialEstimate) = generateProblem(); +const auto [graph, initialEstimate] = generateProblem(); // cout << "initial error = " << graph.error(initialEstimate) << endl; NonlinearOptimizerParams param; diff --git a/tests/testGraph.cpp b/tests/testGraph.cpp index 0d2a25fbd..92b2c56c9 100644 --- a/tests/testGraph.cpp +++ b/tests/testGraph.cpp @@ -68,7 +68,7 @@ TEST( Graph, predecessorMap2Graph ) p_map.insert(1, 2); p_map.insert(2, 2); p_map.insert(3, 2); - boost::tie(graph, root, key2vertex) = predecessorMap2Graph, SVertex, Key>(p_map); + std::tie(graph, root, key2vertex) = predecessorMap2Graph, SVertex, Key>(p_map); LONGS_EQUAL(3, (long)boost::num_vertices(graph)); CHECK(root == key2vertex[2]); @@ -174,7 +174,7 @@ TEST( GaussianFactorGraph, findMinimumSpanningTree ) // G.push_factor("x3", "x4"); // // SymbolicFactorGraph T, C; -// boost::tie(T, C) = G.splitMinimumSpanningTree(); +// std::tie(T, C) = G.splitMinimumSpanningTree(); // // SymbolicFactorGraph expectedT, expectedC; // expectedT.push_factor("x1", "x2"); @@ -207,7 +207,7 @@ TEST( GaussianFactorGraph, findMinimumSpanningTree ) // // SymbolicFactorGraph singletonGraph; // set singletons; -// boost::tie(singletonGraph, singletons) = G.removeSingletons(); +// std::tie(singletonGraph, singletons) = G.removeSingletons(); // // set singletons_excepted; singletons_excepted += "x1", "x2", "x5", "l1", "l3"; // CHECK(singletons_excepted == singletons); diff --git a/tests/testIterative.cpp b/tests/testIterative.cpp index 5519a7f04..6d9918b76 100644 --- a/tests/testIterative.cpp +++ b/tests/testIterative.cpp @@ -59,10 +59,8 @@ TEST( Iterative, conjugateGradientDescent ) VectorValues expected = fg.optimize(); // get matrices - Matrix A; - Vector b; Vector x0 = Z_6x1; - boost::tie(A, b) = fg.jacobian(); + const auto [A, b] = fg.jacobian(); Vector expectedX = (Vector(6) << -0.1, 0.1, -0.1, -0.1, 0.1, -0.2).finished(); // Do conjugate gradient descent, System version @@ -89,8 +87,8 @@ TEST( Iterative, 
conjugateGradientDescent_hard_constraint ) config.insert(X(2), Pose2(1.5,0.,0.)); NonlinearFactorGraph graph; - graph += NonlinearEquality(X(1), pose1); - graph += BetweenFactor(X(1),X(2), Pose2(1.,0.,0.), noiseModel::Isotropic::Sigma(3, 1)); + graph.emplace_shared>(X(1), pose1); + graph.emplace_shared>(X(1),X(2), Pose2(1.,0.,0.), noiseModel::Isotropic::Sigma(3, 1)); std::shared_ptr fg = graph.linearize(config); @@ -117,7 +115,7 @@ TEST( Iterative, conjugateGradientDescent_soft_constraint ) NonlinearFactorGraph graph; graph.addPrior(X(1), Pose2(0.,0.,0.), noiseModel::Isotropic::Sigma(3, 1e-10)); - graph += BetweenFactor(X(1),X(2), Pose2(1.,0.,0.), noiseModel::Isotropic::Sigma(3, 1)); + graph.emplace_shared>(X(1),X(2), Pose2(1.,0.,0.), noiseModel::Isotropic::Sigma(3, 1)); std::shared_ptr fg = graph.linearize(config); diff --git a/tests/testLie.cpp b/tests/testLie.cpp index fe1173f22..f411bd6ef 100644 --- a/tests/testLie.cpp +++ b/tests/testLie.cpp @@ -47,9 +47,9 @@ template<> struct traits : internal::LieGroupTraits { //****************************************************************************** TEST(Lie, ProductLieGroup) { - BOOST_CONCEPT_ASSERT((IsGroup)); - BOOST_CONCEPT_ASSERT((IsManifold)); - BOOST_CONCEPT_ASSERT((IsLieGroup)); + GTSAM_CONCEPT_ASSERT1(IsGroup); + GTSAM_CONCEPT_ASSERT2(IsManifold); + GTSAM_CONCEPT_ASSERT3(IsLieGroup); Product pair1; Vector5 d; d << 1, 2, 0.1, 0.2, 0.3; diff --git a/tests/testManifold.cpp b/tests/testManifold.cpp index 7d788d109..6b853061b 100644 --- a/tests/testManifold.cpp +++ b/tests/testManifold.cpp @@ -37,27 +37,27 @@ typedef PinholeCamera Camera; //****************************************************************************** TEST(Manifold, SomeManifoldsGTSAM) { - //BOOST_CONCEPT_ASSERT((IsManifold)); // integer is not a manifold - BOOST_CONCEPT_ASSERT((IsManifold)); - BOOST_CONCEPT_ASSERT((IsManifold)); - BOOST_CONCEPT_ASSERT((IsManifold)); - BOOST_CONCEPT_ASSERT((IsManifold)); + //GTSAM_CONCEPT_ASSERT(IsManifold); // integer is not a manifold + GTSAM_CONCEPT_ASSERT1(IsManifold); + GTSAM_CONCEPT_ASSERT2(IsManifold); + GTSAM_CONCEPT_ASSERT3(IsManifold); + GTSAM_CONCEPT_ASSERT4(IsManifold); } //****************************************************************************** TEST(Manifold, SomeLieGroupsGTSAM) { - BOOST_CONCEPT_ASSERT((IsLieGroup)); - BOOST_CONCEPT_ASSERT((IsLieGroup)); - BOOST_CONCEPT_ASSERT((IsLieGroup)); - BOOST_CONCEPT_ASSERT((IsLieGroup)); + GTSAM_CONCEPT_ASSERT1(IsLieGroup); + GTSAM_CONCEPT_ASSERT2(IsLieGroup); + GTSAM_CONCEPT_ASSERT3(IsLieGroup); + GTSAM_CONCEPT_ASSERT4(IsLieGroup); } //****************************************************************************** TEST(Manifold, SomeVectorSpacesGTSAM) { - BOOST_CONCEPT_ASSERT((IsVectorSpace)); - BOOST_CONCEPT_ASSERT((IsVectorSpace)); - BOOST_CONCEPT_ASSERT((IsVectorSpace)); - BOOST_CONCEPT_ASSERT((IsVectorSpace)); + GTSAM_CONCEPT_ASSERT1(IsVectorSpace); + GTSAM_CONCEPT_ASSERT2(IsVectorSpace); + GTSAM_CONCEPT_ASSERT3(IsVectorSpace); + GTSAM_CONCEPT_ASSERT4(IsVectorSpace); } //****************************************************************************** diff --git a/tests/testMarginals.cpp b/tests/testMarginals.cpp index d7746e08d..344a1f56c 100644 --- a/tests/testMarginals.cpp +++ b/tests/testMarginals.cpp @@ -59,8 +59,8 @@ TEST(Marginals, planarSLAMmarginals) { SharedDiagonal odometryNoise = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1)); Pose2 odometry(2.0, 0.0, 0.0); // create a measurement for both factors (the same in this case) // create between factors to represent 
odometry - graph += BetweenFactor(x1, x2, odometry, odometryNoise); - graph += BetweenFactor(x2, x3, odometry, odometryNoise); + graph.emplace_shared>(x1, x2, odometry, odometryNoise); + graph.emplace_shared>(x2, x3, odometry, odometryNoise); /* add measurements */ // general noisemodel for measurements @@ -75,9 +75,9 @@ TEST(Marginals, planarSLAMmarginals) { range32 = 2.0; // create bearing/range factors - graph += BearingRangeFactor(x1, l1, bearing11, range11, measurementNoise); - graph += BearingRangeFactor(x2, l1, bearing21, range21, measurementNoise); - graph += BearingRangeFactor(x3, l2, bearing32, range32, measurementNoise); + graph.emplace_shared>(x1, l1, bearing11, range11, measurementNoise); + graph.emplace_shared>(x2, l1, bearing21, range21, measurementNoise); + graph.emplace_shared>(x3, l2, bearing32, range32, measurementNoise); // linearization point for marginals Values soln; @@ -195,9 +195,9 @@ TEST(Marginals, planarSLAMmarginals) { TEST(Marginals, order) { NonlinearFactorGraph fg; fg.addPrior(0, Pose2(), noiseModel::Unit::Create(3)); - fg += BetweenFactor(0, 1, Pose2(1,0,0), noiseModel::Unit::Create(3)); - fg += BetweenFactor(1, 2, Pose2(1,0,0), noiseModel::Unit::Create(3)); - fg += BetweenFactor(2, 3, Pose2(1,0,0), noiseModel::Unit::Create(3)); + fg.emplace_shared>(0, 1, Pose2(1,0,0), noiseModel::Unit::Create(3)); + fg.emplace_shared>(1, 2, Pose2(1,0,0), noiseModel::Unit::Create(3)); + fg.emplace_shared>(2, 3, Pose2(1,0,0), noiseModel::Unit::Create(3)); Values vals; vals.insert(0, Pose2()); @@ -208,31 +208,31 @@ TEST(Marginals, order) { vals.insert(100, Point2(0,1)); vals.insert(101, Point2(1,1)); - fg += BearingRangeFactor(0, 100, + fg.emplace_shared>(0, 100, vals.at(0).bearing(vals.at(100)), vals.at(0).range(vals.at(100)), noiseModel::Unit::Create(2)); - fg += BearingRangeFactor(0, 101, + fg.emplace_shared>(0, 101, vals.at(0).bearing(vals.at(101)), vals.at(0).range(vals.at(101)), noiseModel::Unit::Create(2)); - fg += BearingRangeFactor(1, 100, + fg.emplace_shared>(1, 100, vals.at(1).bearing(vals.at(100)), vals.at(1).range(vals.at(100)), noiseModel::Unit::Create(2)); - fg += BearingRangeFactor(1, 101, + fg.emplace_shared>(1, 101, vals.at(1).bearing(vals.at(101)), vals.at(1).range(vals.at(101)), noiseModel::Unit::Create(2)); - fg += BearingRangeFactor(2, 100, + fg.emplace_shared>(2, 100, vals.at(2).bearing(vals.at(100)), vals.at(2).range(vals.at(100)), noiseModel::Unit::Create(2)); - fg += BearingRangeFactor(2, 101, + fg.emplace_shared>(2, 101, vals.at(2).bearing(vals.at(101)), vals.at(2).range(vals.at(101)), noiseModel::Unit::Create(2)); - fg += BearingRangeFactor(3, 100, + fg.emplace_shared>(3, 100, vals.at(3).bearing(vals.at(100)), vals.at(3).range(vals.at(100)), noiseModel::Unit::Create(2)); - fg += BearingRangeFactor(3, 101, + fg.emplace_shared>(3, 101, vals.at(3).bearing(vals.at(101)), vals.at(3).range(vals.at(101)), noiseModel::Unit::Create(2)); diff --git a/tests/testNonlinearEquality.cpp b/tests/testNonlinearEquality.cpp index efc0020a5..73eb2a2d9 100644 --- a/tests/testNonlinearEquality.cpp +++ b/tests/testNonlinearEquality.cpp @@ -193,11 +193,10 @@ TEST ( NonlinearEquality, allow_error_optimize ) { Symbol key1('x', 1); Pose2 feasible1(1.0, 2.0, 3.0); double error_gain = 500.0; - PoseNLE nle(key1, feasible1, error_gain); // add to a graph NonlinearFactorGraph graph; - graph += nle; + graph.emplace_shared(key1, feasible1, error_gain); // initialize away from the ideal Pose2 initPose(0.0, 2.0, 3.0); @@ -227,16 +226,13 @@ TEST ( NonlinearEquality, 
allow_error_optimize_with_factors ) { Pose2 initPose(0.0, 2.0, 3.0); init.insert(key1, initPose); + // add nle to a graph double error_gain = 500.0; - PoseNLE nle(key1, feasible1, error_gain); - - // create a soft prior that conflicts - PosePrior prior(key1, initPose, noiseModel::Isotropic::Sigma(3, 0.1)); - - // add to a graph NonlinearFactorGraph graph; - graph += nle; - graph += prior; + graph.emplace_shared<PoseNLE>(key1, feasible1, error_gain); + + // add a soft prior that conflicts + graph.emplace_shared<PosePrior>(key1, initPose, noiseModel::Isotropic::Sigma(3, 0.1)); // optimize Ordering ordering; @@ -440,17 +436,17 @@ TEST (testNonlinearEqualityConstraint, two_pose ) { Symbol x1('x', 1), x2('x', 2); Symbol l1('l', 1), l2('l', 2); Point2 pt_x1(1.0, 1.0), pt_x2(5.0, 6.0); - graph += eq2D::UnaryEqualityConstraint(pt_x1, x1); - graph += eq2D::UnaryEqualityConstraint(pt_x2, x2); + graph.emplace_shared<eq2D::UnaryEqualityConstraint>(pt_x1, x1); + graph.emplace_shared<eq2D::UnaryEqualityConstraint>(pt_x2, x2); Point2 z1(0.0, 5.0); SharedNoiseModel sigma(noiseModel::Isotropic::Sigma(2, 0.1)); - graph += simulated2D::Measurement(z1, sigma, x1, l1); + graph.emplace_shared<simulated2D::Measurement>(z1, sigma, x1, l1); Point2 z2(-4.0, 0.0); - graph += simulated2D::Measurement(z2, sigma, x2, l2); + graph.emplace_shared<simulated2D::Measurement>(z2, sigma, x2, l2); - graph += eq2D::PointEqualityConstraint(l1, l2); + graph.emplace_shared<eq2D::PointEqualityConstraint>(l1, l2); Values initialEstimate; initialEstimate.insert(x1, pt_x1); @@ -480,20 +476,20 @@ TEST (testNonlinearEqualityConstraint, map_warp ) { // constant constraint on x1 Point2 pose1(1.0, 1.0); - graph += eq2D::UnaryEqualityConstraint(pose1, x1); + graph.emplace_shared<eq2D::UnaryEqualityConstraint>(pose1, x1); SharedDiagonal sigma = noiseModel::Isotropic::Sigma(2, 0.1); // measurement from x1 to l1 Point2 z1(0.0, 5.0); - graph += simulated2D::Measurement(z1, sigma, x1, l1); + graph.emplace_shared<simulated2D::Measurement>(z1, sigma, x1, l1); // measurement from x2 to l2 Point2 z2(-4.0, 0.0); - graph += simulated2D::Measurement(z2, sigma, x2, l2); + graph.emplace_shared<simulated2D::Measurement>(z2, sigma, x2, l2); // equality constraint between l1 and l2 - graph += eq2D::PointEqualityConstraint(l1, l2); + graph.emplace_shared<eq2D::PointEqualityConstraint>(l1, l2); // create an initial estimate Values initialEstimate; @@ -542,18 +538,18 @@ TEST (testNonlinearEqualityConstraint, stereo_constrained ) { NonlinearFactorGraph graph; // create equality constraints for poses - graph += NonlinearEquality<Pose3>(key_x1, leftCamera.pose()); - graph += NonlinearEquality<Pose3>(key_x2, rightCamera.pose()); + graph.emplace_shared<NonlinearEquality<Pose3>>(key_x1, leftCamera.pose()); + graph.emplace_shared<NonlinearEquality<Pose3>>(key_x2, rightCamera.pose()); // create factors SharedDiagonal vmodel = noiseModel::Unit::Create(2); - graph += GenericProjectionFactor<Pose3, Point3>( + graph.emplace_shared<GenericProjectionFactor<Pose3, Point3>>( leftCamera.project(landmark), vmodel, key_x1, key_l1, shK); - graph += GenericProjectionFactor<Pose3, Point3>( + graph.emplace_shared<GenericProjectionFactor<Pose3, Point3>>( rightCamera.project(landmark), vmodel, key_x2, key_l2, shK); // add equality constraint saying there is only one point - graph += NonlinearEquality2<Point3>(key_l1, key_l2); + graph.emplace_shared<NonlinearEquality2<Point3>>(key_l1, key_l2); // create initial data Point3 landmark1(0.5, 5, 0);
diff --git a/tests/testNonlinearFactorGraph.cpp b/tests/testNonlinearFactorGraph.cpp index 136cd064f..305cd963a 100644 --- a/tests/testNonlinearFactorGraph.cpp +++ b/tests/testNonlinearFactorGraph.cpp @@ -78,13 +78,13 @@ TEST( NonlinearFactorGraph, keys ) /* ************************************************************************* */ TEST( NonlinearFactorGraph, GET_ORDERING) { - Ordering expected; expected += L(1), X(2), X(1); // For starting with l1,x1,x2 + const Ordering expected{L(1), X(2), X(1)}; // For starting with l1,x1,x2 NonlinearFactorGraph nlfg = createNonlinearFactorGraph(); Ordering actual = Ordering::Colamd(nlfg); EXPECT(assert_equal(expected,actual)); // Constrained ordering - put x2 at the end - Ordering expectedConstrained; expectedConstrained += L(1), X(1), X(2); + const Ordering expectedConstrained{L(1), X(1), X(2)}; FastMap<Key, int> constraints; constraints[X(2)] = 1; Ordering actualConstrained = Ordering::ColamdConstrained(nlfg, constraints); @@ -162,8 +162,8 @@ TEST( NonlinearFactorGraph, rekey ) // updated measurements Point2 z3(0, -1), z4(-1.5, -1.); SharedDiagonal sigma0_2 = noiseModel::Isotropic::Sigma(2,0.2); - expRekey += simulated2D::Measurement(z3, sigma0_2, X(1), L(4)); - expRekey += simulated2D::Measurement(z4, sigma0_2, X(2), L(4)); + expRekey.emplace_shared<simulated2D::Measurement>(z3, sigma0_2, X(1), L(4)); + expRekey.emplace_shared<simulated2D::Measurement>(z4, sigma0_2, X(2), L(4)); EXPECT(assert_equal(expRekey, actRekey)); } @@ -198,8 +198,7 @@ TEST(NonlinearFactorGraph, UpdateCholesky) { EXPECT(assert_equal(expected, fg.updateCholesky(initial))); // solve with Ordering - Ordering ordering; - ordering += L(1), X(2), X(1); + const Ordering ordering{L(1), X(2), X(1)}; EXPECT(assert_equal(expected, fg.updateCholesky(initial, ordering))); // solve with new method, heavily damped @@ -251,8 +250,7 @@ TEST(testNonlinearFactorGraph, eliminate) { auto linearized = graph.linearize(values); // Eliminate - Ordering ordering; - ordering += 11, 21, 12, 22; + const Ordering ordering{11, 21, 12, 22}; auto bn = linearized->eliminateSequential(ordering); EXPECT_LONGS_EQUAL(4, bn->size()); }
diff --git a/tests/testNonlinearISAM.cpp b/tests/testNonlinearISAM.cpp index 4249f5bc4..4ffdbdafe 100644 --- a/tests/testNonlinearISAM.cpp +++ b/tests/testNonlinearISAM.cpp @@ -36,7 +36,7 @@ TEST(testNonlinearISAM, markov_chain ) { // create initial graph Pose2 cur_pose; // start at origin NonlinearFactorGraph start_factors; - start_factors += NonlinearEquality<Pose2>(0, cur_pose); + start_factors.emplace_shared<NonlinearEquality<Pose2>>(0, cur_pose); Values init; Values expected; @@ -50,7 +50,7 @@ TEST(testNonlinearISAM, markov_chain ) { Pose2 z(1.0, 2.0, 0.1); for (size_t i=1; i<=nrPoses; ++i) { NonlinearFactorGraph new_factors; - new_factors += BetweenFactor<Pose2>(i-1, i, z, model); + new_factors.emplace_shared<BetweenFactor<Pose2>>(i-1, i, z, model); Values new_init; cur_pose = cur_pose.compose(z); @@ -84,7 +84,7 @@ TEST(testNonlinearISAM, markov_chain_with_disconnects ) { // create initial graph Pose2 cur_pose; // start at origin NonlinearFactorGraph start_factors; - start_factors += NonlinearEquality<Pose2>(0, cur_pose); + start_factors.emplace_shared<NonlinearEquality<Pose2>>(0, cur_pose); Values init; Values expected; @@ -106,7 +106,7 @@ TEST(testNonlinearISAM, markov_chain_with_disconnects ) { Pose2 z(1.0, 2.0, 0.1); for (size_t i=1; i<=nrPoses; ++i) { NonlinearFactorGraph new_factors; - new_factors += BetweenFactor<Pose2>(i-1, i, z, model3); + new_factors.emplace_shared<BetweenFactor<Pose2>>(i-1, i, z, model3); Values new_init; cur_pose = cur_pose.compose(z); @@ -161,7 +161,7 @@ TEST(testNonlinearISAM, markov_chain_with_reconnect ) { // create initial graph Pose2 cur_pose; // start at origin NonlinearFactorGraph start_factors; - start_factors += NonlinearEquality<Pose2>(0, cur_pose); + start_factors.emplace_shared<NonlinearEquality<Pose2>>(0, cur_pose); Values init; Values expected; @@ -183,7 +183,7 @@ TEST(testNonlinearISAM, markov_chain_with_reconnect ) { Pose2 z(1.0, 2.0, 0.1); for (size_t i=1; i<=nrPoses; ++i) { NonlinearFactorGraph new_factors; - new_factors += BetweenFactor<Pose2>(i-1, i, z, model3); + new_factors.emplace_shared<BetweenFactor<Pose2>>(i-1, i, z, model3); Values new_init; cur_pose = cur_pose.compose(z);
@@ -204,7 +204,7 @@ TEST(testNonlinearISAM, markov_chain_with_reconnect ) { // Reconnect with observation later if (i == 15) { - new_factors += BearingRangeFactor( + new_factors.emplace_shared>( i, lm1, cur_pose.bearing(landmark1), cur_pose.range(landmark1), model2); } diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index c15de4f5d..cac25c246 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ -31,8 +31,6 @@ #include -#include -using boost::adaptors::map_values; #include #include @@ -192,7 +190,7 @@ TEST( NonlinearOptimizer, Factorization ) NonlinearFactorGraph graph; graph.addPrior(X(1), Pose2(0.,0.,0.), noiseModel::Isotropic::Sigma(3, 1e-10)); - graph += BetweenFactor(X(1),X(2), Pose2(1.,0.,0.), noiseModel::Isotropic::Sigma(3, 1)); + graph.emplace_shared>(X(1),X(2), Pose2(1.,0.,0.), noiseModel::Isotropic::Sigma(3, 1)); Ordering ordering; ordering.push_back(X(1)); @@ -251,9 +249,9 @@ TEST_UNSAFE(NonlinearOptimizer, MoreOptimization) { NonlinearFactorGraph fg; fg.addPrior(0, Pose2(0, 0, 0), noiseModel::Isotropic::Sigma(3, 1)); - fg += BetweenFactor(0, 1, Pose2(1, 0, M_PI / 2), + fg.emplace_shared>(0, 1, Pose2(1, 0, M_PI / 2), noiseModel::Isotropic::Sigma(3, 1)); - fg += BetweenFactor(1, 2, Pose2(1, 0, M_PI / 2), + fg.emplace_shared>(1, 2, Pose2(1, 0, M_PI / 2), noiseModel::Isotropic::Sigma(3, 1)); Values init; @@ -302,7 +300,9 @@ TEST_UNSAFE(NonlinearOptimizer, MoreOptimization) { GaussianFactorGraph::shared_ptr linear = optimizer.linearize(); VectorValues d = linear->hessianDiagonal(); VectorValues sqrtHessianDiagonal = d; - for (Vector& v : sqrtHessianDiagonal | map_values) v = v.cwiseSqrt(); + for (auto& [key, value] : sqrtHessianDiagonal) { + value = value.cwiseSqrt(); + } GaussianFactorGraph damped = optimizer.buildDampedSystem(*linear, sqrtHessianDiagonal); VectorValues expectedDiagonal = d + params.lambdaInitial * d; EXPECT(assert_equal(expectedDiagonal, damped.hessianDiagonal())); @@ -352,10 +352,10 @@ TEST(NonlinearOptimizer, Pose2OptimizationWithHuberNoOutlier) { NonlinearFactorGraph fg; fg.addPrior(0, Pose2(0,0,0), noiseModel::Isotropic::Sigma(3,1)); - fg += BetweenFactor(0, 1, Pose2(1,1.1,M_PI/4), + fg.emplace_shared>(0, 1, Pose2(1,1.1,M_PI/4), noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(2.0), noiseModel::Isotropic::Sigma(3,1))); - fg += BetweenFactor(0, 1, Pose2(1,0.9,M_PI/2), + fg.emplace_shared>(0, 1, Pose2(1,0.9,M_PI/2), noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(3.0), noiseModel::Isotropic::Sigma(3,1))); @@ -383,13 +383,13 @@ TEST(NonlinearOptimizer, Point2LinearOptimizationWithHuber) { NonlinearFactorGraph fg; fg.addPrior(0, Point2(0,0), noiseModel::Isotropic::Sigma(2,0.01)); - fg += BetweenFactor(0, 1, Point2(1,1.8), + fg.emplace_shared>(0, 1, Point2(1,1.8), noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(1.0), noiseModel::Isotropic::Sigma(2,1))); - fg += BetweenFactor(0, 1, Point2(1,0.9), + fg.emplace_shared>(0, 1, Point2(1,0.9), noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(1.0), noiseModel::Isotropic::Sigma(2,1))); - fg += BetweenFactor(0, 1, Point2(1,90), + fg.emplace_shared>(0, 1, Point2(1,90), noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(1.0), noiseModel::Isotropic::Sigma(2,1))); @@ -417,16 +417,16 @@ TEST(NonlinearOptimizer, Pose2OptimizationWithHuber) { NonlinearFactorGraph fg; fg.addPrior(0, Pose2(0,0, 0), noiseModel::Isotropic::Sigma(3,0.1)); - fg += BetweenFactor(0, 1, Pose2(0,9, M_PI/2), + 
fg.emplace_shared>(0, 1, Pose2(0,9, M_PI/2), noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(0.2), noiseModel::Isotropic::Sigma(3,1))); - fg += BetweenFactor(0, 1, Pose2(0, 11, M_PI/2), + fg.emplace_shared>(0, 1, Pose2(0, 11, M_PI/2), noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(0.2), noiseModel::Isotropic::Sigma(3,1))); - fg += BetweenFactor(0, 1, Pose2(0, 10, M_PI/2), + fg.emplace_shared>(0, 1, Pose2(0, 10, M_PI/2), noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(0.2), noiseModel::Isotropic::Sigma(3,1))); - fg += BetweenFactor(0, 1, Pose2(0,9, 0), + fg.emplace_shared>(0, 1, Pose2(0,9, 0), noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(0.2), noiseModel::Isotropic::Sigma(3,1))); @@ -495,7 +495,7 @@ TEST(NonlinearOptimizer, disconnected_graph) { NonlinearFactorGraph graph; graph.addPrior(X(1), Pose2(0.,0.,0.), noiseModel::Isotropic::Sigma(3,1)); - graph += BetweenFactor(X(1),X(2), Pose2(1.5,0.,0.), noiseModel::Isotropic::Sigma(3,1)); + graph.emplace_shared>(X(1),X(2), Pose2(1.5,0.,0.), noiseModel::Isotropic::Sigma(3,1)); graph.addPrior(X(3), Pose2(3.,0.,0.), noiseModel::Isotropic::Sigma(3,1)); EXPECT(assert_equal(expected, LevenbergMarquardtOptimizer(graph, init).optimize())); @@ -541,7 +541,7 @@ TEST(NonlinearOptimizer, subclass_solver) { NonlinearFactorGraph graph; graph.addPrior(X(1), Pose2(0., 0., 0.), noiseModel::Isotropic::Sigma(3, 1)); - graph += BetweenFactor(X(1), X(2), Pose2(1.5, 0., 0.), + graph.emplace_shared>(X(1), X(2), Pose2(1.5, 0., 0.), noiseModel::Isotropic::Sigma(3, 1)); graph.addPrior(X(3), Pose2(3., 0., 0.), noiseModel::Isotropic::Sigma(3, 1)); diff --git a/tests/testPCGSolver.cpp b/tests/testPCGSolver.cpp index 9497c00d7..bc9f9e608 100644 --- a/tests/testPCGSolver.cpp +++ b/tests/testPCGSolver.cpp @@ -84,13 +84,13 @@ TEST( GaussianFactorGraphSystem, multiply_getb) // Create a Gaussian Factor Graph GaussianFactorGraph simpleGFG; SharedDiagonal unit2 = noiseModel::Diagonal::Sigmas(Vector2(0.5, 0.3)); - simpleGFG += JacobianFactor(2, (Matrix(2,2)<< 10, 0, 0, 10).finished(), (Vector(2) << -1, -1).finished(), unit2); - simpleGFG += JacobianFactor(2, (Matrix(2,2)<< -10, 0, 0, -10).finished(), 0, (Matrix(2,2)<< 10, 0, 0, 10).finished(), (Vector(2) << 2, -1).finished(), unit2); - simpleGFG += JacobianFactor(2, (Matrix(2,2)<< -5, 0, 0, -5).finished(), 1, (Matrix(2,2)<< 5, 0, 0, 5).finished(), (Vector(2) << 0, 1).finished(), unit2); - simpleGFG += JacobianFactor(0, (Matrix(2,2)<< -5, 0, 0, -5).finished(), 1, (Matrix(2,2)<< 5, 0, 0, 5).finished(), (Vector(2) << -1, 1.5).finished(), unit2); - simpleGFG += JacobianFactor(0, (Matrix(2,2)<< 1, 0, 0, 1).finished(), (Vector(2) << 0, 0).finished(), unit2); - simpleGFG += JacobianFactor(1, (Matrix(2,2)<< 1, 0, 0, 1).finished(), (Vector(2) << 0, 0).finished(), unit2); - simpleGFG += JacobianFactor(2, (Matrix(2,2)<< 1, 0, 0, 1).finished(), (Vector(2) << 0, 0).finished(), unit2); + simpleGFG.emplace_shared(2, (Matrix(2,2)<< 10, 0, 0, 10).finished(), (Vector(2) << -1, -1).finished(), unit2); + simpleGFG.emplace_shared(2, (Matrix(2,2)<< -10, 0, 0, -10).finished(), 0, (Matrix(2,2)<< 10, 0, 0, 10).finished(), (Vector(2) << 2, -1).finished(), unit2); + simpleGFG.emplace_shared(2, (Matrix(2,2)<< -5, 0, 0, -5).finished(), 1, (Matrix(2,2)<< 5, 0, 0, 5).finished(), (Vector(2) << 0, 1).finished(), unit2); + simpleGFG.emplace_shared(0, (Matrix(2,2)<< -5, 0, 0, -5).finished(), 1, (Matrix(2,2)<< 5, 0, 0, 5).finished(), (Vector(2) << -1, 1.5).finished(), unit2); + 
simpleGFG.emplace_shared(0, (Matrix(2,2)<< 1, 0, 0, 1).finished(), (Vector(2) << 0, 0).finished(), unit2); + simpleGFG.emplace_shared(1, (Matrix(2,2)<< 1, 0, 0, 1).finished(), (Vector(2) << 0, 0).finished(), unit2); + simpleGFG.emplace_shared(2, (Matrix(2,2)<< 1, 0, 0, 1).finished(), (Vector(2) << 0, 0).finished(), unit2); // Create a dummy-preconditioner and a GaussianFactorGraphSystem DummyPreconditioner dummyPreconditioner; diff --git a/tests/testPreconditioner.cpp b/tests/testPreconditioner.cpp index 8e3c6dcc5..ecdf36322 100644 --- a/tests/testPreconditioner.cpp +++ b/tests/testPreconditioner.cpp @@ -39,7 +39,7 @@ TEST( PCGsolver, verySimpleLinearSystem) { // Create a Gaussian Factor Graph GaussianFactorGraph simpleGFG; - simpleGFG += JacobianFactor(0, (Matrix(2,2)<< 4, 1, 1, 3).finished(), (Vector(2) << 1,2 ).finished(), noiseModel::Unit::Create(2)); + simpleGFG.emplace_shared(0, (Matrix(2,2)<< 4, 1, 1, 3).finished(), (Vector(2) << 1,2 ).finished(), noiseModel::Unit::Create(2)); // Exact solution already known VectorValues exactSolution; @@ -81,13 +81,13 @@ TEST(PCGSolver, simpleLinearSystem) { GaussianFactorGraph simpleGFG; //SharedDiagonal unit2 = noiseModel::Unit::Create(2); SharedDiagonal unit2 = noiseModel::Diagonal::Sigmas(Vector2(0.5, 0.3)); - simpleGFG += JacobianFactor(2, (Matrix(2,2)<< 10, 0, 0, 10).finished(), (Vector(2) << -1, -1).finished(), unit2); - simpleGFG += JacobianFactor(2, (Matrix(2,2)<< -10, 0, 0, -10).finished(), 0, (Matrix(2,2)<< 10, 0, 0, 10).finished(), (Vector(2) << 2, -1).finished(), unit2); - simpleGFG += JacobianFactor(2, (Matrix(2,2)<< -5, 0, 0, -5).finished(), 1, (Matrix(2,2)<< 5, 0, 0, 5).finished(), (Vector(2) << 0, 1).finished(), unit2); - simpleGFG += JacobianFactor(0, (Matrix(2,2)<< -5, 0, 0, -5).finished(), 1, (Matrix(2,2)<< 5, 0, 0, 5).finished(), (Vector(2) << -1, 1.5).finished(), unit2); - simpleGFG += JacobianFactor(0, (Matrix(2,2)<< 1, 0, 0, 1).finished(), (Vector(2) << 0, 0).finished(), unit2); - simpleGFG += JacobianFactor(1, (Matrix(2,2)<< 1, 0, 0, 1).finished(), (Vector(2) << 0, 0).finished(), unit2); - simpleGFG += JacobianFactor(2, (Matrix(2,2)<< 1, 0, 0, 1).finished(), (Vector(2) << 0, 0).finished(), unit2); + simpleGFG.emplace_shared(2, (Matrix(2,2)<< 10, 0, 0, 10).finished(), (Vector(2) << -1, -1).finished(), unit2); + simpleGFG.emplace_shared(2, (Matrix(2,2)<< -10, 0, 0, -10).finished(), 0, (Matrix(2,2)<< 10, 0, 0, 10).finished(), (Vector(2) << 2, -1).finished(), unit2); + simpleGFG.emplace_shared(2, (Matrix(2,2)<< -5, 0, 0, -5).finished(), 1, (Matrix(2,2)<< 5, 0, 0, 5).finished(), (Vector(2) << 0, 1).finished(), unit2); + simpleGFG.emplace_shared(0, (Matrix(2,2)<< -5, 0, 0, -5).finished(), 1, (Matrix(2,2)<< 5, 0, 0, 5).finished(), (Vector(2) << -1, 1.5).finished(), unit2); + simpleGFG.emplace_shared(0, (Matrix(2,2)<< 1, 0, 0, 1).finished(), (Vector(2) << 0, 0).finished(), unit2); + simpleGFG.emplace_shared(1, (Matrix(2,2)<< 1, 0, 0, 1).finished(), (Vector(2) << 0, 0).finished(), unit2); + simpleGFG.emplace_shared(2, (Matrix(2,2)<< 1, 0, 0, 1).finished(), (Vector(2) << 0, 0).finished(), unit2); //simpleGFG.print("system"); // Expected solution diff --git a/tests/testRot3Optimization.cpp b/tests/testRot3Optimization.cpp index 5746022a3..5cb31ef0a 100644 --- a/tests/testRot3Optimization.cpp +++ b/tests/testRot3Optimization.cpp @@ -17,7 +17,6 @@ #include #include -#include #include #include #include @@ -32,6 +31,8 @@ using namespace gtsam; typedef BetweenFactor Between; typedef NonlinearFactorGraph Graph; +using symbol_shorthand::R; + 
/* ************************************************************************* */ TEST(Rot3, optimize) { @@ -39,11 +40,11 @@ TEST(Rot3, optimize) { Values truth; Values initial; Graph fg; - fg.addPrior(Symbol('r',0), Rot3(), noiseModel::Isotropic::Sigma(3, 0.01)); + fg.addPrior(R(0), Rot3(), noiseModel::Isotropic::Sigma(3, 0.01)); for(int j=0; j<6; ++j) { - truth.insert(Symbol('r',j), Rot3::Rz(M_PI/3.0 * double(j))); - initial.insert(Symbol('r',j), Rot3::Rz(M_PI/3.0 * double(j) + 0.1 * double(j%2))); - fg += Between(Symbol('r',j), Symbol('r',(j+1)%6), Rot3::Rz(M_PI/3.0), noiseModel::Isotropic::Sigma(3, 0.01)); + truth.insert(R(j), Rot3::Rz(M_PI/3.0 * double(j))); + initial.insert(R(j), Rot3::Rz(M_PI/3.0 * double(j) + 0.1 * double(j%2))); + fg.emplace_shared(R(j), R((j+1)%6), Rot3::Rz(M_PI/3.0), noiseModel::Isotropic::Sigma(3, 0.01)); } Values final = GaussNewtonOptimizer(fg, initial).optimize(); diff --git a/tests/testSerializationSlam.cpp b/tests/testSerializationSlam.cpp index e03ef015b..c4170b108 100644 --- a/tests/testSerializationSlam.cpp +++ b/tests/testSerializationSlam.cpp @@ -217,7 +217,7 @@ BOOST_CLASS_EXPORT_GUID(GenericStereoFactor3D, "gtsam::GenericStereoFactor3D") TEST (testSerializationSLAM, smallExample_linear) { using namespace example; - Ordering ordering; ordering += X(1),X(2),L(1); + const Ordering ordering{X(1), X(2), L(1)}; EXPECT(equalsObj(ordering)); EXPECT(equalsXML(ordering)); EXPECT(equalsBinary(ordering)); diff --git a/tests/testSimulated3D.cpp b/tests/testSimulated3D.cpp index cfe00856a..9bc67c115 100644 --- a/tests/testSimulated3D.cpp +++ b/tests/testSimulated3D.cpp @@ -21,7 +21,6 @@ #include #include -#include #include #include diff --git a/tests/testSubgraphPreconditioner.cpp b/tests/testSubgraphPreconditioner.cpp index 8abd54795..994fe5112 100644 --- a/tests/testSubgraphPreconditioner.cpp +++ b/tests/testSubgraphPreconditioner.cpp @@ -29,9 +29,6 @@ #include -#include -#include - #include using namespace std; @@ -45,11 +42,9 @@ Symbol key(int x, int y) { return symbol_shorthand::X(1000 * x + y); } /* ************************************************************************* */ TEST(SubgraphPreconditioner, planarOrdering) { // Check canonical ordering - Ordering expected, ordering = planarOrdering(3); - expected += - key(3, 3), key(2, 3), key(1, 3), - key(3, 2), key(2, 2), key(1, 2), - key(3, 1), key(2, 1), key(1, 1); + Ordering ordering = planarOrdering(3), + expected{key(3, 3), key(2, 3), key(1, 3), key(3, 2), key(2, 2), + key(1, 2), key(3, 1), key(2, 1), key(1, 1)}; EXPECT(assert_equal(expected, ordering)); } @@ -65,9 +60,7 @@ static double error(const GaussianFactorGraph& fg, const VectorValues& x) { /* ************************************************************************* */ TEST(SubgraphPreconditioner, planarGraph) { // Check planar graph construction - GaussianFactorGraph A; - VectorValues xtrue; - boost::tie(A, xtrue) = planarGraph(3); + const auto [A, xtrue] = planarGraph(3); LONGS_EQUAL(13, A.size()); LONGS_EQUAL(9, xtrue.size()); DOUBLES_EQUAL(0, error(A, xtrue), 1e-9); // check zero error for xtrue @@ -81,13 +74,10 @@ TEST(SubgraphPreconditioner, planarGraph) { /* ************************************************************************* */ TEST(SubgraphPreconditioner, splitOffPlanarTree) { // Build a planar graph - GaussianFactorGraph A; - VectorValues xtrue; - boost::tie(A, xtrue) = planarGraph(3); + const auto [A, xtrue] = planarGraph(3); // Get the spanning tree and constraints, and check their sizes - GaussianFactorGraph T, C; - boost::tie(T, 
C) = splitOffPlanarTree(3, A); + const auto [T, C] = splitOffPlanarTree(3, A); LONGS_EQUAL(9, T.size()); LONGS_EQUAL(4, C.size()); @@ -100,14 +90,11 @@ TEST(SubgraphPreconditioner, splitOffPlanarTree) { /* ************************************************************************* */ TEST(SubgraphPreconditioner, system) { // Build a planar graph - GaussianFactorGraph Ab; - VectorValues xtrue; size_t N = 3; - boost::tie(Ab, xtrue) = planarGraph(N); // A*x-b + const auto [Ab, xtrue] = planarGraph(N); // A*x-b // Get the spanning tree and remaining graph - GaussianFactorGraph Ab1, Ab2; // A1*x-b1 and A2*x-b2 - boost::tie(Ab1, Ab2) = splitOffPlanarTree(N, Ab); + auto [Ab1, Ab2] = splitOffPlanarTree(N, Ab); // Eliminate the spanning tree to build a prior const Ordering ord = planarOrdering(N); @@ -123,11 +110,9 @@ TEST(SubgraphPreconditioner, system) { Ab2.add(key(1, 1), Z_2x2, Z_2x1); Ab2.add(key(1, 2), Z_2x2, Z_2x1); Ab2.add(key(1, 3), Z_2x2, Z_2x1); - Matrix A, A1, A2; - Vector b, b1, b2; - std::tie(A, b) = Ab.jacobian(ordering); - std::tie(A1, b1) = Ab1.jacobian(ordering); - std::tie(A2, b2) = Ab2.jacobian(ordering); + const auto [A, b] = Ab.jacobian(ordering); + const auto [A1, b1] = Ab1.jacobian(ordering); + const auto [A2, b2] = Ab2.jacobian(ordering); Matrix R1 = Rc1.matrix(ordering).first; Matrix Abar(13 * 2, 9 * 2); Abar.topRows(9 * 2) = Matrix::Identity(9 * 2, 9 * 2); @@ -196,14 +181,11 @@ TEST(SubgraphPreconditioner, system) { /* ************************************************************************* */ TEST(SubgraphPreconditioner, conjugateGradients) { // Build a planar graph - GaussianFactorGraph Ab; - VectorValues xtrue; size_t N = 3; - boost::tie(Ab, xtrue) = planarGraph(N); // A*x-b + const auto [Ab, xtrue] = planarGraph(N); // A*x-b // Get the spanning tree - GaussianFactorGraph Ab1, Ab2; // A1*x-b1 and A2*x-b2 - boost::tie(Ab1, Ab2) = splitOffPlanarTree(N, Ab); + const auto [Ab1, Ab2] = splitOffPlanarTree(N, Ab); // Eliminate the spanning tree to build a prior GaussianBayesNet Rc1 = *Ab1.eliminateSequential(); // R1*x-c1 diff --git a/tests/testSubgraphSolver.cpp b/tests/testSubgraphSolver.cpp index 69b5fe5f9..026f3c919 100644 --- a/tests/testSubgraphSolver.cpp +++ b/tests/testSubgraphSolver.cpp @@ -55,9 +55,7 @@ TEST( SubgraphSolver, Parameters ) TEST( SubgraphSolver, splitFactorGraph ) { // Build a planar graph - GaussianFactorGraph Ab; - VectorValues xtrue; - std::tie(Ab, xtrue) = example::planarGraph(N); // A*x-b + const auto [Ab, xtrue] = example::planarGraph(N); // A*x-b SubgraphBuilderParameters params; params.augmentationFactor = 0.0; @@ -65,8 +63,7 @@ TEST( SubgraphSolver, splitFactorGraph ) auto subgraph = builder(Ab); EXPECT_LONGS_EQUAL(9, subgraph.size()); - GaussianFactorGraph Ab1, Ab2; - std::tie(Ab1, Ab2) = splitFactorGraph(Ab, subgraph); + const auto [Ab1, Ab2] = splitFactorGraph(Ab, subgraph); EXPECT_LONGS_EQUAL(9, Ab1.size()); EXPECT_LONGS_EQUAL(13, Ab2.size()); } @@ -75,9 +72,7 @@ TEST( SubgraphSolver, splitFactorGraph ) TEST( SubgraphSolver, constructor1 ) { // Build a planar graph - GaussianFactorGraph Ab; - VectorValues xtrue; - std::tie(Ab, xtrue) = example::planarGraph(N); // A*x-b + const auto [Ab, xtrue] = example::planarGraph(N); // A*x-b // The first constructor just takes a factor graph (and kParameters) // and it will split the graph into A1 and A2, where A1 is a spanning tree @@ -90,14 +85,11 @@ TEST( SubgraphSolver, constructor1 ) TEST( SubgraphSolver, constructor2 ) { // Build a planar graph - GaussianFactorGraph Ab; - VectorValues xtrue; 
size_t N = 3; - std::tie(Ab, xtrue) = example::planarGraph(N); // A*x-b + const auto [Ab, xtrue] = example::planarGraph(N); // A*x-b - // Get the spanning tree - GaussianFactorGraph Ab1, Ab2; // A1*x-b1 and A2*x-b2 - std::tie(Ab1, Ab2) = example::splitOffPlanarTree(N, Ab); + // Get the spanning tree, A1*x-b1 and A2*x-b2 + const auto [Ab1, Ab2] = example::splitOffPlanarTree(N, Ab); // The second constructor takes two factor graphs, so the caller can specify // the preconditioner (Ab1) and the constraints that are left out (Ab2) @@ -110,14 +102,12 @@ TEST( SubgraphSolver, constructor2 ) TEST( SubgraphSolver, constructor3 ) { // Build a planar graph - GaussianFactorGraph Ab; - VectorValues xtrue; size_t N = 3; - std::tie(Ab, xtrue) = example::planarGraph(N); // A*x-b + const auto [Ab, xtrue] = example::planarGraph(N); // A*x-b // Get the spanning tree and corresponding kOrdering - GaussianFactorGraph Ab1, Ab2; // A1*x-b1 and A2*x-b2 - std::tie(Ab1, Ab2) = example::splitOffPlanarTree(N, Ab); + // A1*x-b1 and A2*x-b2 + const auto [Ab1, Ab2] = example::splitOffPlanarTree(N, Ab); // The caller solves |A1*x-b1|^2 == |R1*x-c1|^2, where R1 is square UT auto Rc1 = *Ab1.eliminateSequential(); diff --git a/timing/CMakeLists.txt b/timing/CMakeLists.txt index fc16d3ac8..2ebfeb9d8 100644 --- a/timing/CMakeLists.txt +++ b/timing/CMakeLists.txt @@ -1,3 +1,22 @@ -gtsamAddTimingGlob("*.cpp" "" "gtsam") +set (excluded_scripts + elaboratePoint2KalmanFilter.cpp +) +# Add scripts to exclude if GTSAM_ENABLE_BOOST_SERIALIZATION is not set +if (NOT GTSAM_ENABLE_BOOST_SERIALIZATION) + # add to excluded scripts + list (APPEND excluded_scripts + "timeIncremental.cpp" + ) +endif() + +# Add scripts to exclude if GTSAM_USE_BOOST_FEATURES is not set +if (NOT GTSAM_USE_BOOST_FEATURES) + # add to excluded scripts + list (APPEND excluded_scripts + "timeISAM2Chain.cpp" + ) +endif() + +gtsamAddTimingGlob("*.cpp" "${excluded_scripts}" "gtsam") target_link_libraries(timeGaussianFactorGraph CppUnitLite) diff --git a/timing/timeGaussianFactor.cpp b/timing/timeGaussianFactor.cpp index feb738439..04bd090e1 100644 --- a/timing/timeGaussianFactor.cpp +++ b/timing/timeGaussianFactor.cpp @@ -21,7 +21,6 @@ #include using namespace std; -#include #include #include @@ -106,7 +105,7 @@ int main() JacobianFactor::shared_ptr factor; for(int i = 0; i < n; i++) - boost::tie(conditional, factor) = + std::tie(conditional, factor) = JacobianFactor(combined).eliminate(Ordering{_x2_}); long timeLog2 = clock(); @@ -116,8 +115,7 @@ int main() cout << ((double)n/seconds) << " calls/second" << endl; // time matrix_augmented -// Ordering ordering; -// ordering += _x2_, _l1_, _x1_; +// const Ordering ordering{_x2_, _l1_, _x1_}; // size_t n1 = 10000000; // timeLog = clock(); // diff --git a/timing/timeGaussianFactorGraph.cpp b/timing/timeGaussianFactorGraph.cpp index 574579f84..a5e9b6eed 100644 --- a/timing/timeGaussianFactorGraph.cpp +++ b/timing/timeGaussianFactorGraph.cpp @@ -63,7 +63,7 @@ double timePlanarSmootherEliminate(int N, bool old = true) { //double timePlanarSmootherJoinAug(int N, size_t reps) { // GaussianFactorGraph fgBase; // VectorValues config; -// boost::tie(fgBase,config) = planarGraph(N); +// std::tie(fgBase,config) = planarGraph(N); // Ordering ordering = fgBase.getOrdering(); // Symbol key = ordering.front(); // @@ -96,7 +96,7 @@ double timePlanarSmootherEliminate(int N, bool old = true) { //double timePlanarSmootherCombined(int N, size_t reps) { // GaussianFactorGraph fgBase; // VectorValues config; -// boost::tie(fgBase,config) = 
planarGraph(N); +// std::tie(fgBase,config) = planarGraph(N); // Ordering ordering = fgBase.getOrdering(); // Symbol key = ordering.front(); // diff --git a/timing/timeIncremental.cpp b/timing/timeIncremental.cpp index b8bcca5e7..a1948f115 100644 --- a/timing/timeIncremental.cpp +++ b/timing/timeIncremental.cpp @@ -27,7 +27,6 @@ #include #include #include -#include using namespace std; using namespace gtsam; @@ -225,9 +224,14 @@ int main(int argc, char *argv[]) { try { Marginals marginals(graph, values); int i=0; - for (Key key1: boost::adaptors::reverse(values.keys())) { + // Assign the keyvector to a named variable + auto keys = values.keys(); + // Iterate over it in reverse + for (auto it1 = keys.rbegin(); it1 != keys.rend(); ++it1) { + Key key1 = *it1; int j=0; - for (Key key2: boost::adaptors::reverse(values.keys())) { + for (auto it2 = keys.rbegin(); it2 != keys.rend(); ++it2) { + Key key2 = *it2; if(i != j) { gttic_(jointMarginalInformation); KeyVector keys(2); diff --git a/timing/timeLago.cpp b/timing/timeLago.cpp index bb506de36..906447950 100644 --- a/timing/timeLago.cpp +++ b/timing/timeLago.cpp @@ -33,11 +33,9 @@ int main(int argc, char *argv[]) { size_t trials = 1; // read graph - Values::shared_ptr solution; - NonlinearFactorGraph::shared_ptr g; string inputFile = findExampleDataFile("w10000"); auto model = noiseModel::Diagonal::Sigmas((Vector(3) << 0.05, 0.05, 5.0 * M_PI / 180.0).finished()); - boost::tie(g, solution) = load2D(inputFile, model); + const auto [g, solution] = load2D(inputFile, model); // add noise to create initial estimate Values initial; diff --git a/timing/timeMatrixOps.cpp b/timing/timeMatrixOps.cpp index 95333fbf8..2e252e7c6 100644 --- a/timing/timeMatrixOps.cpp +++ b/timing/timeMatrixOps.cpp @@ -16,9 +16,6 @@ * @date Sep 18, 2010 */ -#include -#include - #include #include @@ -30,8 +27,6 @@ using namespace std; //namespace ublas = boost::numeric::ublas; //using namespace Eigen; -using boost::format; -using namespace boost::lambda; static std::mt19937 rng; static std::uniform_real_distribution<> uniform(-1.0, 0.0); @@ -61,10 +56,10 @@ int main(int argc, char* argv[]) { gtsam::SubMatrix top = mat.block(0, 0, n, n); gtsam::SubMatrix block = mat.block(m/4, n/4, m-m/2, n-n/2); - cout << format(" Basic: %1%x%2%\n") % (int)m % (int)n; - cout << format(" Full: mat(%1%:%2%, %3%:%4%)\n") % 0 % (int)m % 0 % (int)n; - cout << format(" Top: mat(%1%:%2%, %3%:%4%)\n") % 0 % (int)n % 0 % (int)n; - cout << format(" Block: mat(%1%:%2%, %3%:%4%)\n") % size_t(m/4) % size_t(m-m/4) % size_t(n/4) % size_t(n-n/4); + cout << " Basic: " << (int)m << "x" << (int)n << endl; + cout << " Full: mat(" << 0 << ":" << (int)m << ", " << 0 << ":" << (int)n << ")" << endl; + cout << " Top: mat(" << 0 << ":" << (int)n << ", " << 0 << ":" << (int)n << ")" << endl; + cout << " Block: mat(" << size_t(m/4) << ":" << size_t(m-m/4) << ", " << size_t(n/4) << ":" << size_t(n-n/4) << ")" << endl; cout << endl; { @@ -87,7 +82,7 @@ int main(int argc, char* argv[]) { tictoc_getNode(basicTimeNode, basicTime); basicTime = basicTimeNode->secs(); gtsam::tictoc_reset_(); - cout << format(" Basic: %1% mus/element") % double(1000000 * basicTime / double(mat.rows()*mat.cols()*nReps)) << endl; + cout << " Basic: " << double(1000000 * basicTime / double(mat.rows()*mat.cols()*nReps)) << " μs/element" << endl; gttic_(fullTime); for(size_t rep=0; repsecs(); gtsam::tictoc_reset_(); - cout << format(" Full: %1% mus/element") % double(1000000 * fullTime / double(full.rows()*full.cols()*nReps)) << endl; + cout << " Full: 
" << double(1000000 * fullTime / double(full.rows()*full.cols()*nReps)) << " μs/element" << endl; gttic_(topTime); for(size_t rep=0; repsecs(); gtsam::tictoc_reset_(); - cout << format(" Top: %1% mus/element") % double(1000000 * topTime / double(top.rows()*top.cols()*nReps)) << endl; + cout << " Top: " << double(1000000 * topTime / double(top.rows()*top.cols()*nReps)) << " μs/element" << endl; gttic_(blockTime); for(size_t rep=0; repsecs(); gtsam::tictoc_reset_(); - cout << format(" Block: %1% mus/element") % double(1000000 * blockTime / double(block.rows()*block.cols()*nReps)) << endl; + cout << " Block: " << double(1000000 * blockTime / double(block.rows()*block.cols()*nReps)) << " μs/element" << endl; cout << endl; } @@ -145,7 +140,7 @@ int main(int argc, char* argv[]) { tictoc_getNode(basicTimeNode, basicTime); basicTime = basicTimeNode->secs(); gtsam::tictoc_reset_(); - cout << format(" Basic: %1% mus/element") % double(1000000 * basicTime / double(mat.rows()*mat.cols()*nReps)) << endl; + cout << " Basic: " << double(1000000 * basicTime / double(mat.rows()*mat.cols()*nReps)) << " μs/element" << endl; gttic_(fullTime); for(size_t rep=0; repsecs(); gtsam::tictoc_reset_(); - cout << format(" Full: %1% mus/element") % double(1000000 * fullTime / double(full.rows()*full.cols()*nReps)) << endl; + cout << " Full: " << double(1000000 * fullTime / double(full.rows()*full.cols()*nReps)) << " μs/element" << endl; gttic_(topTime); for(size_t rep=0; repsecs(); gtsam::tictoc_reset_(); - cout << format(" Top: %1% mus/element") % double(1000000 * topTime / double(top.rows()*top.cols()*nReps)) << endl; + cout << " Top: " << double(1000000 * topTime / double(top.rows()*top.cols()*nReps)) << " μs/element" << endl; gttic_(blockTime); for(size_t rep=0; repsecs(); gtsam::tictoc_reset_(); - cout << format(" Block: %1% mus/element") % double(1000000 * blockTime / double(block.rows()*block.cols()*nReps)) << endl; + cout << " Block: " << double(1000000 * blockTime / double(block.rows()*block.cols()*nReps)) << " μs/element" << endl; cout << endl; } { double basicTime, fullTime, topTime, blockTime; - typedef pair ij_t; - vector ijs(100000); + typedef std::pair ij_t; + std::vector ijs(100000); cout << "Row-major matrix, random assignment:" << endl; // Do a few initial assignments to let any cache effects stabilize - for_each(ijs.begin(), ijs.end(), _1 = make_pair(uniform_i(rng),uniform_j(rng))); + for (auto& ij : ijs) ij = {uniform_i(rng), uniform_j(rng)}; for(size_t rep=0; rep<1000; ++rep) - for(const ij_t& ij: ijs) { mat(ij.first, ij.second) = uniform(rng); } + for(const auto& [i, j]: ijs) { mat(i, j) = uniform(rng); } gttic_(basicTime); - for_each(ijs.begin(), ijs.end(), _1 = make_pair(uniform_i(rng),uniform_j(rng))); + for (auto& ij : ijs) ij = {uniform_i(rng), uniform_j(rng)}; for(size_t rep=0; rep<1000; ++rep) - for(const ij_t& ij: ijs) { mat(ij.first, ij.second) = uniform(rng); } + for(const auto& [i, j]: ijs) { mat(i, j) = uniform(rng); } gttoc_(basicTime); tictoc_getNode(basicTimeNode, basicTime); basicTime = basicTimeNode->secs(); gtsam::tictoc_reset_(); - cout << format(" Basic: %1% mus/element") % double(1000000 * basicTime / double(ijs.size()*nReps)) << endl; + cout << " Basic: " << double(1000000 * basicTime / double(ijs.size()*nReps)) << " μs/element" << endl; gttic_(fullTime); - for_each(ijs.begin(), ijs.end(), _1 = make_pair(uniform_i(rng),uniform_j(rng))); + for (auto& ij : ijs) ij = {uniform_i(rng), uniform_j(rng)}; for(size_t rep=0; rep<1000; ++rep) - for(const ij_t& ij: ijs) { full(ij.first, 
ij.second) = uniform(rng); } + for(const auto& [i, j]: ijs) { full(i, j) = uniform(rng); } gttoc_(fullTime); tictoc_getNode(fullTimeNode, fullTime); fullTime = fullTimeNode->secs(); gtsam::tictoc_reset_(); - cout << format(" Full: %1% mus/element") % double(1000000 * fullTime / double(ijs.size()*nReps)) << endl; + cout << " Full: " << double(1000000 * fullTime / double(ijs.size()*nReps)) << " μs/element" << endl; gttic_(topTime); - for_each(ijs.begin(), ijs.end(), _1 = make_pair(uniform_i(rng)%top.rows(),uniform_j(rng))); + for (auto& ij : ijs) ij = {uniform_i(rng) % top.rows(), uniform_j(rng)}; for(size_t rep=0; rep<1000; ++rep) - for(const ij_t& ij: ijs) { top(ij.first, ij.second) = uniform(rng); } + for(const auto& [i, j]: ijs) { top(i, j) = uniform(rng); } gttoc_(topTime); tictoc_getNode(topTimeNode, topTime); topTime = topTimeNode->secs(); gtsam::tictoc_reset_(); - cout << format(" Top: %1% mus/element") % double(1000000 * topTime / double(ijs.size()*nReps)) << endl; + cout << " Top: " << double(1000000 * topTime / double(ijs.size()*nReps)) << " μs/element" << endl; gttic_(blockTime); - for_each(ijs.begin(), ijs.end(), _1 = make_pair(uniform_i(rng)%block.rows(),uniform_j(rng)%block.cols())); + for (auto& ij : ijs) + ij = {uniform_i(rng) % block.rows(), uniform_j(rng) % block.cols()}; for(size_t rep=0; rep<1000; ++rep) - for(const ij_t& ij: ijs) { block(ij.first, ij.second) = uniform(rng); } + for(const auto& [i, j]: ijs) { block(i, j) = uniform(rng); } gttoc_(blockTime); tictoc_getNode(blockTimeNode, blockTime); blockTime = blockTimeNode->secs(); gtsam::tictoc_reset_(); - cout << format(" Block: %1% mus/element") % double(1000000 * blockTime / double(ijs.size()*nReps)) << endl; + cout << " Block: " << double(1000000 * blockTime / double(ijs.size()*nReps)) << " μs/element" << endl; cout << endl; } diff --git a/timing/timeVirtual.cpp b/timing/timeVirtual.cpp index d4f9634d5..62133761a 100644 --- a/timing/timeVirtual.cpp +++ b/timing/timeVirtual.cpp @@ -19,12 +19,10 @@ #include #include -#include #include using namespace std; -using namespace boost; using namespace gtsam; struct Plain { @@ -48,14 +46,6 @@ struct VirtualCounted { virtual ~VirtualCounted() {} }; -void intrusive_ptr_add_ref(VirtualCounted* obj) { ++ obj->refCount; } -void intrusive_ptr_release(VirtualCounted* obj) { - assert(obj->refCount > 0); - -- obj->refCount; - if(obj->refCount == 0) - delete obj; -} - int main(int argc, char *argv[]) { size_t trials = 10000000; @@ -145,7 +135,7 @@ int main(int argc, char *argv[]) { gttic_(intrusive_virtual_alloc_dealloc_call); for(size_t i=0; i obj(new VirtualCounted(i)); + std::shared_ptr obj(new VirtualCounted(i)); obj->setData(i+1); } gttoc_(intrusive_virtual_alloc_dealloc_call); diff --git a/timing/timeVirtual2.cpp b/timing/timeVirtual2.cpp index 3d6e02288..dd6c8e268 100644 --- a/timing/timeVirtual2.cpp +++ b/timing/timeVirtual2.cpp @@ -21,7 +21,6 @@ #include using namespace std; -using namespace boost; using namespace gtsam; struct DtorTestBase { diff --git a/wrap/matlab.h b/wrap/matlab.h index 645ba8edf..5c38ac808 100644 --- a/wrap/matlab.h +++ b/wrap/matlab.h @@ -490,7 +490,7 @@ Class* unwrap_ptr(const mxArray* obj, const string& propertyName) { //template <> //Vector unwrap_shared_ptr(const mxArray* obj, const string& propertyName) { // bool unwrap_shared_ptr_Vector_attempted = false; -// BOOST_STATIC_ASSERT(unwrap_shared_ptr_Vector_attempted, "Vector cannot be unwrapped as a shared pointer"); +// static_assert(unwrap_shared_ptr_Vector_attempted, "Vector cannot be unwrapped as 
a shared pointer"); // return Vector(); //} @@ -498,7 +498,7 @@ Class* unwrap_ptr(const mxArray* obj, const string& propertyName) { //template <> //Matrix unwrap_shared_ptr(const mxArray* obj, const string& propertyName) { // bool unwrap_shared_ptr_Matrix_attempted = false; -// BOOST_STATIC_ASSERT(unwrap_shared_ptr_Matrix_attempted, "Matrix cannot be unwrapped as a shared pointer"); +// static_assert(unwrap_shared_ptr_Matrix_attempted, "Matrix cannot be unwrapped as a shared pointer"); // return Matrix(); //}
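The recurring change across the test files above is the move from the Boost.Assign style `graph += SomeFactor(...)` to `NonlinearFactorGraph::emplace_shared<SomeFactor>(...)`, which constructs the factor in place directly as a shared pointer instead of copying a temporary. A minimal sketch of the new style; the keys, poses, and the 0.1 sigma are illustrative values, not taken from any one test:

```cpp
#include <gtsam/geometry/Pose2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/slam/BetweenFactor.h>

using namespace gtsam;
using symbol_shorthand::X;

int main() {
  NonlinearFactorGraph graph;
  auto model = noiseModel::Isotropic::Sigma(3, 0.1);

  // Old style (removed in this patch):
  //   graph += BetweenFactor<Pose2>(X(1), X(2), Pose2(1, 0, 0), model);
  // New style: construct the shared factor in place.
  graph.addPrior(X(1), Pose2(0, 0, 0), model);
  graph.emplace_shared<BetweenFactor<Pose2>>(X(1), X(2), Pose2(1, 0, 0), model);

  // Rough initial guess, then optimize as the tests do.
  Values initial;
  initial.insert(X(1), Pose2(0.1, -0.1, 0.05));
  initial.insert(X(2), Pose2(0.9, 0.1, -0.05));
  Values result = LevenbergMarquardtOptimizer(graph, initial).optimize();
  result.print("result");
  return 0;
}
```

Besides dropping the Boost.Assign dependency, `emplace_shared` avoids constructing a factor on the stack and copying it into a `shared_ptr`, which is why it is used uniformly in these diffs.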
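The other two repeated replacements are `boost::tie(a, b) = f();` giving way to C++17 structured bindings, and the Boost.Assign `ordering += key1, key2;` idiom giving way to brace-initialized `Ordering`. A small sketch of both, assuming a hypothetical `makePlanarSystem()` helper standing in for `example::planarGraph()`; the single unary factor and its values are made up for illustration:

```cpp
#include <utility>

#include <gtsam/inference/Ordering.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/linear/VectorValues.h>

using namespace gtsam;
using symbol_shorthand::L;
using symbol_shorthand::X;

// Hypothetical stand-in for example::planarGraph(N): returns a system and its solution.
std::pair<GaussianFactorGraph, VectorValues> makePlanarSystem() {
  GaussianFactorGraph Ab;
  Ab.add(X(1), I_2x2, Vector2(1.0, 2.0), noiseModel::Unit::Create(2));
  VectorValues xtrue;
  xtrue.insert(X(1), Vector2(1.0, 2.0));
  return {Ab, xtrue};
}

int main() {
  // Before: GaussianFactorGraph Ab; VectorValues xtrue; boost::tie(Ab, xtrue) = makePlanarSystem();
  const auto [Ab, xtrue] = makePlanarSystem();

  // Before: Ordering ordering; ordering += L(1), X(2), X(1);   (Boost.Assign)
  const Ordering ordering{L(1), X(2), X(1)};

  return (Ab.size() == 1 && xtrue.size() == 1 && ordering.size() == 3) ? 0 : 1;
}
```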